Repository: trekhleb/links-detector Branch: master Commit: 746d387743f7 Files: 87 Total size: 919.4 KB Directory structure: gitextract_y0_euu71/ ├── .eslintrc.js ├── .gitignore ├── LICENCE ├── README.DEV.md ├── README.md ├── articles/ │ └── printed_links_detection/ │ ├── printed_links_detection.md │ └── printed_links_detection.ru.md ├── package.json ├── public/ │ ├── index.css │ ├── index.html │ ├── manifest.json │ ├── models/ │ │ └── links_detector/ │ │ └── v1/ │ │ └── model.json │ ├── robots.txt │ ├── videos/ │ │ └── demo-black-720p.webm │ └── wasm/ │ ├── tfjs-backend-wasm-simd.wasm │ ├── tfjs-backend-wasm-threaded-simd.wasm │ └── tfjs-backend-wasm.wasm ├── serve.json ├── src/ │ ├── components/ │ │ ├── App.tsx │ │ ├── Routes.tsx │ │ ├── elements/ │ │ │ ├── BoxesCanvas.tsx │ │ │ ├── DebugInfo.tsx │ │ │ ├── DetectedLinks.tsx │ │ │ ├── DetectedLinksPrefixes.tsx │ │ │ ├── LinksDetector.tsx │ │ │ ├── PerformanceMonitor.tsx │ │ │ └── PixelsCanvas.tsx │ │ ├── screens/ │ │ │ ├── DebugScreen.tsx │ │ │ ├── DemoScreen.tsx │ │ │ ├── DetectorScreen.tsx │ │ │ ├── HomeScreen.tsx │ │ │ └── NotFoundScreen.tsx │ │ └── shared/ │ │ ├── CameraStream.tsx │ │ ├── Demo.tsx │ │ ├── EnhancedRow.tsx │ │ ├── ErrorBoundary.tsx │ │ ├── Footer.tsx │ │ ├── Grid.tsx │ │ ├── Header.tsx │ │ ├── HyperLink.tsx │ │ ├── Icon.tsx │ │ ├── LaunchButton.tsx │ │ ├── Logo.tsx │ │ ├── MainNavigation.tsx │ │ ├── Modal.tsx │ │ ├── ModalCloseButton.tsx │ │ ├── Notification.tsx │ │ ├── PageTitle.tsx │ │ ├── ProgressBar.tsx │ │ ├── Promo.tsx │ │ ├── Spinner.css │ │ ├── Spinner.tsx │ │ └── Template.tsx │ ├── configs/ │ │ ├── analytics.ts │ │ ├── detectionConfig.ts │ │ └── pwa.ts │ ├── constants/ │ │ ├── debug.ts │ │ ├── links.ts │ │ ├── page.ts │ │ ├── routes.ts │ │ └── style.ts │ ├── hooks/ │ │ ├── useGraphModel.ts │ │ ├── useLinksDetector.ts │ │ ├── useLogger.ts │ │ ├── usePageTitle.ts │ │ ├── useTesseract.ts │ │ └── useWindowSize.ts │ ├── icons/ │ │ ├── README.md │ │ └── index.ts │ ├── index.tsx │ ├── 
react-app-env.d.ts │ ├── service-worker.ts │ ├── serviceWorkerRegistration.ts │ ├── setupTests.ts │ ├── styles/ │ │ └── index.css │ └── utils/ │ ├── analytics.ts │ ├── debug.ts │ ├── graphModel.ts │ ├── image.ts │ ├── logger.ts │ ├── numbers.ts │ ├── profiler.ts │ ├── routes.ts │ ├── tesseract.ts │ └── types.ts ├── tailwind.config.js └── tsconfig.json ================================================ FILE CONTENTS ================================================ ================================================ FILE: .eslintrc.js ================================================ module.exports = { env: { browser: true, es6: true, node: true, jest: true, }, extends: [ 'eslint:recommended', 'airbnb', 'airbnb/hooks', 'plugin:import/typescript', ], parser: '@typescript-eslint/parser', plugins: [ '@typescript-eslint', ], parserOptions: { ecmaVersion: 2017, sourceType: 'module', }, rules: { indent: ['error', 2], 'linebreak-style': ['error', 'unix'], quotes: ['error', 'single'], 'no-console': 'warn', 'no-unused-vars': 'off', '@typescript-eslint/no-unused-vars': [ 'error', { vars: 'all', args: 'after-used', ignoreRestSiblings: false }, ], // Consider using explicit annotations for object literals and function return types even when they can be inferred. 
'@typescript-eslint/explicit-function-return-type': 'warn', 'no-empty': 'warn', 'react/jsx-filename-extension': [1, { extensions: ['.js', '.jsx', '.tsx'] }], 'import/extensions': [1, { extensions: ['.js', '.jsx', '.tsx'] }], 'no-use-before-define': 'off', '@typescript-eslint/no-use-before-define': 'error', 'no-shadow': 'off', '@typescript-eslint/no-shadow': 'error', 'react/require-default-props': 'off', 'no-useless-return': 'off', 'import/prefer-default-export': 'off', 'arrow-body-style': 'off', 'react/jsx-one-expression-per-line': 'off', }, }; ================================================ FILE: .gitignore ================================================ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. # dependencies /node_modules /.pnp .pnp.js # testing /coverage # production /build # misc .DS_Store .env.local .env.development.local .env.test.local .env.production.local .idea .vscode npm-debug.log* yarn-debug.log* yarn-error.log* # Tailwind generated styles src/styles/tailwind.css ================================================ FILE: LICENCE ================================================ MIT License Copyright (c) 2020 Oleksii Trekhleb Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.DEV.md ================================================ # Links Detector: Engineering Notes ## Working with the repository #### Installation `yarn install` #### Running locally over `http` `yarn start` The app will be available at [http://localhost:3000/links-detector/](http://localhost:3000/links-detector/) #### Running locally over `https` You might need camera access while testing the app on mobile devices through a local network. `yarn start-https` The app will be available at [https://localhost:3000/links-detector/](https://localhost:3000/links-detector/). You may also access it from a mobile device at `https://<your-computer-local-ip>:3000/links-detector/` if it is on the same network. #### Running the production build Service workers and [PWA](https://web.dev/progressive-web-apps/) (Progressive Web App) features might be tested against production builds only. To build the production version of the app and serve it, run: `yarn start-prod` The app will be available at [http://localhost:4000/links-detector/](http://localhost:4000/links-detector/) ## Version locks `react-router-dom v5.X.X` isn't compatible with `history v5.X.X`. Therefore `package.json` locks the `history` package version to `v4.X.X`. See [StackOverflow question](https://stackoverflow.com/questions/62449663/react-router-with-custom-history-not-working) for more details. ================================================ FILE: README.md ================================================ # 📖 👆🏻 Links Detector > Links Detector makes printed links clickable _via your smartphone camera_. No need to type a link in, just scan and click on it.
🚀 [**Launch Links Detector**](https://trekhleb.github.io/links-detector/) _(preferably from your smartphone)_ [![Links Detector](./public/images/links-detector-banner-bg-black-2.png)](https://trekhleb.github.io/links-detector) [📖 Long-read about how the detector works](https://trekhleb.dev/blog/2020/printed-links-detection/) ## 🤷🏻‍ The Problem So you read a book or a magazine and see the link like `https://some-url.com/which/may/be/long?and_with_params=true`, but you can't click on it since it is printed. To visit this link you need to start typing it character by character in the browser's address bar, which may be pretty annoying and error-prone. ## 💡 The Solution Similarly to QR-code detection, we may try to "teach" the smartphone to _detect_ and _recognize_ printed links for us and to make them _clickable_. This way you'll do just _one_ click instead of _multiple_ keystrokes. Your operational complexity goes from `O(N)` to `O(1)`. This is exactly what _Links Detector_ tries to achieve. It makes you do just one click on the link instead of typing the whole link manually character by character. ![Links Detector Demo](./public/videos/demo-white.gif) ## ⚠️ Limitations Currently, the application is in _experimental_ _Alpha_ stage and has [many issues and limitations](https://github.com/trekhleb/links-detector/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement). So don't raise your expectations level too high until these issues are resolved 🤷🏻‍. ## 🏋🏻‍ Model Training The detection model was trained using [TensorFlow 2 Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection). You may find the details of the training in the [**📖 👆🏻 Making the Printed Links Clickable Using TensorFlow 2 Object Detection API**](https://trekhleb.dev/blog/2020/printed-links-detection/) long-read article.
## ⚙️ Technologies _Links Detector_ is a pure frontend [React](https://create-react-app.dev/) application written on [TypeScript](https://www.typescriptlang.org/). Links detection is happening right in your browser without the need of sending images to the server. _Links Detector_ is [PWA](https://web.dev/progressive-web-apps/) (Progressive Web App) friendly application made on top of a [Workbox](https://developers.google.com/web/tools/workbox) library. While you navigate through the app it tries to cache all resources to make them available offline and to make consequent visits much faster for you. You may also [install](https://developer.mozilla.org/en-US/docs/Web/Progressive_web_apps/Developer_guide/Installing) Links Detector as a standalone app on your smartphone. Links detection and recognition happens by means of [TensorFlow](https://www.tensorflow.org) and [Tesseract.js](https://github.com/naptha/tesseract.js) libraries which in turn rely on [WebGL](https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API) and [WebAssembly](https://developer.mozilla.org/en-US/docs/WebAssembly) browser support. ## Author - [@trekhleb](https://trekhleb.dev) ================================================ FILE: articles/printed_links_detection/printed_links_detection.md ================================================ # 📖 👆🏻 Making the Printed Links Clickable Using TensorFlow 2 Object Detection API ![Links Detector Cover](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/01-banner.png) ## 📃 TL;DR _In this article we will start solving the issue of making the printed links (i.e. in a book or in a magazine) clickable via your smartphone camera._ We will use [TensorFlow 2 Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) to train a custom object detector model to find positions and bounding boxes of the sub-strings like `https://` in the text image (i.e. 
in smartphone camera stream). The text of each link (right continuation of `https://` bounding box) will be recognized by using [Tesseract](https://tesseract.projectnaptha.com/) library. The recognition part will not be covered in this article, but you may find the complete code example of the application in [links-detector repository](https://github.com/trekhleb/links-detector). > 🚀 [**Launch Links Detector demo**](https://trekhleb.github.io/links-detector/) from your smartphone to see the final result. > 📝 [**Open links-detector repository**](https://github.com/trekhleb/links-detector) on GitHub to see the complete source code of the application. Here is how the final solution will look like: ![Links Detector Demo](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/03-links-detector-demo.gif) > ⚠️ Currently the application is in _experimental_ _Alpha_ stage and has [many issues and limitations](https://github.com/trekhleb/links-detector/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement). So don't raise your expectations level too high until these issues are resolved 🤷🏻‍. Also, the purpose of this article is more about learning how to work with TensorFlow 2 Object Detection API rather than coming up with a production-ready model. > In case if Python code blocks in this article will lack proper formatting on this platform feel free to [to read the article on GitHub](https://github.com/trekhleb/links-detector/blob/master/articles/printed_links_detection/printed_links_detection.md) ## 🤷🏻‍️ The Problem I work as a software engineer and in my own time, I learn Machine Learning as a hobby. But this is not the problem yet. I bought a printed book about Machine Learning recently and while I was reading through the first several chapters I've encountered many printed links in the text that looked like `https://tensorflow.org/` or `https://some-url.com/which/may/be/even/longer?and_with_params=true`. 
![Printed Links](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/02-printed-links.jpg) I saw all these links, but I couldn't click on them since they were printed (thanks, cap!). To visit these links I needed to start typing them character by character in the browser's address bar, which was pretty annoying and error-prone. ## 💡 Possible Solution So, I was thinking, what if, similarly to QR-code detection, we will try to "teach" the smartphone to _(1)_ _detect_ and _(2)_ _recognize_ printed links for us and to make them _clickable_? This way you would do just one click instead of multiple keystrokes. The operational complexity of "clicking" the printed links goes from `O(N)` to `O(1)`. This is how the final workflow will look like: ![Links Detector Demo](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/03-links-detector-demo.gif) ## 📝 Solution Requirements As I've mentioned earlier I'm just studying Machine Learning as a hobby. Thus, the purpose of this article is more about _learning_ how to work with TensorFlow 2 Object Detection API rather than coming up with a production-ready application. With that being said, I simplified the solution requirements to the following: 1. The detection and recognition processes should have a **close-to-real-time** performance (i.e. `0.5-1` frames per second) on a device like iPhone X. It means that the whole _detection + recognition_ process should take up to `2` seconds (pretty bearable as for the amateur project). 2. Only **English** links should be supported. 3. Only **dark text** (i.e. black or dark-grey) on **light background** (i.e. white or light-grey) should be supported. 4. Only `https://` links should be supported for now (it is ok if our model will not recognize the `http://`, `ftp://`, `tcp://` or other types of links). 
## 🧩 Solution Breakdown ### High-level breakdown Let's see how we could approach the problem on a high level. #### Option 1: Detection model on the back-end **The flow:** 1. Get camera stream (frame by frame) on the client-side. 2. Send each frame one by one over the network to the back-end. 3. Do link detection and recognition on the back-end and send the response back to the client. 4. Client draws the detection boxes with the clickable links. ![Model on the back-end](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/04-frontend-backend.jpg) **Pros:** - 💚 The detection performance is not limited by the client's device. We may speed the detection up by scaling the service horizontally (adding more instances) and vertically (adding more cores/GPUs). - 💚 The model might be bigger since there is no need to upload it to the client-side. Downloading the `~10Mb` model on the client-side may be ok, but loading the `~100Mb` model might be a big issue for the client's network and application UX (user experience) otherwise. - 💚 It is possible to control who is using the model. Model is guarded behind the API, so we would have complete control over its callers/clients. **Cons:** - 💔 System complexity growth. The application tech stack grew from just `JavaScript` to, let's say, `JavaScript + Python`. We need to take care of the autoscaling. - 💔 Offline mode for the app is not possible since it needs an internet connection to work. - 💔 Too many HTTP requests between the client and the server may become a bottleneck at some point. Imagine if we would want to improve the performance of the detection, let's say, from `1` to `10+` frames per second. This means that each client will send `10+` requests per second. For `10` simultaneous clients it is already `100+` requests per second. The `HTTP/2` bidirectional streaming and `gRPC` might be useful in this case, but we're going back to the increased system complexity here. 
- 💔 System becomes more expensive. Almost all points from the Pros section need to be paid for. #### Option 2: Detection model on the front-end **The flow:** 1. Get camera stream (frame by frame) on the client-side. 2. Do link detection and recognition on the client-side (without sending anything to the back-end). 3. Client draws the detection boxes with the clickable links. ![Model on the front-end](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/05-frontend-only.jpg) **Pros:** - 💚 System is less complex. We don't need to set up the servers, build the API, and introduce an additional Python stack to the system. - 💚 Offline mode is possible. The app doesn't need an internet connection to work since the model is fully loaded to the device. So the Progressive Web Application ([PWA](https://web.dev/progressive-web-apps/)) might be built to support that. - 💚 System is "kind of" scaling automatically. The more clients you have, the more cores and GPUs they bring. This is not a proper scaling solution though (more about that in a Cons section below). - 💚 System is cheaper. We only need a server for static assets (`HTML`, `JS`, `CSS`, model files, etc.). This may be done for free, let's say, on GitHub. - 💚 No issue with the growing number of HTTP requests per second to the server-side. **Cons:** - 💔 Only horizontal scaling is possible (each client will have its own CPU/GPU). Vertical scaling is not possible since we can't influence the client's device performance. As a result, we can't guarantee fast detection for low performant devices. - 💔 It is not possible to guard the model usage and control the callers/clients of the model. Everyone could download the model and re-use it. - 💔 Battery consumption of the client's device might become an issue. For the model to work it needs computational resources. So clients might not be happy with their iPhone getting warmer and warmer while the app is working. 
#### High-level conclusion Since the purpose of the project was more about learning and not coming up with a production-ready solution _I decided to go with the second option of serving the model from the client side_. This made the whole project much cheaper (actually with GitHub it was free to host it), and I could focus more on Machine Learning than on the autoscaling back-end infrastructure. ### Lower level breakdown Ok, so we've decided to go with the serverless solution. Now we have an image from the camera stream as an input that looks something like this: ![Printed Links Input](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/06-printed-links-clean.jpg) We need to solve two sub-tasks for this image: 1. Links **detection** (finding the position and bounding boxes of the links) 2. Links **recognition** (recognizing the text of the links) #### Option 1: Tesseract based solution The first and the most obvious approach would be to solve the _Optical Character Recognition_ ([OCR](https://en.wikipedia.org/wiki/Optical_character_recognition)) task by recognizing the whole text of the image by using, let's say, [Tesseract.js](https://github.com/naptha/tesseract.js) library. It returns the bounding boxes of the paragraphs, text lines, and text blocks along with the recognized text. 
![Recognized text with bounding boxes](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/07-printed-links-boxes.jpg) We may try then to extract the links from the recognized text lines or text blocks with a regular expression like [this one](https://stackoverflow.com/questions/3809401/what-is-a-good-regular-expression-to-match-a-url) (example is on TypeScript): ```typescript const URL_REG_EXP = /https?:\/\/(www\.)?[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_+.~#?&/=]*)/gi; const extractLinkFromText = (text: string): string | null => { const urls: string[] | null = text.match(URL_REG_EXP); if (!urls || !urls.length) { return null; } return urls[0]; }; ``` 💚 Seems like the issue is solved in a pretty straightforward and simple way: - We know the bounding boxes of the links - We also know the text of the links to make them clickable 💔 The thing is that the _recognition + detection_ time may vary from `2` to `20+` seconds depending on the size of the text, on the amount of "something that looks like a text" on the image, on the image quality and on other factors. So it will be really hard to achieve those `0.5-1` frames per second to make the user experience at least _close_ to real-time. 💔 Also if we would think about it, we're asking the library to recognize the **whole** text from the image for us even though it might contain only one or two links in it (i.e. only ~10% of the text might be useful for us), or it may even not contain the links at all. In this case, it sounds like a waste of computational resources. #### Option 2: Tesseract + TensorFlow based solution We could make Tesseract work faster if we used some _additional "adviser" algorithm_ prior to the links text recognition. This "adviser" algorithm should detect, but not recognize, _the leftmost position_ of each link on the image if there are any. This will allow us to speed up the recognition part by following these rules: 1. 
If the image does not contain any link we should not call Tesseract detection/recognition at all. 2. If the image does have the links then we need to ask Tesseract to recognize only those parts of the image that contain the links. We're not interested in spending the time for recognition of the irrelevant text that does not contain the links. The "adviser" algorithm that will take place before the Tesseract should work in constant time regardless of the image quality, or the presence/absence of the text on the image. It also should be pretty fast and detect the leftmost positions of the links in less than `1s` so that we could satisfy the "close-to-real-time" requirement (i.e. on iPhone X). > 💡 So what if we will use another object detection model to help us find all occurrences of the `https://` substrings (every secure link has this prefix, doesn't it?) in the image? Then, having these `https://` bounding boxes in the text we may extract the right-side continuation of them and send them to the Tesseract for text recognition. Take a look at the picture below: ![Tesseract and TensorFlow based solution](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/08-tesseract-vs-tensorflow.jpg) You may notice that Tesseract needs to do **much less** work in case if it would have some hints about where the links might be located (see the number of blue boxes on both pictures). So the question now is which object detection model we should choose and how to re-train it to support the detection of the custom `https://` objects. > Finally! We've got closer to the TensorFlow part of the article 😀 ## 🤖 Selecting the Object Detection Model Training a new object detection model is not a reasonable option in our context because of the following reasons: - 💔 The training process might take days/weeks and cost real money.
- 💔 We most probably won't be able to collect hundreds of thousands of _labeled_ images of the books that have links in them (we might try to generate them though, but more about that later). So instead of creating a new model, we should better teach an existing object detection model to do the custom object detection for us (to do the [transfer learning](https://en.wikipedia.org/wiki/Transfer_learning)). In our case, the "custom objects" would be the images with `https://` text drawn in them. This approach has the following benefits: - 💚 The dataset might be much smaller. We don't need to collect hundreds of thousands of the labeled images. Instead, we may do `~100` pictures and label them manually. This is because the model is already pre-trained on the general dataset like [COCO dataset](https://cocodataset.org/#home) and already learned how to extract general image features. - 💚 The training process will be much faster (minutes/hours on GPU instead of days/weeks). Again, this is because of a smaller dataset (smaller batches) and because of fewer trainable parameters. We may choose the existing model from [TensorFlow 2 Detection Model Zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md) which provides a collection of detection models pre-trained on the [COCO 2017 dataset](https://cocodataset.org/#home). Now it contains `~40` model variations to choose from. To re-train and fine-tune the model on the custom dataset we will use a [TensorFlow 2 Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection). The TensorFlow Object Detection API is an open-source framework built on top of [TensorFlow](https://www.tensorflow.org/) that makes it easy to construct, train, and deploy object detection models. 
If you follow the [Model Zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md) link you will find the _detection speed_ and _accuracy_ for each model. ![Model Zoo](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/09-model-zoo.jpg) _Image source: [TensorFlow Model Zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md) repository_ Of course, we would want to find the right balance between the detection **speed** and **accuracy** while picking the model. But what might be even more important in our case is the **size** of the model since it will be loaded to the client-side. The size of the archived model might vary drastically from `~20Mb` to `~1Gb`. Here are several examples: - `1386 (Mb)` `centernet_hg104_1024x1024_kpts_coco17_tpu-32` - ` 330 (Mb)` `centernet_resnet101_v1_fpn_512x512_coco17_tpu-8` - ` 195 (Mb)` `centernet_resnet50_v1_fpn_512x512_coco17_tpu-8` - ` 198 (Mb)` `centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8` - ` 227 (Mb)` `centernet_resnet50_v2_512x512_coco17_tpu-8` - ` 230 (Mb)` `centernet_resnet50_v2_512x512_kpts_coco17_tpu-8` - ` 29 (Mb)` `efficientdet_d0_coco17_tpu-32` - ` 49 (Mb)` `efficientdet_d1_coco17_tpu-32` - ` 60 (Mb)` `efficientdet_d2_coco17_tpu-32` - ` 89 (Mb)` `efficientdet_d3_coco17_tpu-32` - ` 151 (Mb)` `efficientdet_d4_coco17_tpu-32` - ` 244 (Mb)` `efficientdet_d5_coco17_tpu-32` - ` 376 (Mb)` `efficientdet_d6_coco17_tpu-32` - ` 376 (Mb)` `efficientdet_d7_coco17_tpu-32` - ` 665 (Mb)` `extremenet` - ` 427 (Mb)` `faster_rcnn_inception_resnet_v2_1024x1024_coco17_tpu-8` - ` 424 (Mb)` `faster_rcnn_inception_resnet_v2_640x640_coco17_tpu-8` - ` 337 (Mb)` `faster_rcnn_resnet101_v1_1024x1024_coco17_tpu-8` - ` 337 (Mb)` `faster_rcnn_resnet101_v1_640x640_coco17_tpu-8` - ` 343 (Mb)` `faster_rcnn_resnet101_v1_800x1333_coco17_gpu-8` - ` 449 (Mb)` 
`faster_rcnn_resnet152_v1_1024x1024_coco17_tpu-8` - ` 449 (Mb)` `faster_rcnn_resnet152_v1_640x640_coco17_tpu-8` - ` 454 (Mb)` `faster_rcnn_resnet152_v1_800x1333_coco17_gpu-8` - ` 202 (Mb)` `faster_rcnn_resnet50_v1_1024x1024_coco17_tpu-8` - ` 202 (Mb)` `faster_rcnn_resnet50_v1_640x640_coco17_tpu-8` - ` 207 (Mb)` `faster_rcnn_resnet50_v1_800x1333_coco17_gpu-8` - ` 462 (Mb)` `mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8` - ` 86 (Mb)` `ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8` - ` 44 (Mb)` `ssd_mobilenet_v2_320x320_coco17_tpu-8` - ` 20 (Mb)` `ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8` - ` 20 (Mb)` `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8` - ` 369 (Mb)` `ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8` - ` 369 (Mb)` `ssd_resnet101_v1_fpn_640x640_coco17_tpu-8` - ` 481 (Mb)` `ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8` - ` 480 (Mb)` `ssd_resnet152_v1_fpn_640x640_coco17_tpu-8` - ` 233 (Mb)` `ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8` - ` 233 (Mb)` `ssd_resnet50_v1_fpn_640x640_coco17_tpu-8` The **`ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8`** model might be a good fit in our case: - 💚 It is relatively lightweight: `20Mb` archived. - 💚 It is pretty fast: `39ms` for the detection. - 💚 It uses the MobileNet v2 network as a feature extractor which is optimized for usage on mobile devices to reduce energy consumption. - 💚 It does the object detection for the whole image and for all objects in it **in one go** regardless of the image content (no [regions proposal](https://en.wikipedia.org/wiki/Region_Based_Convolutional_Neural_Networks) step is involved which makes the detection faster). - 💔 It is not the most accurate model though (everything is a tradeoff ⚖️). The model name encodes some several important characteristics that you may read more about if you want: - The expected image input size is `640x640px`. 
- The model implements [Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325) (SSD) and [Feature Pyramid Network](https://arxiv.org/abs/1612.03144) (FPN). - [MobileNet v2](https://ai.googleblog.com/2018/04/mobilenetv2-next-generation-of-on.html) convolutional neural network ([CNN](https://en.wikipedia.org/wiki/Convolutional_neural_network)) is used as a feature extractor. - The model was trained on [COCO dataset](https://cocodataset.org/#home) ## 🛠 Installing Object Detection API In this article, we're going to install the Tensorflow 2 Object Detection API _as a Python package_. It is convenient in case if you're experimenting in [Google Colab](https://colab.research.google.com/) (recommended) or in [Jupyter](https://jupyter.org/try). For both cases no local installation is needed, you may experiment right in your browser. You may also follow the [official documentation](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2.md) if you would prefer to install Object Detection API via Docker. > If you stuck with something during the API installation or during the dataset preparation try to read through the [TensorFlow 2 Object Detection API tutorial](https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/index.html) which adds a lot of useful details to this process. First, let's clone the [API repository](https://github.com/tensorflow/models): ```bash git clone --depth 1 https://github.com/tensorflow/models ``` _output →_ ``` Cloning into 'models'... remote: Enumerating objects: 2301, done. remote: Counting objects: 100% (2301/2301), done. remote: Compressing objects: 100% (2000/2000), done. remote: Total 2301 (delta 561), reused 922 (delta 278), pack-reused 0 Receiving objects: 100% (2301/2301), 30.60 MiB | 13.90 MiB/s, done. Resolving deltas: 100% (561/561), done. 
``` Now, let's compile the [API proto files](https://github.com/tensorflow/models/tree/master/research/object_detection/protos) into Python files by using [protoc](https://grpc.io/docs/protoc-installation/) tool: ```bash cd ./models/research protoc object_detection/protos/*.proto --python_out=. ``` Finally, let's install the TF2 version of [setup.py](https://github.com/tensorflow/models/blob/master/research/object_detection/packages/tf2/setup.py) via `pip`: ```bash cp ./object_detection/packages/tf2/setup.py . pip install . --quiet ``` > It is possible that the last step will fail because of some dependency errors. In this case, you might want to run `pip install . --quiet` one more time. We may test that installation went successfully by running the following tests: ```bash python object_detection/builders/model_builder_tf2_test.py ``` You should see the logs that end with something similar to this: ``` [ OK ] ModelBuilderTF2Test.test_unknown_ssd_feature_extractor ---------------------------------------------------------------------- Ran 20 tests in 45.072s OK (skipped=1) ``` The TensorFlow Object Detection API is installed! You may now use the scripts that API provides for doing the model [inference](https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/inference_tf2_colab.ipynb), [training](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_training_and_evaluation.md) or [fine-tuning](https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/eager_few_shot_od_training_tf2_colab.ipynb). ## ⬇️ Downloading the Pre-Trained Model Let's download our selected `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8` model from the TensorFlow Model Zoo and check how it does the general object detection (detection of the objects of classes from COCO dataset like "cat", "dog", "car", etc.). 
We will use the [get_file()](https://www.tensorflow.org/api_docs/python/tf/keras/utils/get_file) TensorFlow helper to download the archived model from the URL and unpack it. ```python import tensorflow as tf import pathlib MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8' TF_MODELS_BASE_PATH = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/' CACHE_FOLDER = './cache' def download_tf_model(model_name, cache_folder): model_url = TF_MODELS_BASE_PATH + model_name + '.tar.gz' model_dir = tf.keras.utils.get_file( fname=model_name, origin=model_url, untar=True, cache_dir=pathlib.Path(cache_folder).absolute() ) return model_dir # Start the model download. model_dir = download_tf_model(MODEL_NAME, CACHE_FOLDER) print(model_dir) ``` _output →_ ``` /content/cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ``` Here is how the folder structure looks so far: ![Cache Folder](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/10-cache-folder.jpg) The `checkpoint` folder contains the snapshot of the pre-trained model. The `pipeline.config` file contains the detection settings of the model. We'll come back to this file later when we will need to fine-tune the model. ## 🏄🏻‍️ Trying the Model (Doing the Inference) For now, the model can detect the object of [90 COCO dataset classes](https://cocodataset.org/#explore) like a `car`, `bird`, `hot dog` etc. ![COCO classes](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/11-coco-classes.jpg) _Image source: [COCO dataset](https://cocodataset.org/#explore) website_ Let's see how the model performs on some general images that contain the objects of these classes. ### Loading COCO labels Object Detection API already has a complete set of COCO labels (classes) defined for us. ```python import os # Import Object Detection API helpers. 
from object_detection.utils import label_map_util # Loads the COCO labels data (class names and indices relations). def load_coco_labels(): # Object Detection API already has a complete set of COCO classes defined for us. label_map_path = os.path.join( 'models/research/object_detection/data', 'mscoco_complete_label_map.pbtxt' ) label_map = label_map_util.load_labelmap(label_map_path) # Class ID to Class Name mapping. categories = label_map_util.convert_label_map_to_categories( label_map, max_num_classes=label_map_util.get_max_label_map_index(label_map), use_display_name=True ) category_index = label_map_util.create_category_index(categories) # Class Name to Class ID mapping. label_map_dict = label_map_util.get_label_map_dict(label_map, use_display_name=True) return category_index, label_map_dict # Load COCO labels. coco_category_index, coco_label_map_dict = load_coco_labels() print('coco_category_index:', coco_category_index) print('coco_label_map_dict:', coco_label_map_dict) ``` _output →_ ``` coco_category_index: { 1: {'id': 1, 'name': 'person'}, 2: {'id': 2, 'name': 'bicycle'}, ... 90: {'id': 90, 'name': 'toothbrush'}, } coco_label_map_dict: { 'background': 0, 'person': 1, 'bicycle': 2, 'car': 3, ... 'toothbrush': 90, } ``` ### Build a detection function We need to create a detection function that will use the pre-trained model we've downloaded to do the object detection. ```python import tensorflow as tf # Import Object Detection API helpers. from object_detection.utils import config_util from object_detection.builders import model_builder # Generates the detection function for specific model and specific model's checkpoint def detection_fn_from_checkpoint(config_path, checkpoint_path): # Build the model. pipeline_config = config_util.get_configs_from_pipeline_file(config_path) model_config = pipeline_config['model'] model = model_builder.build( model_config=model_config, is_training=False, ) # Restore checkpoints. 
ckpt = tf.compat.v2.train.Checkpoint(model=model) ckpt.restore(checkpoint_path).expect_partial() # This is a function that will do the detection. @tf.function def detect_fn(image): image, shapes = model.preprocess(image) prediction_dict = model.predict(image, shapes) detections = model.postprocess(prediction_dict, shapes) return detections, prediction_dict, tf.reshape(shapes, [-1]) return detect_fn inference_detect_fn = detection_fn_from_checkpoint( config_path=os.path.join('cache', 'datasets', MODEL_NAME, 'pipeline.config'), checkpoint_path=os.path.join('cache', 'datasets', MODEL_NAME, 'checkpoint', 'ckpt-0'), ) ``` This `inference_detect_fn` function will accept an image and will return the detected objects' info. ### Loading the images for inference Let's try to detect the object on this image: ![General Object Inference](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/12-inference-01.jpg) _Image source: [oleksii_trekhleb](https://www.instagram.com/oleksii_trekhleb/?hl=en) Instagram_ To do that let's save the image to the `inference/test/` folder of our project. If you're using Google Colab you may create this folder and upload the image manually. Here is how the folder structure looks so far: ![Folder structure](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/14-inference-folders.jpg) ```python import matplotlib.pyplot as plt %matplotlib inline # Creating a TensorFlow dataset of just one image. inference_ds = tf.keras.preprocessing.image_dataset_from_directory( directory='inference', image_size=(640, 640), batch_size=1, shuffle=False, label_mode=None ) # Numpy version of the dataset. inference_ds_numpy = list(inference_ds.as_numpy_iterator()) # You may preview the images in dataset like this. 
plt.figure(figsize=(14, 14)) for i, image in enumerate(inference_ds_numpy): plt.subplot(2, 2, i + 1) plt.imshow(image[0].astype("uint8")) plt.axis("off") plt.show() ``` ### Running the detection on test data Now we're ready to run the detection. The `inference_ds_numpy[0]` array stores the pixel data for the first image in `Numpy` format. ```python detections, predictions_dict, shapes = inference_detect_fn( inference_ds_numpy[0] ) ``` Let's see the shapes of the output: ```python boxes = detections['detection_boxes'].numpy() scores = detections['detection_scores'].numpy() classes = detections['detection_classes'].numpy() num_detections = detections['num_detections'].numpy()[0] print('boxes.shape: ', boxes.shape) print('scores.shape: ', scores.shape) print('classes.shape: ', classes.shape) print('num_detections:', num_detections) ``` _output →_ ``` boxes.shape: (1, 100, 4) scores.shape: (1, 100) classes.shape: (1, 100) num_detections: 100.0 ``` The model has made a `100` detections for us. It doesn't mean that it found `100` objects on the image though. It means that the model has `100` slots, and it can detect `100` objects at max on a single image. Each detection has a score that represents the confidence of the model about it. The bounding boxes for each detection are stored in the `boxes` array. The scores or confidences of the model about each detection are stored in the `scores` array. Finally, the `classes` array stores the labels (classes) for each detection. 
Let's check the first 5 detections: ```python print('First 5 boxes:') print(boxes[0,:5]) print('First 5 scores:') print(scores[0,:5]) print('First 5 classes:') print(classes[0,:5]) class_names = [coco_category_index[idx + 1]['name'] for idx in classes[0]] print('First 5 class names:') print(class_names[:5]) ``` _output →_ ``` First 5 boxes: [[0.17576033 0.84654826 0.25642633 0.88327974] [0.5187813 0.12410264 0.6344235 0.34545377] [0.5220358 0.5181462 0.6329132 0.7669856 ] [0.50933677 0.7045719 0.5619138 0.7446198 ] [0.44761637 0.51942706 0.61237675 0.75963426]] First 5 scores: [0.6950246 0.6343004 0.591157 0.5827219 0.5415643] First 5 classes: [9. 8. 8. 0. 8.] First 5 class names: ['traffic light', 'boat', 'boat', 'person', 'boat'] ``` The model sees the `traffic light`, three `boats`, and a `person` on the image. We may confirm that indeed these objects are seen on the image. From the `scores` array may see that the model is most confident (close to 70% of probability) in the `traffic light` object. Each entry of `boxes` array is `[y1, x1, y2, x2]`, where `(x1, y1)` and `(x2, y2)` are the top-left and bottom-right corners of the bounding box. Let's visualize the detection boxes: ```python # Importing Object Detection API helpers. from object_detection.utils import visualization_utils # Visualizes the bounding boxes on top of the image. def visualize_detections(image_np, detections, category_index): label_id_offset = 1 image_np_with_detections = image_np.copy() visualization_utils.visualize_boxes_and_labels_on_image_array( image_np_with_detections, detections['detection_boxes'][0].numpy(), (detections['detection_classes'][0].numpy() + label_id_offset).astype(int), detections['detection_scores'][0].numpy(), category_index, use_normalized_coordinates=True, max_boxes_to_draw=200, min_score_thresh=.4, agnostic_mode=False, ) plt.figure(figsize=(12, 16)) plt.imshow(image_np_with_detections) plt.show() # Visualizing the detections. 
visualize_detections( image_np=tf.cast(inference_ds_numpy[0][0], dtype=tf.uint32).numpy(), detections=detections, category_index=coco_category_index, ) ``` Here is the output: ![Inference result](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/14-inference-results-01.jpg) If we will do the detection for the text image here is what we will see: ![Inference result for text image](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/15-inference-results-02.jpg) The model couldn't detect anything on this image. This is what we're going to change, we want to teach the model to "see" the `https://` prefixes on this image. ## 📝 Preparing the Custom Dataset To "teach" the `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8` model to detect the custom objects which are _not_ a part of a COCO dataset we need to do the fine-tune training on a new custom dataset. The datasets for object detection consist of two parts: 1. The image itself (i.e. the image of the book page) 2. The boundary boxes that show where exactly on the image the custom objects are located. ![Bounding Boxes](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/16-detection-boxes.jpg) In the example above each box has `left-top` and `right-bottom` coordinates in _absolute_ values (in pixels). However, there are also different formats of writing the location of the bounding boxes exists. For example, we may locate the bounding box by setting the coordinate of its `center point` and its `width` and `height`. We might also use _relative_ values (percentage of the width and height of the image) for setting up the coordinates. But you've got the idea, the network needs to know what the image is and where on the image the objects are located. Now, how can we get the custom dataset for training? We have three options here: 1. _Re-use_ the existing dataset. 2. 
_Generate_ a new dataset of fake book images. 3. _Create_ the dataset manually by taking or downloading the pictures of real book pages which contain `https://` links and labeling all bounding boxes. ### Option 1: Re-using the existing dataset There are plenty of the datasets that are shared to be re-used by researches. We could start from the following resources to find a proper dataset: - [Google Dataset Search](https://datasetsearch.research.google.com/) - [Kaggle Datasets](https://www.kaggle.com/datasets) - [awesome-public-datasets](https://github.com/awesomedata/awesome-public-datasets) repository - etc. 💚 If you could find the needed dataset and its license allows you to re-use it, it is probably the fastest way to get straight to the model training. 💔 I couldn't find the dataset with labeled `https://` prefixes though. So we need to skip this option. ### Option 2: Generating the synthetic dataset There are tools that exist (i.e. [keras_ocr](https://keras-ocr.readthedocs.io/en/latest/examples/end_to_end_training.html#generating-synthetic-data)) that might help us to generate random text, include the link in it, and draw it on images with some background and distortions. 💚 The cool part about this approach is that we have the freedom to generate training examples for different _fonts_, _ligatures_, _text colors_, _background colors_. This is very useful if we want to avoid the [model overfitting](https://en.wikipedia.org/wiki/Overfitting) during the training (so that the model could generalize well to unseen real-world examples instead of failing once the background shade is changed for a bit). 💚 It is also possible to generate a variety of link types like `http://`, `http://`, `ftp://`, `tcp://` etc. Otherwise, it might be hard to find enough real-world examples of this kind of links for training. 💚 Another benefit of this approach is that we could generate as many training examples as we want. 
We're not limited to the number of pages of the printed book we've found for the dataset. Increasing the number of training examples may also increase the accuracy of the model. 💔 It is possible though to misuse the generator and to generate the training images that will be quite different from real-world examples. Let's say we may use the wrong and unrealistic distortions for the page (i.e. using waves bend instead of the arc one). In this case, the model will not generalize well to real-world examples. > I see this approach as a really promising one. It may help to overcome many model issues (more on that below). I didn't try it yet though. But it might be a good candidate for another article. ### Option 3: Creating the dataset manually The most straightforward way though is to get the book (or books) and to make the pictures of the pages with the links and to label all of them manually. The good news is that the dataset might be pretty small (hundreds of images might be enough) because we're not going to train the model _from scratch_ but instead, we're going to do a [transfer learning](https://en.wikipedia.org/wiki/Transfer_learning) (also see the [few-shot learning](https://paperswithcode.com/task/few-shot-learning).) 💚 In this case, the training dataset will be really close to real-world data. You will literally take the printed book, take a picture of it with realistic fonts, bends, shades, perspectives, and colors. 💔 Even though it doesn't require a lot of images it may still be time-consuming. 💔 It is hard to come up with a diverse database where training examples would have different fonts, background colors, and different types of links (we need to find many diverse books and magazines to accomplish that). Since the article has a learning purpose and since we're not trying to win an object detection competition let's go with this option for now and try to create a dataset by ourselves. 
### Preprocessing the data So, I've ended up shooting `125` images of the book pages that contain one or more `https://` links on them. ![Raw Dataset](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/17-dataset-raw.jpg) I put all these images in the `dataset/printed_links/raw` folder. Next, I'm going to preprocess the images by doing the following: - **Resize** each image to the width of `1024px` (they are too big originally and have a width of `3024px`) - **Crop** each image to make them squared (this is optional, and we could just resize the image by simply squeezing it, but I want the model to be trained on realistic proportions of `https:` boxes). - **Rotate** image if needed by applying the [exif](https://en.wikipedia.org/wiki/Exif) metadata. - **Greyscale** the image (we don't need the model to take the colors into consideration). - **Increase brightness** - **Increase contrast** - **Increase sharpness** Remember, that once we've decided to apply these transformations and adjustments to the dataset we need to do the same in the future for each image that we will send to the model for detection. Here is how we could apply these adjustments to the image using Python: ```python import os import math import shutil from pathlib import Path from PIL import Image, ImageOps, ImageEnhance # Resize an image. def preprocess_resize(target_width): def preprocess(image: Image.Image, log) -> Image.Image: (width, height) = image.size ratio = width / height if width > target_width: target_height = math.floor(target_width / ratio) log(f'Resizing: To size {target_width}x{target_height}') image = image.resize((target_width, target_height)) else: log('Resizing: Image already resized, skipping...') return image return preprocess # Crop an image. 
def preprocess_crop_square(): def preprocess(image: Image.Image, log) -> Image.Image: (width, height) = image.size left = 0 top = 0 right = width bottom = height crop_size = min(width, height) if width >= height: # Horizontal image. log(f'Squre cropping: Horizontal {crop_size}x{crop_size}') left = width // 2 - crop_size // 2 right = left + crop_size else: # Vetyical image. log(f'Squre cropping: Vertical {crop_size}x{crop_size}') top = height // 2 - crop_size // 2 bottom = top + crop_size image = image.crop((left, top, right, bottom)) return image return preprocess # Apply exif transpose to an image. def preprocess_exif_transpose(): # @see: https://pillow.readthedocs.io/en/stable/reference/ImageOps.html def preprocess(image: Image.Image, log) -> Image.Image: log('EXif transpose') image = ImageOps.exif_transpose(image) return image return preprocess # Apply color transformations to the image. def preprocess_color(brightness, contrast, color, sharpness): # @see: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html def preprocess(image: Image.Image, log) -> Image.Image: log('Coloring') enhancer = ImageEnhance.Color(image) image = enhancer.enhance(color) enhancer = ImageEnhance.Brightness(image) image = enhancer.enhance(brightness) enhancer = ImageEnhance.Contrast(image) image = enhancer.enhance(contrast) enhancer = ImageEnhance.Sharpness(image) image = enhancer.enhance(sharpness) return image return preprocess # Image pre-processing pipeline. def preprocess_pipeline(src_dir, dest_dir, preprocessors=[], files_num_limit=0, override=False): # Create destination folder if not exists. Path(dest_dir).mkdir(parents=False, exist_ok=True) # Get the list of files to be copied. src_file_names = os.listdir(src_dir) files_total = files_num_limit if files_num_limit > 0 else len(src_file_names) files_processed = 0 # Logger function. def preprocessor_log(message): print(' ' + message) # Iterate through files. 
for src_file_index, src_file_name in enumerate(src_file_names): if files_num_limit > 0 and src_file_index >= files_num_limit: break # Copy file. src_file_path = os.path.join(src_dir, src_file_name) dest_file_path = os.path.join(dest_dir, src_file_name) progress = math.floor(100 * (src_file_index + 1) / files_total) print(f'Image {src_file_index + 1}/{files_total} | {progress}% | {src_file_path}') if not os.path.isfile(src_file_path): preprocessor_log('Source is not a file, skipping...\n') continue if not override and os.path.exists(dest_file_path): preprocessor_log('File already exists, skipping...\n') continue shutil.copy(src_file_path, dest_file_path) files_processed += 1 # Preprocess file. image = Image.open(dest_file_path) for preprocessor in preprocessors: image = preprocessor(image, preprocessor_log) image.save(dest_file_path, quality=95) print('') print(f'{files_processed} out of {files_total} files have been processed') # Launching the image preprocessing pipeline. preprocess_pipeline( src_dir='dataset/printed_links/raw', dest_dir='dataset/printed_links/processed', override=True, # files_num_limit=1, preprocessors=[ preprocess_exif_transpose(), preprocess_resize(target_width=1024), preprocess_crop_square(), preprocess_color(brightness=2, contrast=1.3, color=0, sharpness=1), ] ) ``` As a result, all processed images were saved to the `dataset/printed_links/processed` folder. 
![Dataset Processed](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/18-dataset-processed.jpg) You may preview the images like this: ```python import matplotlib.pyplot as plt import numpy as np def preview_images(images_dir, images_num=1, figsize=(15, 15)): image_names = os.listdir(images_dir) image_names = image_names[:images_num] num_cells = math.ceil(math.sqrt(images_num)) figure = plt.figure(figsize=figsize) for image_index, image_name in enumerate(image_names): image_path = os.path.join(images_dir, image_name) image = Image.open(image_path) figure.add_subplot(num_cells, num_cells, image_index + 1) plt.imshow(np.asarray(image)) plt.show() preview_images('dataset/printed_links/processed', images_num=4, figsize=(16, 16)) ``` ### Labeling the dataset To do the labeling (to mark the locations of the objects that we're interested in, namely the `https://` prefixes) we may use the [LabelImg](https://github.com/tzutalin/labelImg) graphical image annotation tool. > For this step you might want to install the LabelImg tool on your local machine (not in Colab). You may find the detailed installation instructions in [LabelImg README](https://github.com/tzutalin/labelImg). Once you have LabelImg tool installed you may launch it for the `dataset/printed_links/processed` folder from the root of your project like this: ```bash labelImg dataset/printed_links/processed ``` Then you'll need to label all the images from the `dataset/printed_links/processed` folder and save annotations as XML files to `dataset/printed_links/labels/xml/` folder. 
![Labeling](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/18-labeling.jpg) ![Labeling Process](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/19-labeling-process.gif) After the labeling we should have an XML file with bounding boxes data for each image: ![Labels folder structure](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/20-labels-folder.jpg) ### Splitting the dataset into train, test, and validation subsets To identify the model's [overfitting or underfitting](https://en.wikipedia.org/wiki/Overfitting) issue we need to split the dataset into `train` and `test` dataset. Let's say `80%` of our images will be used to train the model and `20%` of the images will be used to check how well the model generalizes to the images that it didn't see before. > In this section we'll do the files splitting by copying them into different folders (`test` and `train` folders). However, this might not be the most optimal way. Instead, the splitting of the dataset may be done on [tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) level. 
```python import re import random def partition_dataset( images_dir, xml_labels_dir, train_dir, test_dir, val_dir, train_ratio, test_ratio, val_ratio, copy_xml ): if not os.path.exists(train_dir): os.makedirs(train_dir) if not os.path.exists(test_dir): os.makedirs(test_dir) if not os.path.exists(val_dir): os.makedirs(val_dir) images = [f for f in os.listdir(images_dir) if re.search(r'([a-zA-Z0-9\s_\\.\-\(\):])+(.jpg|.jpeg|.png)$', f, re.IGNORECASE)] num_images = len(images) num_train_images = math.ceil(train_ratio * num_images) num_test_images = math.ceil(test_ratio * num_images) num_val_images = math.ceil(val_ratio * num_images) print('Intended split') print(f' train: {num_train_images}/{num_images} images') print(f' test: {num_test_images}/{num_images} images') print(f' val: {num_val_images}/{num_images} images') actual_num_train_images = 0 actual_num_test_images = 0 actual_num_val_images = 0 def copy_random_images(num_images, dest_dir): copied_num = 0 if not num_images: return copied_num for i in range(num_images): if not len(images): break idx = random.randint(0, len(images)-1) filename = images[idx] shutil.copyfile(os.path.join(images_dir, filename), os.path.join(dest_dir, filename)) if copy_xml: xml_filename = os.path.splitext(filename)[0]+'.xml' shutil.copyfile(os.path.join(xml_labels_dir, xml_filename), os.path.join(dest_dir, xml_filename)) images.remove(images[idx]) copied_num += 1 return copied_num actual_num_train_images = copy_random_images(num_train_images, train_dir) actual_num_test_images = copy_random_images(num_test_images, test_dir) actual_num_val_images = copy_random_images(num_val_images, val_dir) print('\n', 'Actual split') print(f' train: {actual_num_train_images}/{num_images} images') print(f' test: {actual_num_test_images}/{num_images} images') print(f' val: {actual_num_val_images}/{num_images} images') partition_dataset( images_dir='dataset/printed_links/processed', train_dir='dataset/printed_links/partitioned/train', 
test_dir='dataset/printed_links/partitioned/test', val_dir='dataset/printed_links/partitioned/val', xml_labels_dir='dataset/printed_links/labels/xml', train_ratio=0.8, test_ratio=0.2, val_ratio=0, copy_xml=True ) ``` After splitting your dataset folder structure should look similar to this: ``` dataset/ └── printed_links ├── labels │ └── xml ├── partitioned │ ├── test │ └── train │ ├── IMG_9140.JPG │ ├── IMG_9140.xml │ ├── IMG_9141.JPG │ ├── IMG_9141.xml │ ... ├── processed └── raw ``` ### Exporting the dataset The last manipulation we should do with the data is to convert our datasets into [TFRecord](https://www.tensorflow.org/tutorials/load_data/tfrecord) format. The `TFRecord` format is a format that TensorFlow is using for storing a sequence of binary records. First, let's create two folders: one is for the labels in `CSV` format, and the other one is for the final dataset in `TFRecord` format. ```bash mkdir -p dataset/printed_links/labels/csv mkdir -p dataset/printed_links/tfrecords ``` Now we need to create a `dataset/printed_links/labels/label_map.pbtxt` proto file that will describe the classes of the objects in our dataset. In our case, we only have _one class_ which we may call `http`. Here is the content of this file: ``` item { id: 1 name: 'http' } ``` Now we're ready to generate the TFRecord datasets out of images in `jpg` format and labels in `xml` format: ```python import os import io import math import glob import tensorflow as tf import pandas as pd import xml.etree.ElementTree as ET from PIL import Image from collections import namedtuple from object_detection.utils import dataset_util, label_map_util tf1 = tf.compat.v1 # Convers labels from XML format to CSV. 
def xml_to_csv(path): xml_list = [] for xml_file in glob.glob(path + '/*.xml'): tree = ET.parse(xml_file) root = tree.getroot() for member in root.findall('object'): value = (root.find('filename').text, int(root.find('size')[0].text), int(root.find('size')[1].text), member[0].text, int(member[4][0].text), int(member[4][1].text), int(member[4][2].text), int(member[4][3].text) ) xml_list.append(value) column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax'] xml_df = pd.DataFrame(xml_list, columns=column_name) return xml_df def class_text_to_int(row_label, label_map_dict): return label_map_dict[row_label] def split(df, group): data = namedtuple('data', ['filename', 'object']) gb = df.groupby(group) return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)] # Creates a TFRecord. def create_tf_example(group, path, label_map_dict): with tf1.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = Image.open(encoded_jpg_io) width, height = image.size filename = group.filename.encode('utf8') image_format = b'jpg' xmins = [] xmaxs = [] ymins = [] ymaxs = [] classes_text = [] classes = [] for index, row in group.object.iterrows(): xmins.append(row['xmin'] / width) xmaxs.append(row['xmax'] / width) ymins.append(row['ymin'] / height) ymaxs.append(row['ymax'] / height) classes_text.append(row['class'].encode('utf8')) classes.append(class_text_to_int(row['class'], label_map_dict)) tf_example = tf1.train.Example(features=tf1.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(filename), 'image/source_id': dataset_util.bytes_feature(filename), 'image/encoded': dataset_util.bytes_feature(encoded_jpg), 'image/format': dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin': 
dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), 'image/object/class/label': dataset_util.int64_list_feature(classes), })) return tf_example def dataset_to_tfrecord( images_dir, xmls_dir, label_map_path, output_path, csv_path=None ): label_map = label_map_util.load_labelmap(label_map_path) label_map_dict = label_map_util.get_label_map_dict(label_map) tfrecord_writer = tf1.python_io.TFRecordWriter(output_path) images_path = os.path.join(images_dir) csv_examples = xml_to_csv(xmls_dir) grouped_examples = split(csv_examples, 'filename') for group in grouped_examples: tf_example = create_tf_example(group, images_path, label_map_dict) tfrecord_writer.write(tf_example.SerializeToString()) tfrecord_writer.close() print('Successfully created the TFRecord file: {}'.format(output_path)) if csv_path is not None: csv_examples.to_csv(csv_path, index=None) print('Successfully created the CSV file: {}'.format(csv_path)) # Generate a TFRecord for train dataset. dataset_to_tfrecord( images_dir='dataset/printed_links/partitioned/train', xmls_dir='dataset/printed_links/partitioned/train', label_map_path='dataset/printed_links/labels/label_map.pbtxt', output_path='dataset/printed_links/tfrecords/train.record', csv_path='dataset/printed_links/labels/csv/train.csv' ) # Generate a TFRecord for test dataset. 
dataset_to_tfrecord( images_dir='dataset/printed_links/partitioned/test', xmls_dir='dataset/printed_links/partitioned/test', label_map_path='dataset/printed_links/labels/label_map.pbtxt', output_path='dataset/printed_links/tfrecords/test.record', csv_path='dataset/printed_links/labels/csv/test.csv' ) ``` As a result we should now have two files: `test.record` and `train.record` in `dataset/printed_links/tfrecords/` folder: ``` dataset/ └── printed_links ├── labels │ ├── csv │ ├── label_map.pbtxt │ └── xml ├── partitioned │ ├── test │ ├── train │ └── val ├── processed ├── raw └── tfrecords ├── test.record └── train.record ``` These two files `test.record` and `train.record` are our final datasets that we will use to fine-tune the `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8` model. ## 📖 Exploring the TFRecord Datasets In this section, we will see how we may use the TensorFlow 2 Object Detection API to explore the datasets in `TFRecord` format. **Checking the number of items in a dataset** To count the number of items in the dataset we may do the following: ```python import tensorflow as tf # Count the number of examples in the dataset. def count_tfrecords(tfrecords_filename): raw_dataset = tf.data.TFRecordDataset(tfrecords_filename) # Keep in mind that the list() operation might be # a performance bottleneck for large datasets. return len(list(raw_dataset)) TRAIN_RECORDS_NUM = count_tfrecords('dataset/printed_links/tfrecords/train.record') TEST_RECORDS_NUM = count_tfrecords('dataset/printed_links/tfrecords/test.record') print('TRAIN_RECORDS_NUM: ', TRAIN_RECORDS_NUM) print('TEST_RECORDS_NUM: ', TEST_RECORDS_NUM) ``` _output →_ ``` TRAIN_RECORDS_NUM: 100 TEST_RECORDS_NUM: 25 ``` So we will train the model on `100` examples, and we will check the model accuracy on `25` test images. 
**Previewing the dataset images with bounding boxes** To preview images with detection boxes we may do the following: ```python import tensorflow as tf import numpy as np from google.protobuf import text_format import matplotlib.pyplot as plt # Import Object Detection API. from object_detection.utils import visualization_utils from object_detection.protos import string_int_label_map_pb2 from object_detection.data_decoders.tf_example_decoder import TfExampleDecoder %matplotlib inline # Visualize the TFRecord dataset. def visualize_tfrecords(tfrecords_filename, label_map=None, print_num=1): decoder = TfExampleDecoder( label_map_proto_file=label_map, use_display_name=False ) if label_map is not None: label_map_proto = string_int_label_map_pb2.StringIntLabelMap() with tf.io.gfile.GFile(label_map,'r') as f: text_format.Merge(f.read(), label_map_proto) class_dict = {} for entry in label_map_proto.item: class_dict[entry.id] = {'name': entry.name} raw_dataset = tf.data.TFRecordDataset(tfrecords_filename) for raw_record in raw_dataset.take(print_num): example = decoder.decode(raw_record) image = example['image'].numpy() boxes = example['groundtruth_boxes'].numpy() confidences = example['groundtruth_image_confidences'] filename = example['filename'] area = example['groundtruth_area'] classes = example['groundtruth_classes'].numpy() image_classes = example['groundtruth_image_classes'] weights = example['groundtruth_weights'] scores = np.ones(boxes.shape[0]) visualization_utils.visualize_boxes_and_labels_on_image_array( image, boxes, classes, scores, class_dict, max_boxes_to_draw=None, use_normalized_coordinates=True ) plt.figure(figsize=(8, 8)) plt.imshow(image) plt.show() # Visualizing the training TFRecord dataset. visualize_tfrecords( tfrecords_filename='dataset/printed_links/tfrecords/train.record', label_map='dataset/printed_links/labels/label_map.pbtxt', print_num=3 ) ``` As a result, we should see several images with bounding boxes drawn on top of each image. 
![TFRecord Preview](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/23-tfrecords-preview.jpg) ## 📈 Setting Up TensorBoard Before starting the training process we need to launch a [TensorBoard](https://www.tensorflow.org/tensorboard). TensorBoard will allow us to monitor the training process and see if the model is actually learning something or should we better stop the training and adjust training parameters. It will also help us to analyze what objects and at what location the model is detecting. ![TensorBoard](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/24-tensorboard.gif) _Image source: [TensorBoard homepage](https://www.tensorflow.org/tensorboard)_ The cool part about TensorBoard is that we may run it directly in Google Colab. However, if you're running the notebook in your local installation of Jupyter you may also [install it as Python package](https://github.com/tensorflow/tensorboard/blob/master/README.md) and launch it from the terminal. First, let's create a `./logs` folder where all training logs will be written: ```bash mkdir -p logs ``` Next, we may load the TensorBoard extension on Google Colab: ``` %load_ext tensorboard ``` And finally we may launch a TensorBoard to monitor the `./logs` folder: ``` %tensorboard --logdir ./logs ``` As a result, you should see the empty TensorBoard panel: ![Empty TensorBoard Panel](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/25-tensorboard-launch.jpg) After the model training is be started you may get back to this panel and see the training process progress. ## 🏋🏻‍️ Model Training ### Configuring the Detection Pipeline Now it's time to get back to the `cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config` file that we've mentioned earlier. 
This file defines the parameters of `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8` model training. We need to copy the `pipeline.config` file to the root of the project and adjust a couple of things in it: 1. We should change the **number of classes** from `90` (the COCO classes) to just `1` (the `http` class). 2. We should reduce the **batch size** to `8` to avoid the errors that are connected to the insufficient memory. 3. We need to point the model to its **checkpoints** since we don't want to train the model from scratch. 4. We need to change the `fine_tune_checkpoint_type` to `detection`. 5. We need to point the model to a proper **labels map**. 6. Lastly, we need to pint the model to the **train and test datasets**. All these changes may be done manually directly in `pipeline.config` file. But we may also do them through code: ```python import tensorflow as tf from shutil import copyfile from google.protobuf import text_format from object_detection.protos import pipeline_pb2 # Adjust pipeline config modification here if needed. def modify_config(pipeline): # Model config. pipeline.model.ssd.num_classes = 1 # Train config. pipeline.train_config.batch_size = 8 pipeline.train_config.fine_tune_checkpoint = 'cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0' pipeline.train_config.fine_tune_checkpoint_type = 'detection' # Train input reader config. pipeline.train_input_reader.label_map_path = 'dataset/printed_links/labels/label_map.pbtxt' pipeline.train_input_reader.tf_record_input_reader.input_path[0] = 'dataset/printed_links/tfrecords/train.record' # Eval input reader config. 
pipeline.eval_input_reader[0].label_map_path = 'dataset/printed_links/labels/label_map.pbtxt' pipeline.eval_input_reader[0].tf_record_input_reader.input_path[0] = 'dataset/printed_links/tfrecords/test.record' return pipeline def clone_pipeline_config(): copyfile( 'cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config', 'pipeline.config' ) def setup_pipeline(pipeline_config_path): clone_pipeline_config() pipeline = read_pipeline_config(pipeline_config_path) pipeline = modify_config(pipeline) write_pipeline_config(pipeline_config_path, pipeline) return pipeline def read_pipeline_config(pipeline_config_path): pipeline = pipeline_pb2.TrainEvalPipelineConfig() with tf.io.gfile.GFile(pipeline_config_path, "r") as f: proto_str = f.read() text_format.Merge(proto_str, pipeline) return pipeline def write_pipeline_config(pipeline_config_path, pipeline): config_text = text_format.MessageToString(pipeline) with tf.io.gfile.GFile(pipeline_config_path, "wb") as f: f.write(config_text) # Adjusting the pipeline configuration. 
pipeline = setup_pipeline('pipeline.config') print(pipeline) ``` Here is the content of the `pipeline.config` file: ``` model { ssd { num_classes: 1 image_resizer { fixed_shape_resizer { height: 640 width: 640 } } feature_extractor { type: "ssd_mobilenet_v2_fpn_keras" depth_multiplier: 1.0 min_depth: 16 conv_hyperparams { regularizer { l2_regularizer { weight: 3.9999998989515007e-05 } } initializer { random_normal_initializer { mean: 0.0 stddev: 0.009999999776482582 } } activation: RELU_6 batch_norm { decay: 0.996999979019165 scale: true epsilon: 0.0010000000474974513 } } use_depthwise: true override_base_feature_extractor_hyperparams: true fpn { min_level: 3 max_level: 7 additional_layer_depth: 128 } } box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true use_matmul_gather: true } } similarity_calculator { iou_similarity { } } box_predictor { weight_shared_convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { weight: 3.9999998989515007e-05 } } initializer { random_normal_initializer { mean: 0.0 stddev: 0.009999999776482582 } } activation: RELU_6 batch_norm { decay: 0.996999979019165 scale: true epsilon: 0.0010000000474974513 } } depth: 128 num_layers_before_predictor: 4 kernel_size: 3 class_prediction_bias_init: -4.599999904632568 share_prediction_tower: true use_depthwise: true } } anchor_generator { multiscale_anchor_generator { min_level: 3 max_level: 7 anchor_scale: 4.0 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 scales_per_octave: 2 } } post_processing { batch_non_max_suppression { score_threshold: 9.99999993922529e-09 iou_threshold: 0.6000000238418579 max_detections_per_class: 100 max_total_detections: 100 use_static_shapes: false } score_converter: SIGMOID } normalize_loss_by_num_matches: true loss { 
localization_loss { weighted_smooth_l1 { } } classification_loss { weighted_sigmoid_focal { gamma: 2.0 alpha: 0.25 } } classification_weight: 1.0 localization_weight: 1.0 } encode_background_as_zeros: true normalize_loc_loss_by_codesize: true inplace_batchnorm_update: true freeze_batchnorm: false } } train_config { batch_size: 8 data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { random_crop_image { min_object_covered: 0.0 min_aspect_ratio: 0.75 max_aspect_ratio: 3.0 min_area: 0.75 max_area: 1.0 overlap_thresh: 0.0 } } sync_replicas: true optimizer { momentum_optimizer { learning_rate { cosine_decay_learning_rate { learning_rate_base: 0.07999999821186066 total_steps: 50000 warmup_learning_rate: 0.026666000485420227 warmup_steps: 1000 } } momentum_optimizer_value: 0.8999999761581421 } use_moving_average: false } fine_tune_checkpoint: "cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0" num_steps: 50000 startup_delay_steps: 0.0 replicas_to_aggregate: 8 max_number_of_boxes: 100 unpad_groundtruth_tensors: false fine_tune_checkpoint_type: "detection" fine_tune_checkpoint_version: V2 } train_input_reader { label_map_path: "dataset/printed_links/labels/label_map.pbtxt" tf_record_input_reader { input_path: "dataset/printed_links/tfrecords/train.record" } } eval_config { metrics_set: "coco_detection_metrics" use_moving_averages: false } eval_input_reader { label_map_path: "dataset/printed_links/labels/label_map.pbtxt" shuffle: false num_epochs: 1 tf_record_input_reader { input_path: "dataset/printed_links/tfrecords/test.record" } } ``` ### Launching the training process We're ready now to launch a training process using the TensorFlow 2 Object Detection API. The API contains a [model_main_tf2.py](https://github.com/tensorflow/models/blob/master/research/object_detection/model_main_tf2.py) script that will run training for us. Feel free to explore the flags that this Python script supports in the source-code (i.e. 
`num_train_steps`, `model_dir` and others) to see their meanings. We will be training the model for `1000` iterations (epochs). Feel free to train it for a smaller or larger number of iterations depending on the learning progress (see the TensorBoard charts). ```bash %%bash NUM_TRAIN_STEPS=1000 CHECKPOINT_EVERY_N=1000 PIPELINE_CONFIG_PATH=pipeline.config MODEL_DIR=./logs SAMPLE_1_OF_N_EVAL_EXAMPLES=1 python ./models/research/object_detection/model_main_tf2.py \ --model_dir=$MODEL_DIR \ --num_train_steps=$NUM_TRAIN_STEPS \ --sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \ --pipeline_config_path=$PIPELINE_CONFIG_PATH \ --checkpoint_every_n=$CHECKPOINT_EVERY_N \ --alsologtostderr ``` While the model is training (it may take around`~10 minutes` for `1000` iterations in [GoogleColab GPU](https://colab.research.google.com/notebooks/gpu.ipynb) runtime) you should be able to observe the training progress in TensorBoard. The `localization` and `classification` losses should decrease which means that the model is doing a good job in localizing and classifying new custom objects. ![Training Process](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/26-tensorboard-training.jpg) Also during the training, the new model checkpoints (parameters that the model has learned during the training) will be saved to the `logs` folder. The `logs` folder structure now looks like this: ``` logs ├── checkpoint ├── ckpt-1.data-00000-of-00001 ├── ckpt-1.index └── train └── events.out.tfevents.1606560330.b314c371fa10.1747.1628.v2 ``` ### Evaluating the Model (Optional) The evaluation process uses the trained model checkpoints and evaluates how well the model performs in detecting objects in the test dataset. The results of this evaluation are summarised in the form of some [metrics](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/evaluation_protocols.md), which can be examined over time. 
You may read more about how to evaluate these metrics [here](https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/training.html#evaluating-the-model-optional). We will skip the metrics evaluation step in this article. But we may still use the evaluation step to see the model's detections in TensorBoard: ```bash %%bash PIPELINE_CONFIG_PATH=pipeline.config MODEL_DIR=logs python ./models/research/object_detection/model_main_tf2.py \ --model_dir=$MODEL_DIR \ --pipeline_config_path=$PIPELINE_CONFIG_PATH \ --checkpoint_dir=$MODEL_DIR \ ``` After launching the script you should be able to see several side-by-side images with detections boxes: ![Model Evaluation](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/27-tensorboard-evaluation.jpg) ## 🗜 Exporting the Model Once the training process is complete we should save the trained model for further usage. To export the model we will use the [exporter_main_v2.py](https://github.com/tensorflow/models/blob/master/research/object_detection/exporter_main_v2.py) script from Object Detection API. It prepares an object detection TensorFlow graph for inference using model configuration and a trained checkpoint. 
The script outputs associated checkpoint files, a SavedModel, and a copy of the model config: ```bash %%bash python ./models/research/object_detection/exporter_main_v2.py \ --input_type=image_tensor \ --pipeline_config_path=pipeline.config \ --trained_checkpoint_dir=logs \ --output_directory=exported/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ``` Here is what the `exported` folder contains after the export: ``` exported └── ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ├── checkpoint │ ├── checkpoint │ ├── ckpt-0.data-00000-of-00001 │ └── ckpt-0.index ├── pipeline.config └── saved_model ├── assets ├── saved_model.pb └── variables ├── variables.data-00000-of-00001 └── variables.index ``` At this moment we have a `saved_model` that may be used for inference. ## 🚀 Using the Exported Model Let's see how can we use the saved model from the previous step for object detections. First, we need to create a detection function that will use the saved model. It will accept the image and will output the detected objects: ```python import time import math PATH_TO_SAVED_MODEL = 'exported/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model' def detection_function_from_saved_model(saved_model_path): print('Loading saved model...', end='') start_time = time.time() # Load saved model and build the detection function detect_fn = tf.saved_model.load(saved_model_path) end_time = time.time() elapsed_time = end_time - start_time print('Done! Took {} seconds'.format(math.ceil(elapsed_time))) return detect_fn exported_detect_fn = detection_function_from_saved_model( PATH_TO_SAVED_MODEL ) ``` _output →_ ``` Loading saved model...Done! 
Took 9 seconds ``` To map the IDs of the detected classes back to the class names we need to load the label map as well: ```python from object_detection.utils import label_map_util category_index = label_map_util.create_category_index_from_labelmap( 'dataset/printed_links/labels/label_map.pbtxt', use_display_name=True ) print(category_index) ``` _output →_ ``` {1: {'id': 1, 'name': 'http'}} ``` Testing the model on a test dataset. ```python import matplotlib.pyplot as plt import tensorflow as tf import numpy as np from object_detection.utils import visualization_utils from object_detection.data_decoders.tf_example_decoder import TfExampleDecoder %matplotlib inline def tensors_from_tfrecord( tfrecords_filename, tfrecords_num, dtype=tf.float32 ): decoder = TfExampleDecoder() raw_dataset = tf.data.TFRecordDataset(tfrecords_filename) images = [] for raw_record in raw_dataset.take(tfrecords_num): example = decoder.decode(raw_record) image = example['image'] image = tf.cast(image, dtype=dtype) images.append(image) return images def test_detection(tfrecords_filename, tfrecords_num, detect_fn): image_tensors = tensors_from_tfrecord( tfrecords_filename, tfrecords_num, dtype=tf.uint8 ) for image_tensor in image_tensors: image_np = image_tensor.numpy() # The model expects a batch of images, so add an axis with `tf.newaxis`. input_tensor = tf.expand_dims(image_tensor, 0) detections = detect_fn(input_tensor) # All outputs are batches tensors. # Convert to numpy arrays, and take index [0] to remove the batch dimension. # We're only interested in the first num_detections. num_detections = int(detections.pop('num_detections')) detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()} detections['num_detections'] = num_detections # detection_classes should be ints. 
detections['detection_classes'] = detections['detection_classes'].astype(np.int64) image_np_with_detections = image_np.astype(int).copy() visualization_utils.visualize_boxes_and_labels_on_image_array( image_np_with_detections, detections['detection_boxes'], detections['detection_classes'], detections['detection_scores'], category_index, use_normalized_coordinates=True, max_boxes_to_draw=100, min_score_thresh=.3, agnostic_mode=False ) plt.figure(figsize=(8, 8)) plt.imshow(image_np_with_detections) plt.show() test_detection( tfrecords_filename='dataset/printed_links/tfrecords/test.record', tfrecords_num=10, detect_fn=exported_detect_fn ) ``` As a result, you should see `10` images from the test dataset and highlighted `https:` prefixes that were detected by the model: ![Testing the model on a test dataset](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/28-testing-the-model.jpg) The fact that the model is able to detect custom objects (in our case the `https://` prefixes) on the images it hasn't seen before is a good sign and something that we wanted to achieve. ## 🗜 Converting the Model for Web As you remember from the beginning of this article, our goal was to use the custom object detection model in the browser. Luckily, there is a [TensorFlow.js](https://www.tensorflow.org/js) JavaScript version of the TensorFlow library exists. In JavaScript, we can't work with our saved model directly. Instead, we need to convert it to [tfjs_graph_model](https://www.tensorflow.org/js/tutorials/conversion/import_saved_model) format. 
To do this we need to install the tensorflowjs Python package: ```bash pip install tensorflowjs --quiet ``` The model may be exported like this: ```bash %%bash tensorflowjs_converter \ --input_format=tf_saved_model \ --output_format=tfjs_graph_model \ exported/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model \ exported_web/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ``` The `exported_web` folder contains the `.json` file with the model metadata and a bunch of `.bin` files with trained model parameters: ``` exported_web └── ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ├── group1-shard1of4.bin ├── group1-shard2of4.bin ├── group1-shard3of4.bin ├── group1-shard4of4.bin └── model.json ``` Finally, we have the model that is able to detect `https://` prefixes for us, and it is saved in JavaScript-understandable format. Let's check the model size to see if it is light enough to be loaded completely to the client-side: ```python import pathlib def get_folder_size(folder_path): mB = 1000000 root_dir = pathlib.Path(folder_path) sizeBytes = sum(f.stat().st_size for f in root_dir.glob('**/*') if f.is_file()) return f'{sizeBytes//mB} MB' print(f'Original model size: {get_folder_size("cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8")}') print(f'Exported model size: {get_folder_size("exported/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8")}') print(f'Exported WEB model size: {get_folder_size("exported_web/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8")}') ``` _output →_ ``` Original model size: 31 MB Exported model size: 28 MB Exported WEB model size: 13 MB ``` As you may see the model that we're going to use for the Web has `13MB` which is quite acceptable in our case. 
Later in JavaScript we may start using the model like this: ```javascript import * as tf from '@tensorflow/tfjs'; const model = await tf.loadGraphModel(modelURL); ``` > 🧭 The next step is to implement the Links Detector UI which will use this model, but this is another story for another article. The final source code of the application may be found in [links-detector repository](https://github.com/trekhleb/links-detector) on GitHub. ## 🤔 Conclusions In this article, we started to solve the issue with printed links detection. We ended up creating the custom object detector to recognize the `https://` prefixes on text images (i.e. on smartphone camera stream images). We have also converted the model to a `tfjs_graph_model` to be able to re-use it on the client-side. You may 🚀 [**launch Links Detector demo**](https://trekhleb.github.io/links-detector/) from your smartphone to see the final result and to try how the model performs on your books or magazines. Here is how the final solution looks like: ![Links Detector Demo](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/03-links-detector-demo.gif) You may also 📝 [**browse the links-detector repository**](https://github.com/trekhleb/links-detector) on GitHub to see the complete source code of the UI part of the application. > ⚠️ Currently the application is in _experimental_ _Alpha_ stage and has [many issues and limitations](https://github.com/trekhleb/links-detector/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement). So don't raise your expectations level too high until these issues are resolved 🤷🏻‍. As the next steps which might improve the model performance we might do the following: - Extend the dataset with more link types (`http://`, `tcp://`, `ftp://` etc) - Extended the dataset with images that have dark backgrounds - Extend the dataset with underlined links - Extend the dataset with examples of different fonts and ligatures - etc. 
Even though the model has a lot to be improved to make it closer to the production-ready state, I still hope that this article was useful for you and gave you some guidelines and inspiration to play around with your custom object detectors. Happy training, folks! ================================================ FILE: articles/printed_links_detection/printed_links_detection.ru.md ================================================ # 📖 👆🏻 Делаем печатные ссылки кликабельными с помощью TensorFlow 2 Object Detection API ![Links Detector Cover](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/01-banner.png) ## 📃 TL;DR _В этой статье мы начнем решать проблему того, как сделать печатные ссылки в книгах или журналах кликабельными используя камеру смартфона._ С помощью [TensorFlow 2 Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) мы научим TensorFlow модель находить позиции и габариты строк `https://` в изображениях (например в каждом кадре видео из камеры смартфона). Текст каждой ссылки, расположенный по правую сторону от `https://`, будет распознан с помощью библиотеки [Tesseract](https://tesseract.projectnaptha.com/). Работа с библиотекой Tesseract не является предметом этой статьи, но вы можете найти полный исходный код приложения в репозитории [links-detector repository](https://github.com/trekhleb/links-detector) на GitHub. > 🚀 [**Запустить Links Detector**](https://trekhleb.github.io/links-detector/) со смартфона, чтобы увидеть конечный результат. > 📝 [**Открыть репозиторий links-detector**](https://github.com/trekhleb/links-detector) на GitHub с полным исходным кодом приложения. 
Вот так в итоге будет выглядеть процесс распознавания печатных ссылок: ![Links Detector Demo](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/03-links-detector-demo.gif) > ⚠️ На данный момент приложение находится в _экспериментальной_ стадии и имеет [множество недоработок и ограничений](https://github.com/trekhleb/links-detector/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement). Поэтому, до тех пор, пока вышеуказанные недоработки не будут ликвидированы, не ожидайте от приложения слишком многого 🤷🏻‍. Также стоит отметить, что целью данной статьи является экспериментирование с TensorFlow 2 Object Detection API, а не создание production-ready приложения. > В случае, если блоки с исходным кодом в этой статье будут отображаться без подсветки кода вы можете [перейти на GitHub версию этой статьи](https://github.com/trekhleb/links-detector/blob/master/articles/printed_links_detection/printed_links_detection.ru.md) ## 🤷🏻‍️ Проблема Я работаю программистом, и в свободное от работы время учу Machine Learning в качестве хобби. Но проблема не в этом. Я купил книгу по машинному обучению и, читая первые главы, столкнулся с множеством печатных ссылок на подобии `https://tensorflow.org/` или `https://some-url.com/which/may/be/even/longer?and_with_params=true`. ![Printed Links](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/02-printed-links.jpg) К сожалению, кликать по печатным ссылкам не представлялось возможным (спасибо, Кэп!). Чтобы открыть ссылки в браузере мне приходилось набирать их посимвольно в адресной строке, что было довольно медленно. К тому же опечатки никто не отменял. ## 💡 Возможное решение Я подумал, а что если, по аналогии с распознавателем QR кодов, мы "научим" смартфон _(1)_ _определять местоположение_ и _(2)_ _распознавать_ печатные гипер-ссылки и делать их кликабельными? 
В таком случае читатель делал бы всего один клик вместо посимвольного ввода с множеством нажатий на клавиши. Операционная сложность всей этой операции уменьшилась бы с `O(N)` до `O(1)`. Вот так бы этот процесс выглядел: ![Links Detector Demo](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/03-links-detector-demo.gif) ## 📝 Требования к решению Как я уже упомянул выше, я не эксперт в машинном обучении. Для меня это больше как хобби. Поэтому и цель этой статьи заключается больше в _экспериментировании_ и _обучении_ работе с TensorFlow 2 Object Detection API, чем в попытке создания production-ready приложения. С учетом вышесказанного, я упростил требования к финальному решению и свел их к следующим пунктам: 1. Производительность процесса обнаружения и распознавания должна быть **близка** к реальному времени (например, `0.5-1` кадров в секунду на устройстве схожем по производительности с iPhone X). Это будет означать, что весь процесс _обнаружения + распознавания_ должен происходить не более чем за `2` секунды. 2. Должны поддерживаться только ссылки на **английском** языке. 3. Должны поддерживаться только ссылки **черного (темно-серого) цвета на белом (светло-сером) фоне**. 4. Должны поддерживаться только `https://` ссылки (допускается, что `http://`, `ftp://`, `tcp://` и прочие ссылки не будут распознаны). ## 🧩 Находим решение ### Общий подход #### Вариант №1: Модель на стороне сервера **Алгоритм действий:** 1. Получаем видео-поток (кадр за кадром) на стороне клиента. 2. Отправляем каждый кадр на сервер. 3. Осуществляем обнаружение и распознавание ссылок на сервере и отправляем результат клиенту. 4. Отображаем распознанные ссылки ни стороне клиента и делаем их кликабельными. 
![Model on the back-end](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/04-frontend-backend.jpg) **Преимущества:** - 💚 Скорость обнаружения и распознавания ссылок не ограничена производительностью клиентского устройства. При желании мы можем ускорить скорость обнаружения ссылок масштабируя наши сервера горизонтально (больше серверов) или вертикально (больше ядер и GPUs). - 💚 Модель может иметь больший размер (и, возможно, большую точность), поскольку отсутствует необходимость ее загрузки на сторону клиента. Загрузить модель размером `~10Mb` на сторону клиента выглядит реалистичным, но все-же загрузить модель размером `~100Mb` может быть довольно проблематичным с точки зрения пользовательского UX (user experience). - 💚 У нас появляется возможность контролировать доступ к модели. Поскольку модель "спрятана" за публичным API, мы можем контролировать каким клиентам она будет доступна. **Недостатки:** - 💔 Сложность системы растет. Вместо использования одного лишь `JavaScript` на стороне клиента нам необходимо будет так же создать, например, `Python` инфраструктуру на стороне сервера. Нам так же будет необходимо позаботиться об автоматическом масштабировании сервиса. - 💔 Работа приложения в режиме оффлайн невозможна поскольку для работы приложения требуется доступ к интернету. - 💔 Множество HTTP запросов к сервису со стороны клиента может стать слабым местом системы с точки зрения производительности. Предположим, мы хотим улучшить производительность обнаружения и распознавания ссылок с `1` до `10+` кадров в секунду. В таком случае каждый клиент будет слать `10+` запросов в секунду на сервер. Для `10` клиентов, работающих одновременно, это уже будет означать `100+` запросов в секунду. На помощь могут прийти двусторонний стриминг `HTTP/2` и `gRPC`, но мы снова возвращаемся к первому пункту, связанному с растущей сложностью системы. - 💔 Стоимость системы растет. В основном это связано с оплатой за аренду серверов. 
#### Вариант №2: Модель на стороне клиента **Алгоритм действий:** 1. Получаем видео-поток (кадр за кадром) на стороне клиента. 2. Осуществляем обнаружение и распознавание ссылок на стороне клиента (без отправки на сервер). 3. Отображаем распознанные ссылки ни стороне клиента и делаем их кликабельными. ![Model on the front-end](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/05-frontend-only.jpg) **Преимущества:** - 💚 Менее сложная система. Нет необходимости в разработке серверной части приложения и создания API. - 💚 Приложение может работать в режиме оффлайн. Модель загружена на сторону клиента и нет необходимости в доступе к интернету (см. [Progressive Web Application](https://web.dev/progressive-web-apps/)) - 💚 Система "почти" автоматически масштабируема. Каждый новый клиент приложения "приходит" со своим процессором и видеокартой. Это конечно же неполноценное масштабирование (мы затронем причины ниже). - 💚 Система гораздо дешевле. Нам необходимо заплатить только за сервер со статическими данными (`HTML`, `JS`, `CSS`, файлы модели и пр.). В случае с GitHub, такой сервер может быть предоставлен бесплатно. - 💚 Отсутствует (так же как и серверы) проблема большого количества HTTP запросов в секунду к серверам. **Недостатки:** - 💔 Возможно только горизонтальное масштабирование, когда каждый клиент автоматически имеет свои собственные процессоры и графическую карту. Вертикальное масштабирование невозможно поскольку мы не можем повлиять на производительность клиентского устройства. В результате мы не можем гарантировать быстрого обнаружения и распознавания ссылок для медленных устройств. - 💔 Невозможно контролировать использование модели клиентами. Каждый может загрузить к себе модель и использовать ее где и как угодно. - 💔 Скорость расхода батареи клиентского устройства может стать проблемой. Модель при работе потребляет вычислительные ресурсы. 
Пользователи приложения могут быть недовольны тем, что их iPhone становится все теплее и теплее во время работы. #### Выбираем общий подход Поскольку целю этой статьи и проекта в целом является обучение, а не создание приложения коммерческого уровня _мы можем выбрать второй вариант и хранить модель на стороне клиента_. Это сделает весь проект менее затратным и у нас будет возможность больше сфокусироваться на машинном обучении, а не на создании автоматически масштабируемой серверной инфраструктуры. ### Углубляемся в детали Итак, мы выбрали вариант приложения без серверной части. Предположим теперь, что у нас на входе есть изображение (кадр) из видео-потока камеры, который выглядит так: ![Printed Links Input](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/06-printed-links-clean.jpg) Нам необходимо решить две подзадачи: 1. **Обнаружение** ссылок (найти позицию и габариты ссылок на странице) 2. **Распознавание** ссылок (распознать текст ссылок) #### Вариант №1: Решение на основе библиотеки Tesseract Первым и наиболее очевидным вариантом решением задачи _оптического распознавания символов_ ([OCR](https://en.wikipedia.org/wiki/Optical_character_recognition)) может быть распознавания текста всего изображения с помощью, например, библиотеки [Tesseract.js](https://github.com/naptha/tesseract.js). Она принимает изображение на вход и выдает распознанные параграфы, текстовые строки, блоки текста и слова и вместе с габаритами и координатами. 
![Recognized text with bounding boxes](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/07-printed-links-boxes.jpg) Далее мы можем попытаться найти ссылки в распознанном тексте с помощью регулярного выражения [похожего на это](https://stackoverflow.com/questions/3809401/what-is-a-good-regular-expression-to-match-a-url) (пример на TypeScript): ```typescript const URL_REG_EXP = /https?:\/\/(www\.)?[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_+.~#?&/=]*)/gi; const extractLinkFromText = (text: string): string | null => { const urls: string[] | null = text.match(URL_REG_EXP); if (!urls || !urls.length) { return null; } return urls[0]; }; ``` 💚 Похоже, что задача решена довольно прямолинейным и простым способом: - Мы знаем габариты и координаты ссылок. - Мы так же знаем текст ссылок и можем сделать их кликабельными. 💔 Проблема в том, что время _обнаружения + распознавания_ может варьироваться от `2` до `20+` секунд в зависимости от размера изображения, его качества и "похожих на текст" объектов в изображении. В итоге будет очень сложно достичь той _близкой_ к реальному времени производительности в `0.5-1` кадров в секунду. 💔 Также, если подумать, то мы просим библиотеку распознать **весь** текст на картинке, даже если в тексте совсем нет ссылок или если в тексте есть одна-две ссылки, которые составляют, пускай, ~10% от всего объема текста. Это звучит как неэффективная трата вычислительных ресурсов. #### Вариант №2: Решение на основе библиотек Tesseract и TensorFlow (+1 модель) Мы могли бы заставить Tesseract работать быстрее используя еще один _дополнительный "алгоритм-советчик"_ перед тем, как приступить к распознаванию ссылок. Этот "алгоритм-советчик" должен обнаруживать (но не распознавать) _начало ссылок (координаты самой левой границы ссылки)_ для каждой ссылки в изображении. Это позволит нам ускорить задачу распознавания текста ссылок, если мы будем следовать следующим правилам: 1. 
Если изображение не содержит ни одной ссылки мы должны полностью избежать распознавания текста библиотекой Tesseract. 2. Если изображение содержит ссылки, то мы должны "попросить" Tesseract распознать только те части изображения, которые содержат текст ссылок. Мы хотим тратить время на распознавание "полезного" для нашей задачи текста. Этот "алгоритм-советчик", который будет срабатывать перед вызовом Tesseract должен выполняться каждый раз за одно и то же время, независимо от качества и содержимого изображения. Он также должен быть достаточно быстрым и должен определять наличие и позиции ссылок быстрее чем за `1` секунду (например, на iPhone X). В таком случае мы сможем попытаться заставить наше приложение работать в режиме близком к реальному времени (определения "близости" мы дали выше). > 💡 Итак, что если мы воспользуемся еще одним алгоритмом (еще одной моделью) обнаружения объектов, который поможет нам найти строки `https://` в изображении (каждая защищенная ссылка начинается с `https://`, не так ли?). Тогда, зная расположение и габариты префиксов `https://` в изображении, мы сможем отправить на распознавание текста с помощью библиотеки Tesseract только те части изображения, которые находятся по правую сторону от префиксов `https://` и являются их продолжением. Обратите внимание на изображение ниже: ![Tesseract and TensorFlow based solution](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/08-tesseract-vs-tensorflow.jpg) На этом изображении можно заметить, что Tesseract будет выполнять **гораздо меньше** работы по распознаванию текста, если мы подскажем ему, где в тексте могут находиться ссылки (обратите внимание на количество голубых прямоугольников, чем не доказательство 🤓). Итак, вопрос, на который нам необходимо ответить теперь, какую же модель обнаружения объектов нам выбрать и как "научить" ее находить на изображении префиксы `https://`. 
> Наконец-то мы подобрались ближе к TensorFlow 😀 ## 🤖 Выбираем подходящую модель обнаружения объектов Тренировка новой модели обнаружения объектов с нуля не является хорошим вариантом в нашем случае по следующим причинам: - 💔 Тренировка может занять дни/недели и стоить много денег (за аренду тех-же серверов с GPU). - 💔 У нас скорее всего не получится собрать набор данных, состоящий из сотен тысяч фотографий книг и журналов со ссылками. Тем-более, что нам нужны не только изображения, но еще и координаты префиксов `https://` для каждого из них. С другой стороны мы можем попытаться сгенерировать такой набор данных, но об этом ниже. Итак, вместо создания новой модели обнаружения объектов, мы будем обучать уже существующую и натренированную модель обнаруживать новый для нее класс объектов (см. [transfer learning](https://en.wikipedia.org/wiki/Transfer_learning)). В нашем случае под "новым классом" объектов мы имеем в виду изображения префикса `https://`. Такой подход имеет следующие преимущества: - 💚 Набор данных может быть гораздо меньшим. Нет необходимости собирать сотни тысяч изображений с локализациями (координатами объектов в изображении). Вместо этого мы можем обойтись сотней изображений и сделать локализацию объектов вручную. Это возможно по той причине, что модель уже натренированна на общем наборе данных типа [COCO](https://cocodataset.org/#home) и уже умеет извлекать основные характеристики изображения (научить "первокурсника" линейной алгебре, _как правило_, легче, чем "первоклассника"). - 💚 Время тренировки так же будет гораздо меньшим (на GPU получим минуты/часы вместо дней/недель). Время сокращается за счет меньшего объема данных (меньших партий данных во время тренировки) и меньшего количества тренируемых параметров модели. 
Мы можем выбрать существующую модель из ["зоопарка" моделей TensorFlow 2](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md), который представляет собой коллекцию моделей натренированных на наборе данных [COCO 2017](https://cocodataset.org/#home). На данный момент эта коллекция включает в себя `~40` разных вариаций моделей. Для того, чтобы "научить" модель обнаруживать новые, ранее неизвестные ей объекты, мы можем воспользоваться [TensorFlow 2 Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection). TensorFlow Object Detection API - это фреймворк на основе [TensorFlow](https://www.tensorflow.org/), который позволяет конструировать и тренировать модели обнаружения объектов. Если вы перейдете по ссылке на ["зоопарк" моделей](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md) вы увидите, что для каждой модели там указана _скорость_ и _точность_ обнаружения объектов. ![Model Zoo](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/09-model-zoo.jpg) _Изображение взято с репозитория [TensorFlow Model Zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md)_ Конечно же, для того, чтобы выбрать подходящую модель, нам важно найти правильный баланс между **скоростью** и **точностью** обнаружения. Но что еще важнее в нашем случае, это **размер** модели, поскольку мы планируем загружать ее на сторону клиента. Размер архива с моделью может варьироваться от `~20Mb` до `~1Gb`. 
Вот несколько примеров: - `1386 (Mb)` `centernet_hg104_1024x1024_kpts_coco17_tpu-32` - ` 330 (Mb)` `centernet_resnet101_v1_fpn_512x512_coco17_tpu-8` - ` 195 (Mb)` `centernet_resnet50_v1_fpn_512x512_coco17_tpu-8` - ` 198 (Mb)` `centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8` - ` 227 (Mb)` `centernet_resnet50_v2_512x512_coco17_tpu-8` - ` 230 (Mb)` `centernet_resnet50_v2_512x512_kpts_coco17_tpu-8` - ` 29 (Mb)` `efficientdet_d0_coco17_tpu-32` - ` 49 (Mb)` `efficientdet_d1_coco17_tpu-32` - ` 60 (Mb)` `efficientdet_d2_coco17_tpu-32` - ` 89 (Mb)` `efficientdet_d3_coco17_tpu-32` - ` 151 (Mb)` `efficientdet_d4_coco17_tpu-32` - ` 244 (Mb)` `efficientdet_d5_coco17_tpu-32` - ` 376 (Mb)` `efficientdet_d6_coco17_tpu-32` - ` 376 (Mb)` `efficientdet_d7_coco17_tpu-32` - ` 665 (Mb)` `extremenet` - ` 427 (Mb)` `faster_rcnn_inception_resnet_v2_1024x1024_coco17_tpu-8` - ` 424 (Mb)` `faster_rcnn_inception_resnet_v2_640x640_coco17_tpu-8` - ` 337 (Mb)` `faster_rcnn_resnet101_v1_1024x1024_coco17_tpu-8` - ` 337 (Mb)` `faster_rcnn_resnet101_v1_640x640_coco17_tpu-8` - ` 343 (Mb)` `faster_rcnn_resnet101_v1_800x1333_coco17_gpu-8` - ` 449 (Mb)` `faster_rcnn_resnet152_v1_1024x1024_coco17_tpu-8` - ` 449 (Mb)` `faster_rcnn_resnet152_v1_640x640_coco17_tpu-8` - ` 454 (Mb)` `faster_rcnn_resnet152_v1_800x1333_coco17_gpu-8` - ` 202 (Mb)` `faster_rcnn_resnet50_v1_1024x1024_coco17_tpu-8` - ` 202 (Mb)` `faster_rcnn_resnet50_v1_640x640_coco17_tpu-8` - ` 207 (Mb)` `faster_rcnn_resnet50_v1_800x1333_coco17_gpu-8` - ` 462 (Mb)` `mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8` - ` 86 (Mb)` `ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8` - ` 44 (Mb)` `ssd_mobilenet_v2_320x320_coco17_tpu-8` - ` 20 (Mb)` `ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8` - ` 20 (Mb)` `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8` - ` 369 (Mb)` `ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8` - ` 369 (Mb)` `ssd_resnet101_v1_fpn_640x640_coco17_tpu-8` - ` 481 (Mb)` `ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8` - ` 480 (Mb)` 
`ssd_resnet152_v1_fpn_640x640_coco17_tpu-8` - ` 233 (Mb)` `ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8` - ` 233 (Mb)` `ssd_resnet50_v1_fpn_640x640_coco17_tpu-8` Модель **`ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8`** выглядит наиболее подходящей в нашем случае: - 💚 Она относительно небольшая - `20Mb` в архиве. - 💚 Она достаточно быстрая - `39ms` на одно обнаружение. - 💚 Она использует сеть MobileNet v2 в качестве экстрактора свойств изображения (feature extractor), которая в свою очередь оптимизирована под работу на мобильных устройствах и обеспечивает меньший расход батареи. - 💚 Она производит обнаружение всех известных ей объектов в изображении **за один проход** независимо от содержимого изображения (отсутствует шаг [regions proposal](https://en.wikipedia.org/wiki/Region_Based_Convolutional_Neural_Networks), что делает работу сети быстрее). - 💔 В то же время это не самая точная модель (все является компромиссом ⚖️) Название модели включает в себя ее несколько важных характеристик, с которыми вы при желании можете ознакомиться детальнее: - Ожидаемый размер изображения на входе - `640x640px`. - Модель построена на основе [Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325) (SSD) и [Feature Pyramid Network](https://arxiv.org/abs/1612.03144) (FPN). - Сверточная нейронная сеть ([CNN](https://en.wikipedia.org/wiki/Convolutional_neural_network)) [MobileNet v2](https://ai.googleblog.com/2018/04/mobilenetv2-next-generation-of-on.html) используется в качестве экстрактора свойств изображения (feature extractor). - Модель была обучена на наборе данных [COCO](https://cocodataset.org/#home) ## 🛠 Устанавливаем Object Detection API В этой статье мы будем устанавливать Tensorflow 2 Object Detection API _в виде пакета Python_. Это достаточно удобно, в случае если вы экспериментируете в [Google Colab](https://colab.research.google.com/) (предпочтительно) или в [Jupyter](https://jupyter.org/try). 
В обоих случаях вы можете избежать локальной инсталляции пакетов и проводить эксперименты непосредственно в браузере. Также есть возможность установки Object Detection API используя Docker, о котором вы можете прочитать в [документации](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2.md). > Если у вас возникнут трудности во время установки API или во время создания набора данных (следующие разделы), вы можете обратиться к статье [TensorFlow 2 Object Detection API tutorial](https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/index.html), в которой есть много полезных деталей и советов. Для начала давайте клонируем [репозиторий с API](https://github.com/tensorflow/models): ```bash git clone --depth 1 https://github.com/tensorflow/models ``` _output →_ ``` Cloning into 'models'... remote: Enumerating objects: 2301, done. remote: Counting objects: 100% (2301/2301), done. remote: Compressing objects: 100% (2000/2000), done. remote: Total 2301 (delta 561), reused 922 (delta 278), pack-reused 0 Receiving objects: 100% (2301/2301), 30.60 MiB | 13.90 MiB/s, done. Resolving deltas: 100% (561/561), done. ``` Теперь можем скомпилировать [файлы-прототипы API](https://github.com/tensorflow/models/tree/master/research/object_detection/protos) в Python формат, используя [protoc](https://grpc.io/docs/protoc-installation/): ```bash cd ./models/research protoc object_detection/protos/*.proto --python_out=. ``` Следующим шагом будет установка API для версии TensorFlow 2 используя `pip` и файл [setup.py](https://github.com/tensorflow/models/blob/master/research/object_detection/packages/tf2/setup.py)`: ```bash cp ./object_detection/packages/tf2/setup.py . pip install . --quiet ``` > Если на этом шаге вы обнаружите ошибки, связанные установкой зависимых пакетов, попробуйте запустить `pip install . --quiet` во второй раз. 
Проверить успешность установки вы можете запустив тест: ```bash python object_detection/builders/model_builder_tf2_test.py ``` В итоге вы должны будете увидеть в консоли, что-то вроде этого: ``` [ OK ] ModelBuilderTF2Test.test_unknown_ssd_feature_extractor ---------------------------------------------------------------------- Ran 20 tests in 45.072s OK (skipped=1) ``` TensorFlow Object Detection API установлена! Теперь мы можем использовать скрипты, предоставляемы этой API, для [обнаружения объектов в изображениях](https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/inference_tf2_colab.ipynb), [тренировки](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_training_and_evaluation.md) или [доработки](https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/eager_few_shot_od_training_tf2_colab.ipynb) моделей. ## ⬇️ Загружаем заранее обученную модель Давайте загрузим ранее выбранную нами модель `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8` из коллекции моделей TensorFlow и посмотрим, как мы можем использовать ее для обнаружения общих объектов, таких как "кот", "собака", "машина" и пр. (объектов с классами, поддерживаемыми набором данных COCO). Мы воспользуемся утилитой TensorFlow [get_file()](https://www.tensorflow.org/api_docs/python/tf/keras/utils/get_file) для загрузки архивированной модели по URL и для дальнейшей ее распаковки. ```python import tensorflow as tf import pathlib MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8' TF_MODELS_BASE_PATH = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/' CACHE_FOLDER = './cache' def download_tf_model(model_name, cache_folder): model_url = TF_MODELS_BASE_PATH + model_name + '.tar.gz' model_dir = tf.keras.utils.get_file( fname=model_name, origin=model_url, untar=True, cache_dir=pathlib.Path(cache_folder).absolute() ) return model_dir # Start the model download. 
model_dir = download_tf_model(MODEL_NAME, CACHE_FOLDER) print(model_dir) ``` _output →_ ``` /content/cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ``` Вот как на данный момент выглядит структура папок: ![Cache Folder](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/10-cache-folder.jpg) Папка `checkpoint` содержит "слепок" параметров обученной модели. Файл `pipeline.config` содержит настройки обнаружения. Мы еще вернемся к этому файлу ниже, когда будем обучать нашу модель. ## 🏄🏻‍️ Обнаружение объектов с помощью загруженной модели На данный момент модель способна обнаруживать объекты классов, поддерживаемых набором данных COCO ([их всего 90](https://cocodataset.org/#explore)), таких, как `car`, `bird`, `hot dog` и пр. Эти классы еще могут называть ярлыками (labels). ![COCO classes](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/11-coco-classes.jpg) _Источник изображения: [сайт COCO](https://cocodataset.org/#explore)_ Попробуем, обнаружит ли модель объекты этих классов. ### Загружаем ярлыки COCO Object Detection API уже содержит файл с полным набор классов (ярлыков) COCO для нашего удобства. ```python import os # Import Object Detection API helpers. from object_detection.utils import label_map_util # Loads the COCO labels data (class names and indices relations). def load_coco_labels(): # Object Detection API already has a complete set of COCO classes defined for us. label_map_path = os.path.join( 'models/research/object_detection/data', 'mscoco_complete_label_map.pbtxt' ) label_map = label_map_util.load_labelmap(label_map_path) # Class ID to Class Name mapping. categories = label_map_util.convert_label_map_to_categories( label_map, max_num_classes=label_map_util.get_max_label_map_index(label_map), use_display_name=True ) category_index = label_map_util.create_category_index(categories) # Class Name to Class ID mapping. 
label_map_dict = label_map_util.get_label_map_dict(label_map, use_display_name=True) return category_index, label_map_dict # Load COCO labels. coco_category_index, coco_label_map_dict = load_coco_labels() print('coco_category_index:', coco_category_index) print('coco_label_map_dict:', coco_label_map_dict) ``` _output →_ ``` coco_category_index: { 1: {'id': 1, 'name': 'person'}, 2: {'id': 2, 'name': 'bicycle'}, ... 90: {'id': 90, 'name': 'toothbrush'}, } coco_label_map_dict: { 'background': 0, 'person': 1, 'bicycle': 2, 'car': 3, ... 'toothbrush': 90, } ``` ### Создаем функцию обнаружения В этом разделе мы создадим так называемую функцию обнаружения, которая будет использовать загруженную нами ранее модель, собственно, для обнаружения объектов в изображении. ```python import tensorflow as tf # Import Object Detection API helpers. from object_detection.utils import config_util from object_detection.builders import model_builder # Generates the detection function for specific model and specific model's checkpoint def detection_fn_from_checkpoint(config_path, checkpoint_path): # Build the model. pipeline_config = config_util.get_configs_from_pipeline_file(config_path) model_config = pipeline_config['model'] model = model_builder.build( model_config=model_config, is_training=False, ) # Restore checkpoints. ckpt = tf.compat.v2.train.Checkpoint(model=model) ckpt.restore(checkpoint_path).expect_partial() # This is a function that will do the detection. 
@tf.function def detect_fn(image): image, shapes = model.preprocess(image) prediction_dict = model.predict(image, shapes) detections = model.postprocess(prediction_dict, shapes) return detections, prediction_dict, tf.reshape(shapes, [-1]) return detect_fn inference_detect_fn = detection_fn_from_checkpoint( config_path=os.path.join('cache', 'datasets', MODEL_NAME, 'pipeline.config'), checkpoint_path=os.path.join('cache', 'datasets', MODEL_NAME, 'checkpoint', 'ckpt-0'), ) ``` Функция `inference_detect_fn` принимает на входе изображение и возвращает информацию об обнаруженных в нем объектах. ### Загружаем тестовые изображения Давайте попробуем найти объекты на следующем изображении: ![General Object Inference](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/12-inference-01.jpg) Для этого сохраним это изображение в папку `inference/test/` нашего проекта. Если вы используете Google Colab, вы можете создать эту папку и произвести загрузку файла вручную. Вот как структура папок должна выглядеть на данный момент: ![Folder structure](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/14-inference-folders.jpg) ```python import matplotlib.pyplot as plt %matplotlib inline # Creating a TensorFlow dataset of just one image. inference_ds = tf.keras.preprocessing.image_dataset_from_directory( directory='inference', image_size=(640, 640), batch_size=1, shuffle=False, label_mode=None ) # Numpy version of the dataset. inference_ds_numpy = list(inference_ds.as_numpy_iterator()) # You may preview the images in dataset like this. plt.figure(figsize=(14, 14)) for i, image in enumerate(inference_ds_numpy): plt.subplot(2, 2, i + 1) plt.imshow(image[0].astype("uint8")) plt.axis("off") plt.show() ``` ### Запускаем обнаружение для тестового изображения На данном этапе мы готовы запустить обнаружение. 
Первый элемент массива `inference_ds_numpy[0]` содержит наше первое тестовое изображение в формате массива `Numpy`. ```python detections, predictions_dict, shapes = inference_detect_fn( inference_ds_numpy[0] ) ``` Проверим размерность массивов, которые нам вернула функция: ```python boxes = detections['detection_boxes'].numpy() scores = detections['detection_scores'].numpy() classes = detections['detection_classes'].numpy() num_detections = detections['num_detections'].numpy()[0] print('boxes.shape: ', boxes.shape) print('scores.shape: ', scores.shape) print('classes.shape: ', classes.shape) print('num_detections:', num_detections) ``` _output →_ ``` boxes.shape: (1, 100, 4) scores.shape: (1, 100) classes.shape: (1, 100) num_detections: 100.0 ``` Модель вернула нам массив со `100` "обнаружениями". Это не означает, что модель нашла `100` объектов в изображении. Это скорее говорит нам, что модель имеет `100` ячеек и поддерживает обнаружение максимум `100` объектов одновременно в одном изображении. Каждое "обнаружение" имеет соответствующий рейтинг (вероятность, score), который говорит об уверенности модели в том, что обнаружен именно этот объект. Габариты каждого найденного объекта хранятся в массиве `boxes`. Рейтинг каждого обнаружения хранится в массиве `scores`. Массив `classes` хранит ярлыки для каждого "обнаружения". 
Давайте проверим первые 5 таких "обнаружений": ```python print('First 5 boxes:') print(boxes[0,:5]) print('First 5 scores:') print(scores[0,:5]) print('First 5 classes:') print(classes[0,:5]) class_names = [coco_category_index[idx + 1]['name'] for idx in classes[0]] print('First 5 class names:') print(class_names[:5]) ``` _output →_ ``` First 5 boxes: [[0.17576033 0.84654826 0.25642633 0.88327974] [0.5187813 0.12410264 0.6344235 0.34545377] [0.5220358 0.5181462 0.6329132 0.7669856 ] [0.50933677 0.7045719 0.5619138 0.7446198 ] [0.44761637 0.51942706 0.61237675 0.75963426]] First 5 scores: [0.6950246 0.6343004 0.591157 0.5827219 0.5415643] First 5 classes: [9. 8. 8. 0. 8.] First 5 class names: ['traffic light', 'boat', 'boat', 'person', 'boat'] ``` Модель видит светофор (`traffic light`), три лодки (`boats`) и человека (`person`). И мы можем подтвердить, что эти объекты действительно существуют в изображении. В массиве `scores` мы видим, что модель наиболее уверенна (с 70% вероятностью) в найденном объекте класса `traffic light`. Каждый элемент массива `boxes` представляет собой координаты `[y1, x1, y2, x2]`, где `(x1, y1)` и `(x2, y2)` соответственно координаты левого верхнего и правого нижнего углов габаритного прямоугольника. Попробуем визуализировать габаритные прямоугольники: ```python # Importing Object Detection API helpers. from object_detection.utils import visualization_utils # Visualizes the bounding boxes on top of the image. 
def visualize_detections(image_np, detections, category_index): label_id_offset = 1 image_np_with_detections = image_np.copy() visualization_utils.visualize_boxes_and_labels_on_image_array( image_np_with_detections, detections['detection_boxes'][0].numpy(), (detections['detection_classes'][0].numpy() + label_id_offset).astype(int), detections['detection_scores'][0].numpy(), category_index, use_normalized_coordinates=True, max_boxes_to_draw=200, min_score_thresh=.4, agnostic_mode=False, ) plt.figure(figsize=(12, 16)) plt.imshow(image_np_with_detections) plt.show() # Visualizing the detections. visualize_detections( image_np=tf.cast(inference_ds_numpy[0][0], dtype=tf.uint32).numpy(), detections=detections, category_index=coco_category_index, ) ``` В итоге мы увидим: ![Inference result](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/14-inference-results-01.jpg) В то же время, если мы попробуем обнаружить объекты на текстовом изображении мы увидим следующее: ![Inference result for text image](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/15-inference-results-02.jpg) Модель не смогла найти ничего в этом изображении. Это как-раз то, что мы собираемся исправить и чему хотим научить нашу модель - видеть приставки `https://` в текстовых изображениях. ## 📝 Подготавливаем набор данных для тренировки Для того, чтобы научить модель `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8` обнаруживать объекты, которые _не были_ описаны в наборе данных COCO нам необходимо подготовить свой набор данных и "доучить" модель на нем. Наборы данных для задачи обнаружения объектов состоят из двух компонентов: 1. Собственно само изображение (например, изображение печатной странички книги или журнала) 2. Габаритные прямоугольники, которые показывают где именно в изображении расположены объекты. 
![Bounding Boxes](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/16-detection-boxes.jpg) В примере выше координаты `левого верхнего` и `правого нижнего` углов имеют _абсолютные_ значения (в пикселях). Также существуют альтернативные способы записи параметров таких габаритных прямоугольников. Например, мы можем описать прямоугольник с помощью его `координат центра`, а так же `ширины` и `высоты`. Мы также можем использовать _относительные_ значения координат (процент от ширины или высоты изображения). Но в целом, думаю идея понятна: модель должна знать где именно в изображении находится тот или иной объект. Вопрос в том, где же нам взять такие данные для тренировки. У нас есть три варианта: 1. _Воспользоваться имеющимся_ набором данных. 2. _Сгенерировать новый_ искусственный набор данных. 3. _Создать_ набор данных вручную путем фотографирования или загрузки реальных изображений с текстом и `https://` ссылками и дальнейшей аннотацией (указанием позиций объектов) каждого изображения вручную. ### Вариант №1: Использование существующих наборов данных Есть множество общедоступных наборов данных. Мы можем воспользоваться следующими ресурсами для поиска подходящего набора: - [Google Dataset Search](https://datasetsearch.research.google.com/) - [Kaggle Datasets](https://www.kaggle.com/datasets) - репозиторий [awesome-public-datasets](https://github.com/awesomedata/awesome-public-datasets) - и пр. 💚 Если у вас получится найти подходящий набор данных с лицензией, позволяющей его использовать, то это, пожалуй, наиболее быстрый способ начать тренировку модели. 💔 Но проблема в том, что мне не удалось найти набор данных, содержащий изображения книг со ссылками и их координатами. Этот вариант нам прийдется пропустить. 
### Вариант №2: Генерирование искусственного набора данных Существуют библиотеки (например [keras_ocr](https://keras-ocr.readthedocs.io/en/latest/examples/end_to_end_training.html#generating-synthetic-data)), которые могли бы нам помочь сгенерировать случайный текст, поместить в него ссылку и отрисовать текст на различных фонах и с различными искажениями. 💚 Преимущество данного подхода заключается в том, что он дает нам возможность сгенерировать экземпляры данных с разными _шрифтами_, _лигатурами_, _цветами текста_ и _фона_. Это помогло бы нам избежать проблемы [переученности модели](https://en.wikipedia.org/wiki/Overfitting). Модель могла-бы легко обобщать свои "знания" в случае с изображениями, которые она не видела ранее. 💚 Этот подход дает нам возможность сгенерировать разные типы ссылок, таких как: `http://`, `http://`, `ftp://`, `tcp://` и пр. Ведь найти множество реальных изображений с разными типами ссылок могло бы стать проблемой. 💚 Еще одним преимуществом этого подхода является то, что мы можем сгенерировать столько изображений сколько хотим. Мы не ограничены количеством страниц со ссылками в книге, которую нам удалось найти. Увеличение набора данных может в итоге улучшить точность модели. 💔 С другой стороны, существует возможность неправильного использования такого генератора, что в итоге может привести к набору данных, который будет существенно отличаться от реальных изображений. Например, мы можем ошибочно применить неправдоподобные изгибы страниц (волна вместо дуги) или неправдоподобные фоны. Модель в таком может не обобщить свои "знания" на изображения из реального мира. > Этот подход мне кажется очень многообещающим. Он может помочь нам преодолеть множество недостатков модели (о них мы упомянем ниже в статье). Я пока еще не пробовал применить этот подход, но, возможно, это будет предметом отдельной статьи. 
### Вариант №3: Создание набора данных вручную Наиболее прямолинейный способ - это взять книгу (или книги), сфотографировать странички, содержащие ссылки и обозначить локации префиксов `https://` для каждой странички вручную. Хорошая новость в том, что набор данных, который нам нужен, может быть достаточно небольшим (сотни изображений будет достаточно). Это обусловлено тем, что мы не собираемся тренировать модель _с нуля_. Вместо этого мы будем "доучивать" уже обученную модель (см. [transfer learning](https://en.wikipedia.org/wiki/Transfer_learning) и [few-shot learning](https://paperswithcode.com/task/few-shot-learning)). 💚 В данном случае набор данных будет максимально приближен к реальному миру. Мы в буквальном смысле возьмем книгу, сфотографируем странички с реальными шрифтами, изгибами, тенями и цветами. 💔 С другой стороны, даже с учетом того, что нам нужны всего сотни страничек, работа по сбору таких страничек и их дальнейшей аннотации может занять достаточно много времени. 💔 Тяжело найти разные книги и журналы с разными шрифтами, типами ссылок, с разными фонами и лигатурами. В итоге набора данных будет достаточно узконаправленным (у пользователей должны будут быть книги со шрифтами и фонами похожими на ваши). Поскольку целью этой статьи, как было упомянуто выше, не является создание модели, которая должна выиграть соревнование по обнаружению объектов, мы можем пойти по пути создания модели вручную. ### Обрабатываем фото для набора данных Я сфотографировал `125` страничек одной книги, в которых нашел `https://` ссылки. ![Raw Dataset](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/17-dataset-raw.jpg) Все изображения были помещены в папку `dataset/printed_links/raw`. Следующим шаг - обработка изображений. 
Давайте применим следующие преобразования: - **Изменим размер** каждого изображения так, чтобы их ширина составила `1024px` (изначально изображения были чересчур большими с шириной в `3024px`) - **Обрежем** каждое изображение так, чтобы оно стало квадратным (это делать не обязательно, можно просто сжать изображение до квадратных пропорций, не обрезая его, но я хотел сохранить естественные пропорции префиксов `https:` перед обучением). - **Развернем** каждое изображения до правильной ориентации, применив метаданные из тега [exif](https://en.wikipedia.org/wiki/Exif). - **Сделаем каждое изображение черно-белым**, поскольку мы не хотим, чтобы модель брала во внимание цвет. - **Увеличим яркость** - **Увеличим контраст** - **Увеличим резкость** Стоить отметить, что в будущем, мы должны будем применять эти же манипуляции над изображениями перед тем, как отправлять их на вход нашей модели (если тренировочные изображения были черно-белыми и квадратными, то и реальные изображения, которые мы будем отправлять в нашу модель должны быть такими же квадратными и черно-белыми). Мы можем применить все вышеописанные трансформации используя Python: ```python import os import math import shutil from pathlib import Path from PIL import Image, ImageOps, ImageEnhance # Resize an image. def preprocess_resize(target_width): def preprocess(image: Image.Image, log) -> Image.Image: (width, height) = image.size ratio = width / height if width > target_width: target_height = math.floor(target_width / ratio) log(f'Resizing: To size {target_width}x{target_height}') image = image.resize((target_width, target_height)) else: log('Resizing: Image already resized, skipping...') return image return preprocess # Crop an image. def preprocess_crop_square(): def preprocess(image: Image.Image, log) -> Image.Image: (width, height) = image.size left = 0 top = 0 right = width bottom = height crop_size = min(width, height) if width >= height: # Horizontal image. 
log(f'Squre cropping: Horizontal {crop_size}x{crop_size}') left = width // 2 - crop_size // 2 right = left + crop_size else: # Vetyical image. log(f'Squre cropping: Vertical {crop_size}x{crop_size}') top = height // 2 - crop_size // 2 bottom = top + crop_size image = image.crop((left, top, right, bottom)) return image return preprocess # Apply exif transpose to an image. def preprocess_exif_transpose(): # @see: https://pillow.readthedocs.io/en/stable/reference/ImageOps.html def preprocess(image: Image.Image, log) -> Image.Image: log('EXif transpose') image = ImageOps.exif_transpose(image) return image return preprocess # Apply color transformations to the image. def preprocess_color(brightness, contrast, color, sharpness): # @see: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html def preprocess(image: Image.Image, log) -> Image.Image: log('Coloring') enhancer = ImageEnhance.Color(image) image = enhancer.enhance(color) enhancer = ImageEnhance.Brightness(image) image = enhancer.enhance(brightness) enhancer = ImageEnhance.Contrast(image) image = enhancer.enhance(contrast) enhancer = ImageEnhance.Sharpness(image) image = enhancer.enhance(sharpness) return image return preprocess # Image pre-processing pipeline. def preprocess_pipeline(src_dir, dest_dir, preprocessors=[], files_num_limit=0, override=False): # Create destination folder if not exists. Path(dest_dir).mkdir(parents=False, exist_ok=True) # Get the list of files to be copied. src_file_names = os.listdir(src_dir) files_total = files_num_limit if files_num_limit > 0 else len(src_file_names) files_processed = 0 # Logger function. def preprocessor_log(message): print(' ' + message) # Iterate through files. for src_file_index, src_file_name in enumerate(src_file_names): if files_num_limit > 0 and src_file_index >= files_num_limit: break # Copy file. 
src_file_path = os.path.join(src_dir, src_file_name) dest_file_path = os.path.join(dest_dir, src_file_name) progress = math.floor(100 * (src_file_index + 1) / files_total) print(f'Image {src_file_index + 1}/{files_total} | {progress}% | {src_file_path}') if not os.path.isfile(src_file_path): preprocessor_log('Source is not a file, skipping...\n') continue if not override and os.path.exists(dest_file_path): preprocessor_log('File already exists, skipping...\n') continue shutil.copy(src_file_path, dest_file_path) files_processed += 1 # Preprocess file. image = Image.open(dest_file_path) for preprocessor in preprocessors: image = preprocessor(image, preprocessor_log) image.save(dest_file_path, quality=95) print('') print(f'{files_processed} out of {files_total} files have been processed') # Launching the image preprocessing pipeline. preprocess_pipeline( src_dir='dataset/printed_links/raw', dest_dir='dataset/printed_links/processed', override=True, # files_num_limit=1, preprocessors=[ preprocess_exif_transpose(), preprocess_resize(target_width=1024), preprocess_crop_square(), preprocess_color(brightness=2, contrast=1.3, color=0, sharpness=1), ] ) ``` В результате все обработанные изображения будут сохранены в папке `dataset/printed_links/processed`. 
![Dataset Processed](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/18-dataset-processed.jpg) Мы можем просмотреть полученные изображения следующим образом: ```python import matplotlib.pyplot as plt import numpy as np def preview_images(images_dir, images_num=1, figsize=(15, 15)): image_names = os.listdir(images_dir) image_names = image_names[:images_num] num_cells = math.ceil(math.sqrt(images_num)) figure = plt.figure(figsize=figsize) for image_index, image_name in enumerate(image_names): image_path = os.path.join(images_dir, image_name) image = Image.open(image_path) figure.add_subplot(num_cells, num_cells, image_index + 1) plt.imshow(np.asarray(image)) plt.show() preview_images('dataset/printed_links/processed', images_num=4, figsize=(16, 16)) ``` ### Указываем позиции и габариты объектов для нашего набора данных Для того, чтобы указать позиции и габариты объектов (префиксов `https://`) в нашем наборе данных мы можем воспользоваться программой аннотации изображений [LabelImg](https://github.com/tzutalin/labelImg). > Вам понадобится установить LabelImg локально на ваш компьютер. Детальную инструкцию по установке вы сможете найти в [документации LabelImg](https://github.com/tzutalin/labelImg) После установки LabelImg, вы можете запустить программу из консоли, указав папку с изображениями (в нашем случае `dataset/printed_links/processed`), которую вы хотите аннотировать: ```bash labelImg dataset/printed_links/processed ``` В открывшемся окне вам необходимо аннотировать все изображения из папки `dataset/printed_links/processed` и сохранить все изображения в формате XML в папку `dataset/printed_links/labels/xml/`. 
![Labeling](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/18-labeling.jpg) ![Labeling Process](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/19-labeling-process.gif) После завершения процесса аннотирования для каждого изображения мы должны получить XML файл с позицией и габаритами каждого объекта: ![Labels folder structure](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/20-labels-folder.jpg) ### Разбиваем общий набор данных на тренировочный и тестовый наборы Для того, чтобы идентифицировать проблему [переучивания или недоучивания](https://en.wikipedia.org/wiki/Overfitting) модели, нам необходимо разбить наш общий набор данных на тренировочный и тестовый наборы. Мы можем использовать `80%` всех изображений для тренировки и `20%` изображений для тестирования модели. Задача тестового набора - понять насколько наша модель может обобщить свои "знания" на данных, которые она не "видела" раньше. > В этой статье мы будем разбивать файлы путем их перемешивания и копирования в разные папки (в папки `test` и `train`). Стоит отметить, что такой подход, возможно, не является оптимальным. Вместо физического размещения файлов в разных папках мы так же можем разбивать набор данных на подгруппы на лету с помощью [tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset). 
```python import re import random def partition_dataset( images_dir, xml_labels_dir, train_dir, test_dir, val_dir, train_ratio, test_ratio, val_ratio, copy_xml ): if not os.path.exists(train_dir): os.makedirs(train_dir) if not os.path.exists(test_dir): os.makedirs(test_dir) if not os.path.exists(val_dir): os.makedirs(val_dir) images = [f for f in os.listdir(images_dir) if re.search(r'([a-zA-Z0-9\s_\\.\-\(\):])+(.jpg|.jpeg|.png)$', f, re.IGNORECASE)] num_images = len(images) num_train_images = math.ceil(train_ratio * num_images) num_test_images = math.ceil(test_ratio * num_images) num_val_images = math.ceil(val_ratio * num_images) print('Intended split') print(f' train: {num_train_images}/{num_images} images') print(f' test: {num_test_images}/{num_images} images') print(f' val: {num_val_images}/{num_images} images') actual_num_train_images = 0 actual_num_test_images = 0 actual_num_val_images = 0 def copy_random_images(num_images, dest_dir): copied_num = 0 if not num_images: return copied_num for i in range(num_images): if not len(images): break idx = random.randint(0, len(images)-1) filename = images[idx] shutil.copyfile(os.path.join(images_dir, filename), os.path.join(dest_dir, filename)) if copy_xml: xml_filename = os.path.splitext(filename)[0]+'.xml' shutil.copyfile(os.path.join(xml_labels_dir, xml_filename), os.path.join(dest_dir, xml_filename)) images.remove(images[idx]) copied_num += 1 return copied_num actual_num_train_images = copy_random_images(num_train_images, train_dir) actual_num_test_images = copy_random_images(num_test_images, test_dir) actual_num_val_images = copy_random_images(num_val_images, val_dir) print('\n', 'Actual split') print(f' train: {actual_num_train_images}/{num_images} images') print(f' test: {actual_num_test_images}/{num_images} images') print(f' val: {actual_num_val_images}/{num_images} images') partition_dataset( images_dir='dataset/printed_links/processed', train_dir='dataset/printed_links/partitioned/train', 
test_dir='dataset/printed_links/partitioned/test', val_dir='dataset/printed_links/partitioned/val', xml_labels_dir='dataset/printed_links/labels/xml', train_ratio=0.8, test_ratio=0.2, val_ratio=0, copy_xml=True ) ``` После разбития нашего набора данных структура папок должна выглядеть так: ``` dataset/ └── printed_links ├── labels │ └── xml ├── partitioned │ ├── test │ └── train │ ├── IMG_9140.JPG │ ├── IMG_9140.xml │ ├── IMG_9141.JPG │ ├── IMG_9141.xml │ ... ├── processed └── raw ``` ### Экспортируем набор данных Последней манипуляцией над данными, которую нам необходимо произвести, будет конвертация данных в формат [TFRecord](https://www.tensorflow.org/tutorials/load_data/tfrecord). Формат `TFRecord` используется TensorFlow для хранения последовательности записей (в нашем случае для хранения последовательности изображений). Сначала создадим две папки: одну для хранения аннотаций в формате `CSV`, другую для хранения нашей финальной версии набора данных в формате `TFRecord`. ```bash mkdir -p dataset/printed_links/labels/csv mkdir -p dataset/printed_links/tfrecords ``` Теперь нам необходимо создать файл-прототип `dataset/printed_links/labels/label_map.pbtxt` с классами объектов, которые наша модель должна научиться распознавать. В нашем случае у нас будет всего _один класс_, который мы назовем `http`. Содержимое файла должно быть следующим: ``` item { id: 1 name: 'http' } ``` Теперь мы готовы конвертировать набор данных в формат TFRecord из набора `jpg` изображений и аннотаций в `xml` формате: ```python import os import io import math import glob import tensorflow as tf import pandas as pd import xml.etree.ElementTree as ET from PIL import Image from collections import namedtuple from object_detection.utils import dataset_util, label_map_util tf1 = tf.compat.v1 # Convers labels from XML format to CSV. 
def xml_to_csv(path): xml_list = [] for xml_file in glob.glob(path + '/*.xml'): tree = ET.parse(xml_file) root = tree.getroot() for member in root.findall('object'): value = (root.find('filename').text, int(root.find('size')[0].text), int(root.find('size')[1].text), member[0].text, int(member[4][0].text), int(member[4][1].text), int(member[4][2].text), int(member[4][3].text) ) xml_list.append(value) column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax'] xml_df = pd.DataFrame(xml_list, columns=column_name) return xml_df def class_text_to_int(row_label, label_map_dict): return label_map_dict[row_label] def split(df, group): data = namedtuple('data', ['filename', 'object']) gb = df.groupby(group) return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)] # Creates a TFRecord. def create_tf_example(group, path, label_map_dict): with tf1.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = Image.open(encoded_jpg_io) width, height = image.size filename = group.filename.encode('utf8') image_format = b'jpg' xmins = [] xmaxs = [] ymins = [] ymaxs = [] classes_text = [] classes = [] for index, row in group.object.iterrows(): xmins.append(row['xmin'] / width) xmaxs.append(row['xmax'] / width) ymins.append(row['ymin'] / height) ymaxs.append(row['ymax'] / height) classes_text.append(row['class'].encode('utf8')) classes.append(class_text_to_int(row['class'], label_map_dict)) tf_example = tf1.train.Example(features=tf1.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(filename), 'image/source_id': dataset_util.bytes_feature(filename), 'image/encoded': dataset_util.bytes_feature(encoded_jpg), 'image/format': dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin': 
dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), 'image/object/class/label': dataset_util.int64_list_feature(classes), })) return tf_example def dataset_to_tfrecord( images_dir, xmls_dir, label_map_path, output_path, csv_path=None ): label_map = label_map_util.load_labelmap(label_map_path) label_map_dict = label_map_util.get_label_map_dict(label_map) tfrecord_writer = tf1.python_io.TFRecordWriter(output_path) images_path = os.path.join(images_dir) csv_examples = xml_to_csv(xmls_dir) grouped_examples = split(csv_examples, 'filename') for group in grouped_examples: tf_example = create_tf_example(group, images_path, label_map_dict) tfrecord_writer.write(tf_example.SerializeToString()) tfrecord_writer.close() print('Successfully created the TFRecord file: {}'.format(output_path)) if csv_path is not None: csv_examples.to_csv(csv_path, index=None) print('Successfully created the CSV file: {}'.format(csv_path)) # Generate a TFRecord for train dataset. dataset_to_tfrecord( images_dir='dataset/printed_links/partitioned/train', xmls_dir='dataset/printed_links/partitioned/train', label_map_path='dataset/printed_links/labels/label_map.pbtxt', output_path='dataset/printed_links/tfrecords/train.record', csv_path='dataset/printed_links/labels/csv/train.csv' ) # Generate a TFRecord for test dataset. 
dataset_to_tfrecord( images_dir='dataset/printed_links/partitioned/test', xmls_dir='dataset/printed_links/partitioned/test', label_map_path='dataset/printed_links/labels/label_map.pbtxt', output_path='dataset/printed_links/tfrecords/test.record', csv_path='dataset/printed_links/labels/csv/test.csv' ) ``` В результате мы должны получить файлы `test.record` и `train.record` в папке `dataset/printed_links/tfrecords/`: ``` dataset/ └── printed_links ├── labels │ ├── csv │ ├── label_map.pbtxt │ └── xml ├── partitioned │ ├── test │ ├── train │ └── val ├── processed ├── raw └── tfrecords ├── test.record └── train.record ``` Эти два файла `test.record` и `train.record` являются конечной версией нашего набора данных, который мы будем использовать для обучения модели `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8`. ## 📖 Работаем с набором данных в формате TFRecord В этом разделе мы посмотрим, какие инструменты для исследования наборов данных в формате `TFRecord` имеются в TensorFlow 2 Object Detection API. **Проверяем количество экземпляров в наборе данных** Посчитать количество экземпляров мы можем следующим образом: ```python import tensorflow as tf # Count the number of examples in the dataset. def count_tfrecords(tfrecords_filename): raw_dataset = tf.data.TFRecordDataset(tfrecords_filename) # Keep in mind that the list() operation might be # a performance bottleneck for large datasets. return len(list(raw_dataset)) TRAIN_RECORDS_NUM = count_tfrecords('dataset/printed_links/tfrecords/train.record') TEST_RECORDS_NUM = count_tfrecords('dataset/printed_links/tfrecords/test.record') print('TRAIN_RECORDS_NUM: ', TRAIN_RECORDS_NUM) print('TEST_RECORDS_NUM: ', TEST_RECORDS_NUM) ``` _output →_ ``` TRAIN_RECORDS_NUM: 100 TEST_RECORDS_NUM: 25 ``` Итак, мы будем тренировать нашу модель на `100` экземплярах и проверять ее способность к обобщению на `25` изображениях. 
**Отображаем габариты и локализацию объектов в изображениях** Отобразить габариты и позицию объектов в изображении мы можем следующим образом: ```python import tensorflow as tf import numpy as np from google.protobuf import text_format import matplotlib.pyplot as plt # Import Object Detection API. from object_detection.utils import visualization_utils from object_detection.protos import string_int_label_map_pb2 from object_detection.data_decoders.tf_example_decoder import TfExampleDecoder %matplotlib inline # Visualize the TFRecord dataset. def visualize_tfrecords(tfrecords_filename, label_map=None, print_num=1): decoder = TfExampleDecoder( label_map_proto_file=label_map, use_display_name=False ) if label_map is not None: label_map_proto = string_int_label_map_pb2.StringIntLabelMap() with tf.io.gfile.GFile(label_map,'r') as f: text_format.Merge(f.read(), label_map_proto) class_dict = {} for entry in label_map_proto.item: class_dict[entry.id] = {'name': entry.name} raw_dataset = tf.data.TFRecordDataset(tfrecords_filename) for raw_record in raw_dataset.take(print_num): example = decoder.decode(raw_record) image = example['image'].numpy() boxes = example['groundtruth_boxes'].numpy() confidences = example['groundtruth_image_confidences'] filename = example['filename'] area = example['groundtruth_area'] classes = example['groundtruth_classes'].numpy() image_classes = example['groundtruth_image_classes'] weights = example['groundtruth_weights'] scores = np.ones(boxes.shape[0]) visualization_utils.visualize_boxes_and_labels_on_image_array( image, boxes, classes, scores, class_dict, max_boxes_to_draw=None, use_normalized_coordinates=True ) plt.figure(figsize=(8, 8)) plt.imshow(image) plt.show() # Visualizing the training TFRecord dataset. 
visualize_tfrecords( tfrecords_filename='dataset/printed_links/tfrecords/train.record', label_map='dataset/printed_links/labels/label_map.pbtxt', print_num=3 ) ``` В результате мы должны увидеть несколько изображений с прямоугольными габаритами для каждого из объектов, ![TFRecord Preview](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/23-tfrecords-preview.jpg) ## 📈 Устанавливаем TensorBoard Перед тем, как начать тренировку мы можем запустить [TensorBoard](https://www.tensorflow.org/tensorboard). TensorBoard поможет нам в мониторинге тренировочного процесса. Он поможет нам увидеть, действительно ли модель обучается или же нам лучше остановить тренировку и подправить параметры тренировки. TensorBoard также поможет нам какие объекты и где именно на изображении наша модель обнаруживает. ![TensorBoard](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/24-tensorboard.gif) _Источник изображения: [домашняя страница TensorBoard](https://www.tensorflow.org/tensorboard)_ Отличной особенностью TensorBoard является то, что мы можем запустить его прямо в Google Colab. Если же вы экспериментируете с моделью локально в Jupyter ноутбуке, то вы можете [установить TensorBoard как Python пакет](https://github.com/tensorflow/tensorboard/blob/master/README.md) и запустить его локально из консоли. Для начала создадим папку `./logs`, в которой во время тренировки будут храниться параметры модели. 
```bash mkdir -p logs ``` Далее, мы загружаем расширение TensorBoard в Google Colab: ``` %load_ext tensorboard ``` И теперь мы можем запустить TensorBoard и указать папку `./logs` в качестве папки с логами тренировки, ``` %tensorboard --logdir ./logs ``` В результате вы должны увидеть пустую панель TensorBoard: ![Empty TensorBoard Panel](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/25-tensorboard-launch.jpg) После того, как мы начнем тренировку, мы сможем вернуться к этой панели и проверить насколько хорошо она обучается. ## 🏋🏻‍️ Тренировка модели ### Настраиваем параметры тренировки Теперь мы можем вернуться к ранее упомянутому файлу `cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config`. В этом файле собраны параметры для тренировки модели `ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8`. Нам необходимо скопировать файл `pipeline.config` в корень нашего проекта и изменить следующие параметры: 1. Необходимо **количество классов** с `90` (количество классов набора данных COCO) на `1` (наш единственный класс `http`) 2. Необходимо уменьшить **размер тренировочного пакета** (batch size) до `8` изображений на один пакет, чтобы избежать проблем с недостатком памяти. 3. Необходимо указать нашей модели, где хранятся сохраненные **слепки** ранее натренированных параметров модели, поскольку мы не хотим тренировать ее с нуля. 4. Необходимо установить параметр `fine_tune_checkpoint_type` в `detection`. 5. Необходимо указать модели, где находится **карта новых классов** объектов. 6. Необходимо указать модели, где находятся **тренировочный и тестовый наборы данных**. Все эти изменения можно сделать вручную в файле `pipeline.config`, но это так же можно сделать программно: ```python import tensorflow as tf from shutil import copyfile from google.protobuf import text_format from object_detection.protos import pipeline_pb2 # Adjust pipeline config modification here if needed. 
def modify_config(pipeline): # Model config. pipeline.model.ssd.num_classes = 1 # Train config. pipeline.train_config.batch_size = 8 pipeline.train_config.fine_tune_checkpoint = 'cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0' pipeline.train_config.fine_tune_checkpoint_type = 'detection' # Train input reader config. pipeline.train_input_reader.label_map_path = 'dataset/printed_links/labels/label_map.pbtxt' pipeline.train_input_reader.tf_record_input_reader.input_path[0] = 'dataset/printed_links/tfrecords/train.record' # Eval input reader config. pipeline.eval_input_reader[0].label_map_path = 'dataset/printed_links/labels/label_map.pbtxt' pipeline.eval_input_reader[0].tf_record_input_reader.input_path[0] = 'dataset/printed_links/tfrecords/test.record' return pipeline def clone_pipeline_config(): copyfile( 'cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config', 'pipeline.config' ) def setup_pipeline(pipeline_config_path): clone_pipeline_config() pipeline = read_pipeline_config(pipeline_config_path) pipeline = modify_config(pipeline) write_pipeline_config(pipeline_config_path, pipeline) return pipeline def read_pipeline_config(pipeline_config_path): pipeline = pipeline_pb2.TrainEvalPipelineConfig() with tf.io.gfile.GFile(pipeline_config_path, "r") as f: proto_str = f.read() text_format.Merge(proto_str, pipeline) return pipeline def write_pipeline_config(pipeline_config_path, pipeline): config_text = text_format.MessageToString(pipeline) with tf.io.gfile.GFile(pipeline_config_path, "wb") as f: f.write(config_text) # Adjusting the pipeline configuration. 
pipeline = setup_pipeline('pipeline.config') print(pipeline) ``` Вот окончательная версия файла `pipeline.config` после редактирования: ``` model { ssd { num_classes: 1 image_resizer { fixed_shape_resizer { height: 640 width: 640 } } feature_extractor { type: "ssd_mobilenet_v2_fpn_keras" depth_multiplier: 1.0 min_depth: 16 conv_hyperparams { regularizer { l2_regularizer { weight: 3.9999998989515007e-05 } } initializer { random_normal_initializer { mean: 0.0 stddev: 0.009999999776482582 } } activation: RELU_6 batch_norm { decay: 0.996999979019165 scale: true epsilon: 0.0010000000474974513 } } use_depthwise: true override_base_feature_extractor_hyperparams: true fpn { min_level: 3 max_level: 7 additional_layer_depth: 128 } } box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true use_matmul_gather: true } } similarity_calculator { iou_similarity { } } box_predictor { weight_shared_convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { weight: 3.9999998989515007e-05 } } initializer { random_normal_initializer { mean: 0.0 stddev: 0.009999999776482582 } } activation: RELU_6 batch_norm { decay: 0.996999979019165 scale: true epsilon: 0.0010000000474974513 } } depth: 128 num_layers_before_predictor: 4 kernel_size: 3 class_prediction_bias_init: -4.599999904632568 share_prediction_tower: true use_depthwise: true } } anchor_generator { multiscale_anchor_generator { min_level: 3 max_level: 7 anchor_scale: 4.0 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 scales_per_octave: 2 } } post_processing { batch_non_max_suppression { score_threshold: 9.99999993922529e-09 iou_threshold: 0.6000000238418579 max_detections_per_class: 100 max_total_detections: 100 use_static_shapes: false } score_converter: SIGMOID } 
normalize_loss_by_num_matches: true loss { localization_loss { weighted_smooth_l1 { } } classification_loss { weighted_sigmoid_focal { gamma: 2.0 alpha: 0.25 } } classification_weight: 1.0 localization_weight: 1.0 } encode_background_as_zeros: true normalize_loc_loss_by_codesize: true inplace_batchnorm_update: true freeze_batchnorm: false } } train_config { batch_size: 8 data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { random_crop_image { min_object_covered: 0.0 min_aspect_ratio: 0.75 max_aspect_ratio: 3.0 min_area: 0.75 max_area: 1.0 overlap_thresh: 0.0 } } sync_replicas: true optimizer { momentum_optimizer { learning_rate { cosine_decay_learning_rate { learning_rate_base: 0.07999999821186066 total_steps: 50000 warmup_learning_rate: 0.026666000485420227 warmup_steps: 1000 } } momentum_optimizer_value: 0.8999999761581421 } use_moving_average: false } fine_tune_checkpoint: "cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0" num_steps: 50000 startup_delay_steps: 0.0 replicas_to_aggregate: 8 max_number_of_boxes: 100 unpad_groundtruth_tensors: false fine_tune_checkpoint_type: "detection" fine_tune_checkpoint_version: V2 } train_input_reader { label_map_path: "dataset/printed_links/labels/label_map.pbtxt" tf_record_input_reader { input_path: "dataset/printed_links/tfrecords/train.record" } } eval_config { metrics_set: "coco_detection_metrics" use_moving_averages: false } eval_input_reader { label_map_path: "dataset/printed_links/labels/label_map.pbtxt" shuffle: false num_epochs: 1 tf_record_input_reader { input_path: "dataset/printed_links/tfrecords/test.record" } } ``` ### Запускаем процесс тренировки Мы готовы запустить процесс тренировки модели используя TensorFlow 2 Object Detection API. API содержит файл [model_main_tf2.py](https://github.com/tensorflow/models/blob/master/research/object_detection/model_main_tf2.py), который содержит всю логику тренировки. 
Вы можете детальнее ознакомиться с исходным Python кодом файла, в котором описаны входные параметры скрипта (например, `num_train_steps`, `model_dir` и пр.). Мы будем тренировать модель в течение `1000` итераций (эпох). ```bash %%bash NUM_TRAIN_STEPS=1000 CHECKPOINT_EVERY_N=1000 PIPELINE_CONFIG_PATH=pipeline.config MODEL_DIR=./logs SAMPLE_1_OF_N_EVAL_EXAMPLES=1 python ./models/research/object_detection/model_main_tf2.py \ --model_dir=$MODEL_DIR \ --num_train_steps=$NUM_TRAIN_STEPS \ --sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \ --pipeline_config_path=$PIPELINE_CONFIG_PATH \ --checkpoint_every_n=$CHECKPOINT_EVERY_N \ --alsologtostderr ``` Во время тренировки модели (это может занять `~10` минут для `1000` итераций с использованием [GPU runtime](https://colab.research.google.com/notebooks/gpu.ipynb) в GoogleColab) вы можете увидеть как процесс тренировки в TensorBoard. Ошибки `localization` и `classification` должны уменьшаться, что означает, что модель все лучше и лучше локализует объекты и определяет их класс. ![Training Process](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/26-tensorboard-training.jpg) Также по мере обучения модели в папке `logs` будут создаваться новые чекпоинты (слепки) параметров модели. Папка `logs` может выглядеть следующим образом: ``` logs ├── checkpoint ├── ckpt-1.data-00000-of-00001 ├── ckpt-1.index └── train └── events.out.tfevents.1606560330.b314c371fa10.1747.1628.v2 ``` ### Оцениваем модель (опционально) Чтобы оценить точность работы модели мы пробуем обнаружить объекты на изображения из тестового набора данных. Результат такой оценки обобщается в виде [метрик](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/evaluation_protocols.md), изменение которых мы можем наблюдать с течением времени. 
Вы можете более детально ознакомиться с тем, какие именно метрики используются [здесь](https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/training.html#evaluating-the-model-optional). В этой статье мы пропустим этот шаг с метриками, но мы все-же можем воспользоваться панелью TensorBoard, чтобы увидеть, какие объекты модель обнаруживает на тестовом наборе данных: ```bash %%bash PIPELINE_CONFIG_PATH=pipeline.config MODEL_DIR=logs python ./models/research/object_detection/model_main_tf2.py \ --model_dir=$MODEL_DIR \ --pipeline_config_path=$PIPELINE_CONFIG_PATH \ --checkpoint_dir=$MODEL_DIR \ ``` После запуска скрипта вы сможете увидеть несколько изображений с обнаруженными в них предметами: ![Model Evaluation](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/27-tensorboard-evaluation.jpg) ## 🗜 Экспортируем модель После окончания тренировки необходимо сохранить модель для дальнейшего использования. Для экспортирования модели мы воспользуемся скриптом [exporter_main_v2.py](https://github.com/tensorflow/models/blob/master/research/object_detection/exporter_main_v2.py) из Object Detection API. Этот скрипт подготавливает TensorFlow граф на основании чекпоинтов модели и ее тренировочной конфигурации. После выполнения скрипта мы получим папку с чекпоинтами, моделью в формате SavedModel и копией конфигурационного файла модели. 
```bash %%bash python ./models/research/object_detection/exporter_main_v2.py \ --input_type=image_tensor \ --pipeline_config_path=pipeline.config \ --trained_checkpoint_dir=logs \ --output_directory=exported/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ``` Вот так выглядит содержимое папки `exported`: ``` exported └── ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ├── checkpoint │ ├── checkpoint │ ├── ckpt-0.data-00000-of-00001 │ └── ckpt-0.index ├── pipeline.config └── saved_model ├── assets ├── saved_model.pb └── variables ├── variables.data-00000-of-00001 └── variables.index ``` На этом этапе у нас есть модель в папке `saved_model`, которую мы уже можем использовать для обнаружения объектов. ## 🚀 Использование экспортированной модели Давайте посмотрим, как мы можем использовать модель, экспортированную на предыдущем этапе. В начале нам необходимо создать функцию-обнаружитель, которая будет использовать сохраненную модель. Эта функция будет принимать изображение на вход и выдавать информацию об обнаруженных объектах: ```python import time import math PATH_TO_SAVED_MODEL = 'exported/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model' def detection_function_from_saved_model(saved_model_path): print('Loading saved model...', end='') start_time = time.time() # Load saved model and build the detection function detect_fn = tf.saved_model.load(saved_model_path) end_time = time.time() elapsed_time = end_time - start_time print('Done! Took {} seconds'.format(math.ceil(elapsed_time))) return detect_fn exported_detect_fn = detection_function_from_saved_model( PATH_TO_SAVED_MODEL ) ``` _output →_ ``` Loading saved model...Done! 
Took 9 seconds ``` Для сопоставления идентификаторов обнаруженных классов с именами классов нам также необходимо загрузить карту классов: ```python from object_detection.utils import label_map_util category_index = label_map_util.create_category_index_from_labelmap( 'dataset/printed_links/labels/label_map.pbtxt', use_display_name=True ) print(category_index) ``` _output →_ ``` {1: {'id': 1, 'name': 'http'}} ``` Тестируем нашу модель на тестовом наборе данных. ```python import matplotlib.pyplot as plt import tensorflow as tf import numpy as np from object_detection.utils import visualization_utils from object_detection.data_decoders.tf_example_decoder import TfExampleDecoder %matplotlib inline def tensors_from_tfrecord( tfrecords_filename, tfrecords_num, dtype=tf.float32 ): decoder = TfExampleDecoder() raw_dataset = tf.data.TFRecordDataset(tfrecords_filename) images = [] for raw_record in raw_dataset.take(tfrecords_num): example = decoder.decode(raw_record) image = example['image'] image = tf.cast(image, dtype=dtype) images.append(image) return images def test_detection(tfrecords_filename, tfrecords_num, detect_fn): image_tensors = tensors_from_tfrecord( tfrecords_filename, tfrecords_num, dtype=tf.uint8 ) for image_tensor in image_tensors: image_np = image_tensor.numpy() # The model expects a batch of images, so add an axis with `tf.newaxis`. input_tensor = tf.expand_dims(image_tensor, 0) detections = detect_fn(input_tensor) # All outputs are batches tensors. # Convert to numpy arrays, and take index [0] to remove the batch dimension. # We're only interested in the first num_detections. num_detections = int(detections.pop('num_detections')) detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()} detections['num_detections'] = num_detections # detection_classes should be ints. 
detections['detection_classes'] = detections['detection_classes'].astype(np.int64) image_np_with_detections = image_np.astype(int).copy() visualization_utils.visualize_boxes_and_labels_on_image_array( image_np_with_detections, detections['detection_boxes'], detections['detection_classes'], detections['detection_scores'], category_index, use_normalized_coordinates=True, max_boxes_to_draw=100, min_score_thresh=.3, agnostic_mode=False ) plt.figure(figsize=(8, 8)) plt.imshow(image_np_with_detections) plt.show() test_detection( tfrecords_filename='dataset/printed_links/tfrecords/test.record', tfrecords_num=10, detect_fn=exported_detect_fn ) ``` В результате вы должны увидеть `10` изображений из тестового набора данных с обнаруженными и подсвеченными `https:` префиксами: ![Testing the model on a test dataset](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/28-testing-the-model.jpg) Тот факт, что модель смогла обнаружить объекты (в нашем случае префиксы `https://`) в изображениях, которые она раньше не "видела" является хорошим знаком и, собственно, тем, что мы хотели достигнуть этой тренировкой. ## 🗜 Конвертируем модель в веб-совместимый формат Как вы помните из начала данной статьи нашей целью была тренировка модели обнаружения объектов, которую мы могли бы использовать в браузере. К счастью, существует JavaScript версия TensorFlow - [TensorFlow.js](https://www.tensorflow.org/js). В JavaScript мы не можем работать с сохраненной ранее моделью напрямую. Нам нужна еще одна последняя конвертация модели в формат [tfjs_graph_model](https://www.tensorflow.org/js/tutorials/conversion/import_saved_model). 
Для того, чтобы осуществить эту конвертацию, нам понадобится Python пакет tensorflowjs: ```bash pip install tensorflowjs --quiet ``` Теперь мы можем конвертировать модель в нужный нам формат: ```bash %%bash tensorflowjs_converter \ --input_format=tf_saved_model \ --output_format=tfjs_graph_model \ exported/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model \ exported_web/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ``` Папка `exported_web` содержит `.json` файл с информацией об архитектуре модели, а несколько файлов в формате `.bin` содержат ее параметры. ``` exported_web └── ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8 ├── group1-shard1of4.bin ├── group1-shard2of4.bin ├── group1-shard3of4.bin ├── group1-shard4of4.bin └── model.json ``` Наконец-то мы получили модель, которая способна обнаруживать `https://` префиксы в изображениях и которая сохранена в формате, понятном JavaScript приложениям. Давайте проверим размеры моделей, которые мы создали: ```python import pathlib def get_folder_size(folder_path): mB = 1000000 root_dir = pathlib.Path(folder_path) sizeBytes = sum(f.stat().st_size for f in root_dir.glob('**/*') if f.is_file()) return f'{sizeBytes//mB} MB' print(f'Original model size: {get_folder_size("cache/datasets/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8")}') print(f'Exported model size: {get_folder_size("exported/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8")}') print(f'Exported WEB model size: {get_folder_size("exported_web/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8")}') ``` _output →_ ``` Original model size: 31 MB Exported model size: 28 MB Exported WEB model size: 13 MB ``` Как вы можете заметить, модель, которую мы собираемся использовать на стороне клиента весит `13MB`, что вполне допустимо и соответствует требованиям, которые мы определили в начале статьи. 
Позже на стороне клиента мы сможем импортировать эту модель следующим образом: ```javascript import * as tf from '@tensorflow/tfjs'; const model = await tf.loadGraphModel(modelURL); ``` > 🧭 Следующим шагом будет реализация пользовательского интерфейса для модели, что является темой для другой статьи. Но уже сейчас, при желании, вы можете ознакомиться с финальным примером кода приложения на TypeScript в [репозитории links-detector](https://github.com/trekhleb/links-detector) на GitHub. ## 🤔 Заключение В этой статье мы начали решать проблему распознавания печатных ссылок. В итоге мы обучили модель, способную распознавать префиксы `https://` в текстовых изображениях (например, в кадрах видео-потока с камеры смартфона). Мы также конвертировали обученную модель в формат `tfjs_graph_model` для дальнейшего использования ее на стороне клиента в JavaScript/TypeScript приложении. Вы можете 🚀 [**запустить Links Detector**](https://trekhleb.github.io/links-detector/) со своего смартфона и попробовать, как он обнаруживает ссылки в вашей книге или журнале. Финальное решение выглядит следующим образом: ![Links Detector Demo](https://raw.githubusercontent.com/trekhleb/links-detector/master/articles/printed_links_detection/assets/03-links-detector-demo.gif) Вы также можете 📝 [**ознакомиться с репозиторием links-detector**](https://github.com/trekhleb/links-detector) на GitHub, в котором сможете найти исходный код клиентской части приложения. > ⚠️ На данный момент приложение находится в _экспериментальной_ стадии и имеет [множество недоработок и ограничений](https://github.com/trekhleb/links-detector/issues?q=is%3Aopen+is%3Aissue+label%3Aenhancement). Поэтому, до тех пор, пока вышеуказанные недоработки не будут ликвидированы, не ожидайте от приложения слишком многого 🤷🏻‍. В качестве следующих шагов по улучшению точности модели мы можем сделать следующее: - Дополнить тренировочный и тестовый наборы данных ссылками разных форматов (`http://`, `tcp://`, `ftp://` и пр.) 
- Дополнить набор данных примерами изображений с темным фоном и светлым текстом. - Дополнить набор данных подчеркнутыми ссылками. - Дополнить набор данных текстами и ссылками с другими шрифтами - и пр. Несмотря на то, что точность модели недостаточна для релиза полноценного приложения, я все же надеюсь, что эта статья была для вас полезной и вдохновила вас на дальнейшие эксперименты с моделями обнаружения объектов. Успешной тренировки! ================================================ FILE: package.json ================================================ { "name": "links-detector", "version": "0.1.0", "private": true, "author": { "name": "Oleksii Trekhleb", "url": "https://www.linkedin.com/in/trekhleb/" }, "homepage": "https://trekhleb.github.io/links-detector/", "scripts": { "format:index": "prettier \"public/index.html\" --write", "cp-wasm": "cp node_modules/@tensorflow/tfjs-backend-wasm/dist/tfjs-backend-wasm.wasm ./public/wasm", "cp-wasm-simd": "cp node_modules/@tensorflow/tfjs-backend-wasm/dist/tfjs-backend-wasm-simd.wasm ./public/wasm", "cp-wasm-simd-thread": "cp node_modules/@tensorflow/tfjs-backend-wasm/dist/tfjs-backend-wasm-threaded-simd.wasm ./public/wasm", "build:wasm": "yarn cp-wasm && yarn cp-wasm-simd && yarn cp-wasm-simd-thread", "build:style": "tailwind build src/styles/index.css -o src/styles/tailwind.css", "build:pwa": "pwa-asset-generator src/icons/pwa/links-detector-logo-white.svg public/icons --manifest public/manifest.json --index public/index.html --background black --path \"%PUBLIC_URL%\" --scrape false --icon-only", "postbuild:pwa": "yarn run format:index", "build:assets": "yarn build:wasm && yarn build:style && yarn build:pwa", "prebuild": "yarn build:assets", "build": "react-scripts build", "prestart": "yarn build:assets", "start": "react-scripts start", "start-https": "HTTPS=true yarn start", "prestart-prod": "yarn build", "start-prod": "serve -c serve.json -l 4000", "test": "react-scripts test", "eject": "react-scripts eject", "lint": 
"eslint 'src/**/*.{js,ts,tsx}'", "predeploy": "yarn build", "deploy": "gh-pages -d ./build" }, "dependencies": { "@tensorflow/tfjs": "^2.4.0", "@tensorflow/tfjs-backend-wasm": "^2.7.0", "@tensorflow/tfjs-core": "^2.4.0", "@testing-library/jest-dom": "^5.11.5", "@testing-library/react": "^11.1.0", "@testing-library/user-event": "^12.1.10", "@types/gtag.js": "^0.0.3", "@types/jest": "^26.0.15", "@types/lodash": "^4.14.161", "@types/node": "^14.14.5", "@types/react": "^16.9.0", "@types/react-dom": "^16.9.0", "@types/react-helmet": "^6.1.0", "@types/react-router-dom": "^5.1.5", "@types/tesseract.js": "^0.0.2", "history": "^4.10.1", "lodash": "^4.17.20", "react": "^17.0.1", "react-dom": "^17.0.1", "react-helmet": "^6.1.0", "react-router-dom": "^5.2.0", "react-scripts": "4.0.0", "tailwindcss": "^1.8.10", "tesseract.js": "^2.1.3", "typescript": "~4.0.5", "workbox-core": "^5.1.3", "workbox-expiration": "^5.1.3", "workbox-precaching": "^5.1.3", "workbox-routing": "^5.1.3", "workbox-strategies": "^5.1.3", "workbox-cacheable-response": "^5.1.3", "workbox-google-analytics": "^5.1.3" }, "devDependencies": { "@typescript-eslint/eslint-plugin": "^4.1.1", "@typescript-eslint/parser": "^4.1.1", "eslint-config-airbnb": "^18.2.0", "eslint-plugin-import": "^2.22.0", "eslint-plugin-jsx-a11y": "^6.3.1", "eslint-plugin-react": "^7.20.6", "eslint-plugin-react-hooks": "^4.1.2", "gh-pages": "^3.1.0", "prettier": "^2.1.2", "pwa-asset-generator": "^3.2.3", "serve": "^11.3.2" }, "eslintConfig": { "extends": "react-app" }, "browserslist": { "production": [ ">0.2%", "not dead", "not op_mini all" ], "development": [ "last 1 chrome version", "last 1 firefox version", "last 1 safari version" ] } } ================================================ FILE: public/index.css ================================================ html, body { background-color: black; color: white; height: 100%; font-family: Roboto, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", "Helvetica Neue", Arial, "Noto Sans", 
sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; } .full-height { height: 100%; box-sizing: border-box; } .fade-in-1 { animation: fadeIn ease-in-out .1s; } .fade-in-2 { animation: fadeIn ease-in-out .2s; } .fade-in-5 { animation: fadeIn ease-in-out .5s; } .fade-in-10 { animation: fadeIn ease-in-out 1s; } .pulsate-1 { animation: pulsate-1 2s cubic-bezier(0, 0, 0.2, 1) infinite; } .pulsate-2 { animation: pulsate-2 2s cubic-bezier(0, 0, 0.2, 1) infinite; } @keyframes fadeIn { 0% { opacity: 0; } 100% { opacity: 1; } } @keyframes pulsate-1 { 0% { transform: scale(1); opacity: 1; } 75%, 100% { transform: scale(1.5); opacity: 0; } } @keyframes pulsate-2 { 0%, 10% { transform: scale(1); opacity: 1; } 100% { transform: scale(1.5); opacity: 0; } } ================================================ FILE: public/index.html ================================================ Links Detector
Loading links detector app...
================================================ FILE: public/manifest.json ================================================ { "name": "Links Detector", "short_name": "Links Detector", "description": "Links Detector makes printed links clickable via your smartphone camera. No need to type a link in, just scan and click on it.", "start_url": "/links-detector/?src=pwa", "scope": "/links-detector/", "display": "standalone", "orientation": "portrait", "theme_color": "#000000", "background_color": "#000000", "icons": [ { "src": "icons/manifest-icon-192.png", "sizes": "192x192", "type": "image/png", "purpose": "maskable any" }, { "src": "icons/manifest-icon-512.png", "sizes": "512x512", "type": "image/png", "purpose": "maskable any" }, { "src": "favicon.ico", "sizes": "48x48", "type": "image/x-icon" } ] } ================================================ FILE: public/models/links_detector/v1/model.json ================================================ { "format": "graph-model", "generatedBy": "2.3.0", "convertedBy": "TensorFlow.js Converter v2.4.0", "userDefinedMetadata": { "signature": { "inputs": { "input_tensor:0": { "name": "input_tensor:0", "dtype": "DT_UINT8", "tensorShape": { "dim": [ { "size": "1" }, { "size": "-1" }, { "size": "-1" }, { "size": "3" } ] } } }, "outputs": { "Identity_1:0": { "name": "Identity_1:0", "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "100" }, { "size": "4" } ] } }, "Identity_3:0": { "name": "Identity_3:0", "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "100" }, { "size": "2" } ] } }, "Identity_5:0": { "name": "Identity_5:0", "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" } ] } }, "Identity:0": { "name": "Identity:0", "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "100" } ] } }, "Identity_7:0": { "name": "Identity_7:0", "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "51150" }, { "size": "2" } ] } }, "Identity_2:0": { 
"name": "Identity_2:0", "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "100" } ] } }, "Identity_4:0": { "name": "Identity_4:0", "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "100" } ] } }, "Identity_6:0": { "name": "Identity_6:0", "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "51150" }, { "size": "4" } ] } } } } }, "modelTopology": { "node": [ { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_7", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_13/x", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_6", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/Reshape_3", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "51150" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_3/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_3/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { 
"name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_3/axis", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_3/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_3/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_3/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_12/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_6/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_6/t", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_6/e", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { 
"dim": [ { "size": "2" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_2/x", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_3/x", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "2" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice/stack", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select/t", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select/e", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_6/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_6/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, 
"dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1/stack_2", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_1/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_1/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_1/t", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_1/e", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3/stack", "op": "Const", "attr": { "value": 
{ "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3/stack_2", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_11", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_19/x", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_10", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/add_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "100" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_5/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": 
"DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_5/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_5/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_5/axis", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_5/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_18/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_9/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_9/t", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_9/e", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/add/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_9", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "2" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_16/x", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16/stack_1", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_17/x", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_8", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "2" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14/stack", "op": "Const", "attr": { 
"value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_14/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_7/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_7/t", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_7/e", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_4/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_4/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_4/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_4/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_4/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_4/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15/stack_2", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_15/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_8/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_8/t", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_8/e", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17/stack", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", 
"tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_3", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_5/x", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_2", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_1/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1/start", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1/delta", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_1/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2/start", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2/delta", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_1/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4/stack_2", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_4/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_2/y", "op": "Const", "attr": { "value": { 
"tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_2/t", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_2/e", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/GreaterEqual/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Const_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum_1/x", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/split/split_dim", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_6/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/concat/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice/stack_1", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/split/split_dim", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_1/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_1/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/mul_1/x", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/split/split_dim", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/split/split_dim", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "100" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1/Reshape/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1/Const", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat_1/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/mul", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "100" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_4_recip", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_5_recip", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "51150" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/add", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "51150" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_2_recip", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/sub_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "51150" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_6_recip", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "51150" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/add_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "51150" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape/shape", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_1/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_2/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_3/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "unknown_328", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { 
"size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_329", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "24" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_330", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "24" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_4/shape", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } } } }, { "name": "StatefulPartitionedCall/concat/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Reshape_1/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "2" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/transpose/perm", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "2" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_3_recip", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/sub", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "51150" } ] } } } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_7_recip", "op": "Const", "attr": { "value": { 
"tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/transpose_1/perm", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "2" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/ExpandDims_1/dim", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/Reshape/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "4" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "unknown_269", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "32" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_270", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_271", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } }, "dtype": { "type": "DT_INT32" } 
} }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/Reshape/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "4" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "unknown_261", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "96" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_262", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_263", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_1/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_2/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_3/shape", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } } } }, { "name": "StatefulPartitionedCall/Preprocessor/mul/x", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { 
"type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Preprocessor/sub/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Preprocessor/ResizeImage/resize/ExpandDims/dim", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Preprocessor/ResizeImage/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "2" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "unknown_259", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "1280" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_260", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_277", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_283", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_304", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_310", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { "size": "1" } ] } } }, "dtype": { "type": 
"DT_FLOAT" } } }, { "name": "unknown_316", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { "size": "1" } ] } } } } }, { "name": "unknown_322", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_331", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "128" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_332", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "12" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "unknown_333", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "12" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_4/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/concat_1/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Slice/begin", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Slice/size", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "3" } ] 
} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Reshape/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/iou_threshold", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/score_threshold", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/soft_nms_sigma", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2/stack_1", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { 
"size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros/Reshape/shape", "op": "Const", "attr": { "dtype": { "type": "DT_INT32" }, "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros/Const", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_5/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_6/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_3", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/concat/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Greater/y", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Reshape/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_6/axis", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3/stack", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3/stack_1", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3/stack_2", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1/Reshape/shape", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": { "dim": [ { "size": "1" } ] } } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1/Const", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": {} } } } }, { "name": "input_tensor", "op": "Placeholder", "attr": { "shape": { "shape": { "dim": [ { "size": "1" }, { "size": "-1" }, { "size": "-1" }, { "size": "3" } ] } }, "dtype": { "type": "DT_UINT8" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv1/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "3" }, { "size": "32" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv1/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "32" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_depthwise/depthwise_weights", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "32" }, { "size": "1" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "32" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "32" }, { "size": "16" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "16" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_expand/Conv2D_weights", "op": 
"Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "16" }, { "size": "96" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "96" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "96" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "96" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "96" }, { "size": "24" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "24" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { 
"size": "24" }, { "size": "144" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "144" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "144" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "144" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "144" }, { "size": "24" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "24" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "24" }, { "size": "144" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "144" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "144" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "144" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "144" }, { "size": "32" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "32" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "32" }, { "size": "192" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_expand/Conv2D_bn_offset", 
"op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "192" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "192" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "192" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "192" }, { "size": "32" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "32" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "32" }, { "size": "192" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "192" } ] } } }, 
"dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "192" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "192" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "192" }, { "size": "32" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "32" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "32" }, { "size": "192" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_expand/Conv2D_bn_offset", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "192" } ] } } } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "192" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "192" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "192" }, { "size": "64" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "64" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_expand/Conv2D_weights", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "64" }, { "size": "384" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "384" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_depthwise/depthwise_weights", 
"op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "384" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "384" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "384" }, { "size": "64" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "64" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_expand/Conv2D_weights", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "64" }, { "size": "384" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "384" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": 
"3" }, { "size": "384" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "384" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "384" }, { "size": "64" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "64" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "64" }, { "size": "384" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "384" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "384" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "384" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_project/Conv2D_weights", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "384" }, { "size": "64" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "64" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_expand/Conv2D_weights", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "64" }, { "size": "384" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "384" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "384" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "384" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_project/Conv2D_weights", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "384" }, { "size": "96" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "96" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "96" }, { "size": "576" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_expand/Conv2D_bn_offset", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "576" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "576" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "576" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "576" }, { "size": "96" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "96" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "96" }, { "size": "576" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "576" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "576" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "576" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_project/Conv2D_weights", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "576" }, { "size": "96" } ] } } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "96" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "96" }, { "size": "576" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "576" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "576" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "576" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "576" }, { "size": "160" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "160" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "160" }, { "size": "960" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "960" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "960" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "960" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "960" }, { "size": "160" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "160" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "160" }, { "size": "960" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "960" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "960" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "960" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "960" }, { "size": "160" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "160" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_expand/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "160" }, { "size": "960" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_expand/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "960" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_depthwise/depthwise_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "3" }, { "size": "3" }, { "size": "960" }, { "size": "1" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_depthwise/depthwise_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "960" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_project/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "960" }, { "size": "320" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_project/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "320" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv_1/Conv2D_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "320" }, { "size": "1280" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv_1/Conv2D_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1280" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_2_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_2_bn_offset", "op": 
"Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20_depthwise_conv/separable_conv2d_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20_depthwise_conv/separable_conv2d_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2_depthwise_conv/separable_conv2d_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2_depthwise_conv/separable_conv2d_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_2_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_2_bn_offset", "op": "Const", "attr": { "value": { 
"tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_3_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_3_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21_depthwise_conv/separable_conv2d_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21_depthwise_conv/separable_conv2d_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_1_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_1_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, 
"dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_2_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_2_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_3_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_3_bn_offset", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_4_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_4_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1_depthwise_conv/separable_conv2d_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1_depthwise_conv/separable_conv2d_bn_offset", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_1_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_1_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_2_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_2_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_3_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_3_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_4_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_4_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_1_weights", "op": 
"Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_1_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_3_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_3_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_4_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_4_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_weights", "op": "Const", "attr": { "dtype": { "type": "DT_FLOAT" }, "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": 
[ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_1_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_1_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_4_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_4_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_weights", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "1" }, { "size": "1" }, { "size": "128" }, { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_bn_offset", "op": "Const", "attr": { "value": { "tensor": { "dtype": "DT_FLOAT", "tensorShape": { "dim": [ { "size": "128" } ] } } }, "dtype": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Cast", "op": "Cast", "input": [ "input_tensor" ], "attr": { "SrcT": { "type": "DT_UINT8" }, "Truncate": { "b": false }, "DstT": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Preprocessor/mul", "op": "Mul", "input": [ "StatefulPartitionedCall/Preprocessor/mul/x", "StatefulPartitionedCall/Cast" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Preprocessor/sub", "op": "Sub", "input": [ "StatefulPartitionedCall/Preprocessor/mul", "StatefulPartitionedCall/Preprocessor/sub/y" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Preprocessor/unstack", "op": "Unpack", "input": [ "StatefulPartitionedCall/Preprocessor/sub" ], "attr": { "num": { "i": "1" }, "T": { "type": "DT_FLOAT" }, "axis": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Preprocessor/ResizeImage/resize/ExpandDims", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Preprocessor/unstack", "StatefulPartitionedCall/Preprocessor/ResizeImage/resize/ExpandDims/dim" ], "attr": { "Tdim": { "type": "DT_INT32" }, 
"T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Preprocessor/ResizeImage/resize/ResizeBilinear", "op": "ResizeBilinear", "input": [ "StatefulPartitionedCall/Preprocessor/ResizeImage/resize/ExpandDims", "StatefulPartitionedCall/Preprocessor/ResizeImage/stack" ], "attr": { "align_corners": { "b": false }, "half_pixel_centers": { "b": false }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Preprocessor/ResizeImage/resize/Squeeze", "op": "Squeeze", "input": [ "StatefulPartitionedCall/Preprocessor/ResizeImage/resize/ResizeBilinear" ], "attr": { "squeeze_dims": { "list": { "i": [ "0" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Preprocessor/stack_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Preprocessor/ResizeImage/resize/Squeeze" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Preprocessor/stack", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Preprocessor/ResizeImage/resize/Squeeze", "ConstantFolding/StatefulPartitionedCall/Preprocessor/stack_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv1_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/Preprocessor/stack", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv1/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv1/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "2", "2", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { 
"f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv1_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_depthwise/depthwise_bn_offset" ], "attr": { "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_depthwise/depthwise_bn_offset" ], "attr": { "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "2", "2", "1" ] } }, "data_format": { "s": "TkhXQw==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_depthwise/depthwise_bn_offset" ], "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_add/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_project_BN/FusedBatchNormV3", 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_depthwise/depthwise_bn_offset" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "2", "2", "1" ] } }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_depthwise/depthwise_bn_offset" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_add/add", "op": "AddV2", "input": [ 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_depthwise/depthwise_bn_offset" ], "attr": { "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { 
"i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_add/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/projection_1/BiasAdd", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_add/add", "unknown_269", "unknown_270" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": 
[ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_depthwise/depthwise_bn_offset" ], "attr": { "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "2", "2", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_depthwise/depthwise_bn_offset" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_add/add", "op": "AddV2", "input": [ 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_depthwise/depthwise_bn_offset" ], "attr": { "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, 
"data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_add/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "num_args": { "i": "1" 
}, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_depthwise/depthwise_bn_offset" ], "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" 
}, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_add/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_depthwise/depthwise_weights", 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_depthwise/depthwise_bn_offset" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": 
{ "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_depthwise/depthwise_bn_offset" ], "attr": { "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { 
"type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_add/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_depthwise/depthwise_weights", 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_depthwise/depthwise_bn_offset" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_add/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/projection_2/BiasAdd", "op": "_FusedConv2D", "input": [ 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_add/add", "unknown_261", "unknown_262" ], "device": "/device:CPU:0", "attr": { "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_depthwise/depthwise_weights", 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_depthwise/depthwise_bn_offset" ], "attr": { "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "2", "2", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": 
{ "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_depthwise/depthwise_bn_offset" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { 
"type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_add/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_depthwise/depthwise_weights", 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_depthwise/depthwise_bn_offset" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_depthwise/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_add/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_project_BN/FusedBatchNormV3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_expand_relu/Relu6", "op": "_FusedConv2D", "input": [ 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_add/add", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_expand/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_expand/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_depthwise/depthwise", "op": "FusedDepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_expand_relu/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_depthwise/depthwise_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_depthwise/depthwise_bn_offset" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_project_BN/FusedBatchNormV3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_depthwise/depthwise", 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_project/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_project/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/out_relu/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_project_BN/FusedBatchNormV3", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv_1/Conv2D_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv_1/Conv2D_bn_offset" ], "device": "/device:CPU:0", "attr": { "padding": { "s": "U0FNRQ==" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/projection_3/BiasAdd", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/out_relu/Relu6", "unknown_259", "unknown_260" ], "device": "/device:CPU:0", "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { 
"i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/stack", "op": "Pack", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/projection_3/BiasAdd", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/projection_3/BiasAdd" ], "attr": { "T": { "type": "DT_FLOAT" }, "axis": { "i": "3" }, "N": { "i": "2" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_2/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/projection_3/BiasAdd", "unknown_304" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20_depthwise_conv/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/projection_3/BiasAdd", "unknown_277" ], "attr": { "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "2", "2", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/stack_1", "op": "Pack", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/stack", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/stack" ], "attr": { "T": { "type": "DT_FLOAT" }, "axis": { "i": "2" }, "N": { "i": "2" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_2/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_2/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_2_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_2_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20_depthwise_conv/separable_conv2d/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20_depthwise_conv/separable_conv2d_weights", 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20_depthwise_conv/separable_conv2d_bn_offset" ], "device": "/device:CPU:0", "attr": { "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/Reshape", "op": "Reshape", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/stack_1", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/Reshape/shape" ], "attr": { "Tshape": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_2/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_2/Relu6", "unknown_310" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_3/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20/Relu6", 
"unknown_304" ], "attr": { "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21_depthwise_conv/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20/Relu6", "unknown_283" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "2", "2", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/Reshape", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/projection_2/BiasAdd" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_2/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_2/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_2_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_2_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", 
"1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_3/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_3/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_3_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_3_bn_offset" ], "device": "/device:CPU:0", "attr": { "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21_depthwise_conv/separable_conv2d/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21_depthwise_conv/separable_conv2d_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21_depthwise_conv/separable_conv2d_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, 
"explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2_depthwise_conv/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/add", "unknown_263" ], "attr": { "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_2/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_2/Relu6", "unknown_316" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_3/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_3/Relu6", "unknown_310" ], "attr": { "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_4/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21/Relu6", "unknown_304" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2_depthwise_conv/separable_conv2d/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2_depthwise_conv/separable_conv2d_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2_depthwise_conv/separable_conv2d_bn_offset" ], "device": "/device:CPU:0", "attr": { "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_2/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_2/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_2_weights", 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_2_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_3/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_3/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_3_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_3_bn_offset" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_4/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_4/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_4_weights", 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_4_bn_offset" ], "device": "/device:CPU:0", "attr": { "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/stack", "op": "Pack", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2/Relu6", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2/Relu6" ], "attr": { "axis": { "i": "3" }, "N": { "i": "2" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_1/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2/Relu6", "unknown_304" ], "attr": { "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_2/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_2/Relu6", "unknown_322" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { 
"type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_3/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_3/Relu6", "unknown_316" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_4/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_4/Relu6", "unknown_310" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/stack_1", "op": "Pack", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/stack", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/stack" ], "attr": { "T": { "type": "DT_FLOAT" }, "axis": { "i": "2" }, "N": { "i": "2" } } }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_1/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_1/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_1_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_1_bn_offset" ], "device": "/device:CPU:0", "attr": { "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_2/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_2/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_2_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_2_bn_offset" ], "device": "/device:CPU:0", "attr": { "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_3/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_3/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_3_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_3_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_4/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_4/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_4_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_4_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/Reshape", "op": "Reshape", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/stack_1", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/Reshape/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_1/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_1/Relu6", "unknown_310" ], "attr": { "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d_2/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_2/Relu6", "unknown_328" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d_2/depthwise", "op": "DepthwiseConv2dNative", "input": [ 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_2/Relu6", "unknown_331" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_3/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_3/Relu6", "unknown_322" ], "attr": { "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_4/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_4/Relu6", "unknown_316" ], "attr": { "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/add_1", "op": "AddV2", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/Reshape", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/projection_1/BiasAdd" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_1/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_1/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_1_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_1_bn_offset" ], "device": "/device:CPU:0", "attr": { "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd_2", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d_2/depthwise", "unknown_329", "unknown_330" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd_2", "op": "_FusedConv2D", "input": [ 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d_2/depthwise", "unknown_332", "unknown_333" ], "device": "/device:CPU:0", "attr": { "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_3/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_3/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_3_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_3_bn_offset" ], "device": "/device:CPU:0", "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_4/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_4/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_4_weights", 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_4_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1_depthwise_conv/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/add_1", "unknown_271" ], "attr": { "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_1/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_1/Relu6", "unknown_316" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_2", "op": "Reshape", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd_2", 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_2/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_2", "op": "Reshape", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd_2", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_2/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d_3/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_3/Relu6", "unknown_328" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d_3/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_3/Relu6", "unknown_331" ], "attr": { "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_4/depthwise", "op": "DepthwiseConv2dNative", "input": [ 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_4/Relu6", "unknown_322" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1_depthwise_conv/separable_conv2d/depthwise", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1_depthwise_conv/separable_conv2d_weights", "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1_depthwise_conv/separable_conv2d_bn_offset" ], "device": "/device:CPU:0", "attr": { "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_1/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_1/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_1_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_1_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": 
[ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd_3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d_3/depthwise", "unknown_329", "unknown_330" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd_3", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d_3/depthwise", "unknown_332", "unknown_333" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_4/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_4/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_4_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_4_bn_offset" ], "device": "/device:CPU:0", "attr": { "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1/Relu6", "unknown_304" ], "attr": { "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_1/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_1/Relu6", "unknown_322" ], "attr": { "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { 
"s": "TkhXQw==" }, "explicit_paddings": { "list": {} } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_3", "op": "Reshape", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd_3", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_3/shape" ], "attr": { "Tshape": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_3", "op": "Reshape", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd_3", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_3/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d_4/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_4/Relu6", "unknown_328" ], "attr": { "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d_4/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_4/Relu6", "unknown_331" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { 
"list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_0/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_1/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_1/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_1_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_1_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": 
{ "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd_4", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d_4/depthwise", "unknown_329", "unknown_330" ], "device": "/device:CPU:0", "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd_4", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d_4/depthwise", "unknown_332", "unknown_333" ], "device": "/device:CPU:0", "attr": { "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/activation_0/Relu6", "unknown_310" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { 
"i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d_1/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_1/Relu6", "unknown_328" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d_1/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_1/Relu6", "unknown_331" ], "attr": { "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_4", "op": "Reshape", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd_4", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_4/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_4", "op": "Reshape", "input": [ 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd_4", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_4/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_0/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_bn_offset" ], "device": "/device:CPU:0", "attr": { "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd_1", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d_1/depthwise", "unknown_329", "unknown_330" ], "device": "/device:CPU:0", "attr": { "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, 
"explicit_paddings": { "list": {} } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd_1", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d_1/depthwise", "unknown_332", "unknown_333" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/activation_0/Relu6", "unknown_316" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_1", "op": "Reshape", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd_1", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_1/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_1", "op": "Reshape", "input": [ 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd_1", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_1/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_0/Relu6", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_bn_offset" ], "device": "/device:CPU:0", "attr": { "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/activation_0/Relu6", "unknown_322" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_0/Relu6", "op": "_FusedConv2D", "input": [ 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d/depthwise", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_weights", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_bn_offset" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==", "UmVsdTY=" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_0/Relu6", "unknown_328" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d/depthwise", "op": "DepthwiseConv2dNative", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/activation_0/Relu6", "unknown_331" ], "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "data_format": { "s": "TkhXQw==" }, "explicit_paddings": { "list": {} }, "padding": { "s": "U0FNRQ==" } } }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/separable_conv2d/depthwise", "unknown_329", "unknown_330" ], "device": "/device:CPU:0", "attr": { "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } }, "explicit_paddings": { "list": {} }, "use_cudnn_on_gpu": { "b": true }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd", "op": "_FusedConv2D", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/separable_conv2d/depthwise", "unknown_332", "unknown_333" ], "device": "/device:CPU:0", "attr": { "use_cudnn_on_gpu": { "b": true }, "explicit_paddings": { "list": {} }, "num_args": { "i": "1" }, "epsilon": { "f": 0.0 }, "padding": { "s": "VkFMSUQ=" }, "fused_ops": { "list": { "s": [ "Qmlhc0FkZA==" ] } }, "dilations": { "list": { "i": [ "1", "1", "1", "1" ] } }, "T": { "type": "DT_FLOAT" }, "data_format": { "s": "TkhXQw==" }, "strides": { "list": { "i": [ "1", "1", "1", "1" ] } } } }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape", "op": "Reshape", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/BoxPredictor/BiasAdd", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape", "op": "Reshape", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/ClassPredictor/BiasAdd", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/concat", "op": "ConcatV2", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_1", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_2", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_3", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_4", "StatefulPartitionedCall/concat/axis" ], "attr": { "N": { "i": "5" }, "Tidx": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/concat_1", "op": "ConcatV2", "input": [ "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_1", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_2", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_3", "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_4", "StatefulPartitionedCall/concat_1/axis" ], "attr": { "Tidx": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" }, "N": { "i": "5" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/Reshape_1", "op": "Reshape", "input": [ "StatefulPartitionedCall/concat", "StatefulPartitionedCall/Postprocessor/Reshape_1/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/convert_scores", "op": "Sigmoid", "input": [ "StatefulPartitionedCall/concat_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/transpose", "op": "Transpose", "input": [ "StatefulPartitionedCall/Postprocessor/Reshape_1", "StatefulPartitionedCall/Postprocessor/Decode/transpose/perm" ], "attr": { "Tperm": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/unstack_5", "op": "Unpack", "input": [ "StatefulPartitionedCall/Postprocessor/convert_scores" ], "attr": { "num": { "i": "1" }, "T": { "type": "DT_FLOAT" }, "axis": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Slice", "op": "Slice", "input": [ "StatefulPartitionedCall/Postprocessor/convert_scores", "StatefulPartitionedCall/Postprocessor/Slice/begin", "StatefulPartitionedCall/Postprocessor/Slice/size" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/unstack", "op": "Unpack", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/transpose" ], "attr": { "num": { "i": "4" }, "T": { "type": "DT_FLOAT" }, "axis": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/unstack_1", "op": "Unpack", "input": [ "StatefulPartitionedCall/Postprocessor/Slice" ], "attr": { "num": { "i": "1" }, "T": { "type": "DT_FLOAT" }, "axis": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/mul_2", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/unstack", "StatefulPartitionedCall/Postprocessor/Decode/truediv" ], 
"attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/mul_3", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/unstack:1", "StatefulPartitionedCall/Postprocessor/Decode/truediv_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv_2", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/unstack:2", "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_2_recip" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv_3", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/unstack:3", "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_3_recip" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Reshape", "op": "Reshape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/unstack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Reshape/shape" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/mul_2", "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/add" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/add_1", "op": "AddV2", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/mul_3", "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/add_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/Exp_1", "op": "Exp", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/truediv_2" ], "attr": { "T": { "type": 
"DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/Exp", "op": "Exp", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/truediv_3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/mul_1", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/Exp_1", "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/sub_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/mul", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/Exp", "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/sub" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv_4", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/mul_1", "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_4_recip" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv_6", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/mul_1", "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_6_recip" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv_5", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/mul", "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_5_recip" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv_7", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/mul", "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_7_recip" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/sub", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/add", 
"StatefulPartitionedCall/Postprocessor/Decode/truediv_4" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/add_2", "op": "AddV2", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/add", "StatefulPartitionedCall/Postprocessor/Decode/truediv_6" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/sub_1", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/add_1", "StatefulPartitionedCall/Postprocessor/Decode/truediv_5" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/add_3", "op": "AddV2", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/add_1", "StatefulPartitionedCall/Postprocessor/Decode/truediv_7" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/stack", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/sub", "StatefulPartitionedCall/Postprocessor/Decode/sub_1", "StatefulPartitionedCall/Postprocessor/Decode/add_2", "StatefulPartitionedCall/Postprocessor/Decode/add_3" ], "attr": { "T": { "type": "DT_FLOAT" }, "axis": { "i": "0" }, "N": { "i": "4" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/transpose_1", "op": "Transpose", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/stack", "StatefulPartitionedCall/Postprocessor/Decode/transpose_1/perm" ], "attr": { "Tperm": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Reshape_2", "op": "Reshape", "input": [ "StatefulPartitionedCall/Postprocessor/Decode/transpose_1", "StatefulPartitionedCall/Postprocessor/stack" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/ExpandDims_1", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/Reshape_2", 
"StatefulPartitionedCall/Postprocessor/ExpandDims_1/dim" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Squeeze", "op": "Squeeze", "input": [ "StatefulPartitionedCall/Postprocessor/ExpandDims_1" ], "attr": { "squeeze_dims": { "list": { "i": [ "2" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/unstack", "op": "Unpack", "input": [ "StatefulPartitionedCall/Postprocessor/ExpandDims_1" ], "attr": { "num": { "i": "1" }, "T": { "type": "DT_FLOAT" }, "axis": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/unstack", "op": "Unpack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/unstack" ], "attr": { "num": { "i": "1" }, "T": { "type": "DT_FLOAT" }, "axis": { "i": "1" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/NonMaxSuppressionV5", "op": "NonMaxSuppressionV5", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/unstack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Reshape", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/iou_threshold", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/score_threshold", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/soft_nms_sigma" ], "attr": { "pad_to_max_output_size": { "b": false }, "T": { "type": 
"DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Shape_2", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/NonMaxSuppressionV5" ], "attr": { "T": { "type": "DT_INT32" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Shape_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2/stack_2" ], "attr": { "shrink_axis_mask": { "i": "1" }, "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" }, "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Less", "op": "Less", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/sub_1", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/sub", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1/Reshape", "op": "Reshape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/sub_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1/Reshape/shape" ], "attr": { "T": { "type": "DT_INT32" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros/Reshape", "op": "Reshape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/sub", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros/Reshape/shape" ], "attr": { "T": { "type": "DT_INT32" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1", "op": "Fill", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1/Reshape", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1/Const" ], "attr": { "T": { "type": "DT_FLOAT" }, "index_type": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros", "op": "Fill", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros/Reshape", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros/Const" ], "attr": { "T": { "type": "DT_INT32" }, "index_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat_1", "op": "ConcatV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/NonMaxSuppressionV5:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat_1/axis" ], "attr": { "Tidx": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" }, "N": { "i": "2" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat", "op": "ConcatV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/NonMaxSuppressionV5", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat/axis" ], "attr": { "N": { "i": "2" }, "Tidx": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Less", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/mul" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_3", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/Reshape_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_3/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_4", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/unstack_5", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_4/axis" ], "attr": { "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" }, "Taxis": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_5", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/unstack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_5/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Shape", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_5" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Shape", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice/stack_2" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "begin_mask": { "i": "0" }, "ellipsis_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/TopKV2", "op": "TopKV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice" ], "attr": { "sorted": { "b": true }, "T": { 
"type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_3", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_3/axis" ], "attr": { "Tparams": { "type": "DT_FLOAT" }, "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_5", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/add_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_5/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_4", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_4", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_4/axis" ], "attr": { "Tindices": { "type": 
"DT_INT32" }, "Tparams": { "type": "DT_FLOAT" }, "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_6", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_5", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_6/axis" ], "attr": { "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" }, "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_1", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_1/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/split", "op": "Split", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/split/split_dim", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_6" ], "attr": { "num_split": { "i": "4" }, "T": { "type": "DT_FLOAT" } } }, { 
"name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Minimum", "op": "Minimum", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/split", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_2" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Minimum_2", "op": "Minimum", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/split:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Minimum_1", "op": "Minimum", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/split:2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_2" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Minimum_3", "op": "Minimum", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/split:3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_3" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Maximum", "op": "Maximum", 
"input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Minimum", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Maximum_2", "op": "Maximum", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Minimum_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Maximum_1", "op": "Maximum", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Minimum_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Maximum_3", "op": "Maximum", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Minimum_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/concat", "op": "ConcatV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Maximum", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Maximum_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Maximum_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Maximum_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/concat/axis" ], "attr": { "Tidx": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" }, "N": { "i": "4" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/split", "op": "Split", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/split/split_dim", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/concat" ], "attr": { "T": { "type": "DT_FLOAT" }, "num_split": { "i": "4" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/sub", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/split:2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/split" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/sub_1", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/split:3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/split:1" ], "attr": { "T": { "type": 
"DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/mul", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/sub", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/sub_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/Squeeze", "op": "Squeeze", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/mul" ], "attr": { "T": { "type": "DT_FLOAT" }, "squeeze_dims": { "list": { "i": [ "1" ] } } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Greater", "op": "Greater", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/Squeeze", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Greater/y" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Where", "op": "Where", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Greater" ], "attr": { "T": { "type": "DT_BOOL" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Reshape", "op": "Reshape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Where", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Reshape/shape" ], "attr": { "T": { "type": "DT_INT64" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Cast", "op": "Cast", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Reshape" ], "attr": { "SrcT": { "type": "DT_INT64" }, "Truncate": { "b": false }, "DstT": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_3", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Cast", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_3/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_5", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_5", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Cast", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_5/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": 
"DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_4", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_4", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Cast", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_4/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_6", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/concat", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Cast", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_6/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_1", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Cast", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_1/axis" ], "attr": 
{ "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Shape", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_6" ], "attr": { "out_type": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/split", "op": "Split", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/split/split_dim", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_6" ], "attr": { "T": { "type": "DT_FLOAT" }, "num_split": { "i": "4" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Shape_3", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_6" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Shape", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice/stack_1", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice/stack_2" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "begin_mask": { "i": "0" }, "ellipsis_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/sub", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/split:2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/split" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/sub_1", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/split:3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/split:1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Shape_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3/stack_2" ], "attr": { "shrink_axis_mask": { "i": "1" }, "begin_mask": { "i": "0" }, "ellipsis_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, 
"end_mask": { "i": "0" }, "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/mul", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/sub", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/sub_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1/Reshape", "op": "Reshape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1/Reshape/shape" ], "attr": { "T": { "type": "DT_INT32" }, "Tshape": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/Squeeze", "op": "Squeeze", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/mul" ], "attr": { "squeeze_dims": { "list": { "i": [ "1" ] } }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1", "op": "Fill", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1/Reshape", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1/Const" ], "attr": { "index_type": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Cast", "op": "Cast", "input": [ 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/Squeeze" ], "attr": { "SrcT": { "type": "DT_FLOAT" }, "Truncate": { "b": false }, "DstT": { "type": "DT_BOOL" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/mul_1", "op": "Mul", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/mul_1/x", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select_1", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Cast", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/mul_1" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/GreaterEqual", "op": "GreaterEqual", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/GreaterEqual/y" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Equal", "op": "NoOp", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice", "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select_1" 
] }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Cast_1", "op": "Cast", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/GreaterEqual" ], "attr": { "SrcT": { "type": "DT_BOOL" }, "Truncate": { "b": false }, "DstT": { "type": "DT_INT32" } } }, { "name": "Identity_6", "op": "Identity", "input": [ "StatefulPartitionedCall/Postprocessor/Squeeze", "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Equal" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/TopKV2", "op": "TopKV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice", "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Equal" ], "attr": { "T": { "type": "DT_FLOAT" }, "sorted": { "b": true } } }, { "name": "Identity_7", "op": "Identity", "input": [ "StatefulPartitionedCall/Postprocessor/convert_scores", "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Equal" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Sum", "op": "Sum", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Cast_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Const_1" ], "attr": { "T": { "type": "DT_INT32" }, "keep_dims": { "b": false }, "Tidx": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_3", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_3/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_5", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_5", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_5/axis" ], "attr": { "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" }, "Taxis": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_4", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_4", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_4/axis" ], 
"attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_1", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_1/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_6", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_6", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/TopKV2:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_6/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/split", "op": "Split", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/split/split_dim", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_6" ], "attr": { "T": { "type": "DT_FLOAT" }, "num_split": { "i": "4" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/concat", "op": "ConcatV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/split", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/split:1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/split:2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/split:3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/concat/axis" ], "attr": { "T": { "type": "DT_FLOAT" }, "N": { "i": "4" }, "Tidx": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Shape_4", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/concat" ], "attr": { "out_type": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Shape_4", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4/stack", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4/stack_2" ], "attr": { "shrink_axis_mask": { "i": "1" }, "begin_mask": { "i": "0" }, "ellipsis_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" }, "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum_1", "op": "Minimum", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum_1/x", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1", "op": "Range", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1/start", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1/delta" ], "attr": { "Tidx": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Greater", "op": "Greater", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Sum" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_3", "op": 
"GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_3/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_5", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_5", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_5/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_4", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_4", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_4/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_1", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_1/axis" ], "attr": { "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" }, "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_6", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/concat", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_6/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select_2", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Greater", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Sum", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum_1" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2", "op": 
"Range", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2/start", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2/delta" ], "attr": { "Tidx": { "type": "DT_INT32" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_12_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select_2" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_3", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_3/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_5", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_5", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_5/axis" ], "attr": { "Tparams": { 
"type": "DT_FLOAT" }, "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_4", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_4", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_4/axis" ], "attr": { "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" }, "Taxis": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_1", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_1/axis" ], "attr": { "Taxis": { "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_6", "op": "GatherV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_6", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_6/axis" ], "attr": { "Taxis": 
{ "type": "DT_INT32" }, "batch_dims": { "i": "0" }, "Tindices": { "type": "DT_INT32" }, "Tparams": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_12", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Select_2", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_12_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_6", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_3" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_10", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_5" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_8", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_4" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_2", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_1" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_6" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Cast_4", "op": "Cast", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_12" ], "attr": { "DstT": { "type": "DT_FLOAT" }, "SrcT": { "type": "DT_INT32" }, "Truncate": { "b": false } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_6", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12/stack_2" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_10", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18/stack_1", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18/stack_2" ], "attr": { "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" }, "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "begin_mask": { "i": "0" }, "ellipsis_mask": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_8", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14/stack_2" ], "attr": { "shrink_axis_mask": { "i": "1" }, "begin_mask": { "i": "0" }, "ellipsis_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" }, "T": { "type": "DT_INT32" }, "Index": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_8", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15/stack_2" ], "attr": { "end_mask": { "i": "0" }, "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" } } }, { 
"name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4/stack_2" ], "attr": { "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" }, "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice/stack_2" ], "attr": { "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" }, "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1/stack", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1/stack_2" ], "attr": { "T": { "type": "DT_INT32" }, "Index": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" } } }, { "name": "Identity_5", "op": "Identity", "input": [ "StatefulPartitionedCall/Postprocessor/Cast_4" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_12", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_12/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_18", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_18/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_14", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_14/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_15", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_15/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_4", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_4/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_1", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_1/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_6", "op": "Greater", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_12", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_6/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_9", "op": "Greater", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_18", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_9/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_7", "op": "Greater", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_14", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_7/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_8", "op": "Greater", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_15", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_8/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_2", "op": "Greater", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_4", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_2/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater", "op": "Greater", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_1", "op": "Greater", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_1", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_1/y" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_6", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_6", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_6/t", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_6/e" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_9", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_9", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_9/t", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_9/e" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_7", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_7", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_7/t", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_7/e" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_8", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_8", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_8/t", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_8/e" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_2", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_2/t", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_2/e" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select/t", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select/e" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_1", "op": "Select", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_1/t", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_1/e" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_3/size_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_6" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": 
"ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_5/size_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_9" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_4/size", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_7", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_8" ], "attr": { "T": { "type": "DT_INT32" }, "axis": { "i": "0" }, "N": { "i": "2" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_1/size_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_2" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice/size", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_1" ], "attr": { "T": { "type": "DT_INT32" }, "axis": { "i": "0" }, "N": { "i": "2" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_3/size", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_6", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_3/size_const_axis" ], "attr": { "Tdim": 
{ "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_5/size", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_9", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_5/size_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_4", "op": "Slice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_4", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_8", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_4/size" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_1/size", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_2", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_1/size_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice", "op": "Slice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_6", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice/size" ], "attr": { "Index": { "type": "DT_INT32" }, 
"T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_3", "op": "Slice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_6", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_3/size" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_5", "op": "Slice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_5", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_10", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_5/size" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_9", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_4" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_1", "op": "Slice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_1/size" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_1", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_7", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_3" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_11", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_5" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_9", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16/stack_2" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17", "op": "StridedSlice", "input": [ 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_9", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17/stack_2" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "begin_mask": { "i": "0" }, "ellipsis_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_3", "op": "Shape", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_1" ], "attr": { "T": { "type": "DT_FLOAT" }, "out_type": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2/stack_2" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "begin_mask": { "i": "0" }, "ellipsis_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3", "op": "StridedSlice", "input": [ 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3/stack_2" ], "attr": { "shrink_axis_mask": { "i": "1" }, "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" }, "T": { "type": "DT_INT32" }, "Index": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_7", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13/stack_2" ], "attr": { "shrink_axis_mask": { "i": "1" }, "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" }, "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_11", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19/stack_1", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19/stack_2" ], "attr": { "Index": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" }, "shrink_axis_mask": { "i": "1" }, "begin_mask": { "i": "0" }, "ellipsis_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_16", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_16/x", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_17", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_17/x", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5", "op": "StridedSlice", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Shape_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5/stack", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5/stack_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5/stack_2" ], "attr": { "shrink_axis_mask": { "i": "1" }, "ellipsis_mask": { "i": "0" }, "begin_mask": { "i": "0" }, "new_axis_mask": { "i": "0" }, "end_mask": { "i": "0" }, "T": { "type": "DT_INT32" }, "Index": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_2", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_2/x", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_3", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_3/x", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_13", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_13/x", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_19", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_19/x", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_4/values_1", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_16", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_17" ], "attr": { "T": { "type": "DT_INT32" }, "axis": { "i": "0" }, "N": { "i": "2" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_5", "op": "Sub", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_5/x", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5" ], "attr": { "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack/values_1", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_2", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_3" ], "attr": { "T": { "type": "DT_INT32" }, "axis": { "i": "0" }, "N": { "i": "2" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_3/values_1_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_13" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_5/values_1_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_19" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_4", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_9", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_4/values_1" ], "attr": { "T": { "type": "DT_INT32" }, "axis": { "i": "1" }, "N": { "i": "2" } } }, { 
"name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_1/values_1_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_5" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack/values_1" ], "attr": { "T": { "type": "DT_INT32" }, "axis": { "i": "1" }, "N": { "i": "2" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_3/values_1", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_13", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_3/values_1_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_5/values_1", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_19", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_5/values_1_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_4", "op": "Pad", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_4", 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_4" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tpaddings": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_1/values_1", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_5", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_1/values_1_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad", "op": "Pad", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack" ], "attr": { "Tpaddings": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_3", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_7", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_3/values_1" ], "attr": { "T": { "type": "DT_INT32" }, "axis": { "i": "1" }, "N": { "i": "2" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_5", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_11", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_5/values_1" ], "attr": { "T": { "type": "DT_INT32" }, "axis": { "i": "1" }, "N": { "i": "2" } } }, { "name": 
"ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_11_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_4" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_1", "op": "Pack", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_1/values_1" ], "attr": { "T": { "type": "DT_INT32" }, "axis": { "i": "1" }, "N": { "i": "2" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_6_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_3", "op": "Pad", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_3", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_3" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tpaddings": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_5", "op": "Pad", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_5", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_5" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tpaddings": { "type": "DT_INT32" } } }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_11", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_4", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_11_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_1", "op": "Pad", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_1", "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_1" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tpaddings": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_6", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_6_const_axis" ], "attr": { "T": { "type": "DT_FLOAT" }, "Tdim": { "type": "DT_INT32" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_10_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_3" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_8_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_5" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "Identity_3", "op": "Identity", "input": [ 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_11" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_7_const_axis", "op": "Const", "input": [ "^StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_1" ], "attr": { "value": { "tensor": { "dtype": "DT_INT32", "tensorShape": {} } }, "dtype": { "type": "DT_INT32" } } }, { "name": "Identity_1", "op": "Identity", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_6" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_10", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_3", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_10_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_8", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_5", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_8_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_7", "op": "ExpandDims", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Pad_1", "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_7_const_axis" ], "attr": { "Tdim": { "type": "DT_INT32" }, "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Postprocessor/Cast_5", "op": "Cast", "input": [ 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_10" ], "attr": { "SrcT": { "type": "DT_FLOAT" }, "Truncate": { "b": false }, "DstT": { "type": "DT_INT32" } } }, { "name": "StatefulPartitionedCall/add", "op": "AddV2", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_8", "StatefulPartitionedCall/add/y" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "Identity_4", "op": "Identity", "input": [ "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_7" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "StatefulPartitionedCall/Cast_1", "op": "Cast", "input": [ "StatefulPartitionedCall/Postprocessor/Cast_5" ], "attr": { "SrcT": { "type": "DT_INT32" }, "Truncate": { "b": false }, "DstT": { "type": "DT_FLOAT" } } }, { "name": "Identity_2", "op": "Identity", "input": [ "StatefulPartitionedCall/add" ], "attr": { "T": { "type": "DT_FLOAT" } } }, { "name": "Identity", "op": "Identity", "input": [ "StatefulPartitionedCall/Cast_1" ], "attr": { "T": { "type": "DT_FLOAT" } } } ], "library": {}, "versions": { "producer": 440 } }, "weightsManifest": [ { "paths": [ "group1-shard1of4.bin", "group1-shard2of4.bin", "group1-shard3of4.bin", "group1-shard4of4.bin" ], "weights": [ { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_7", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_13/x", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_6", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/Reshape_3", "shape": [ 51150 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_3/axis", "shape": [], 
"dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_3/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_3/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_3/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_3/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_3/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_12/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_12/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_6/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_6/t", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_6/e", "shape": [], "dtype": "int32" }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_13/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_1", "shape": [ 2 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_2/x", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_2/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_3/x", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros", "shape": [ 2 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice/stack_2", "shape": [ 1 ], "dtype": "int32" 
}, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select/t", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select/e", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_6/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_6/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_1/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_1/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_1/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_1/t", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_1/e", "shape": [], "dtype": "int32" }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_3/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_11", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_19/x", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_10", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/add_1", "shape": [ 100 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_5/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_5/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_5/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_5/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_5/axis", "shape": [], "dtype": "int32" }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_18/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_18/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_9/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_9/t", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_9/e", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_19/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/add/y", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_9", "shape": [ 2 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_16/x", "shape": [], "dtype": "int32" }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_16/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_17/x", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_8", "shape": [ 2 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_14/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_14/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_7/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_7/t", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_7/e", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_4/axis", "shape": [], "dtype": "int32" }, { 
"name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_4/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_4/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_4/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_4/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_4/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_15/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_15/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_8/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_8/t", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_8/e", "shape": [], "dtype": "int32" }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_17/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_3", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_5/x", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/zeros_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_1/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1/start", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_1/delta", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_1/GatherV2_1/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2/start", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range_2/delta", "shape": [], "dtype": "int32" }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather_2/GatherV2_1/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_4/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/sub_4/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Greater_2/y", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_2/t", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Select_2/e", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/strided_slice_5/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/GreaterEqual/y", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Const_1", "shape": [ 1 ], 
"dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum_1/x", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/split/split_dim", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/Gather/GatherV2_6/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ChangeCoordinateFrame/Scale/concat/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_4/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField_1/strided_slice/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Area/split/split_dim", "shape": [], "dtype": "int32" }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_1/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_1/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/mul_1/x", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Area/split/split_dim", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_2", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/split/split_dim", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/range", "shape": [ 100 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1/Reshape/shape", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros_1/Const", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat_1/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/mul", "shape": [ 100 ], "dtype": 
"float32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_4_recip", "shape": [], "dtype": "float32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_5_recip", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv", "shape": [ 51150 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/add", "shape": [ 51150 ], "dtype": "float32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_2_recip", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/sub_1", "shape": [ 51150 ], "dtype": "float32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_6_recip", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/truediv_1", "shape": [ 51150 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/add_1", "shape": [ 51150 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape/shape", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_1/shape", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_2/shape", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_3/shape", "shape": [ 3 ], "dtype": "int32" }, { "name": "unknown_328", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "unknown_329", "shape": [ 1, 1, 128, 24 ], "dtype": "float32" }, { "name": "unknown_330", "shape": [ 24 ], "dtype": "float32" }, { 
"name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead/Reshape_4/shape", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/concat/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/Reshape_1/shape", "shape": [ 2 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/transpose/perm", "shape": [ 2 ], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_3_recip", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/get_center_coordinates_and_sizes/sub", "shape": [ 51150 ], "dtype": "float32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/Decode/truediv_7_recip", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/Decode/transpose_1/perm", "shape": [ 2 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/stack", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/ExpandDims_1/dim", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling_1/Reshape/shape", "shape": [ 4 ], "dtype": "int32" }, { "name": "unknown_269", "shape": [ 1, 1, 32, 128 ], "dtype": "float32" }, { "name": "unknown_270", "shape": [ 128 ], "dtype": "float32" }, { "name": "unknown_271", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape/shape", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/nearest_neighbor_upsampling/nearest_neighbor_upsampling/Reshape/shape", "shape": [ 4 ], "dtype": "int32" }, { "name": "unknown_261", "shape": [ 1, 1, 96, 128 ], "dtype": 
"float32" }, { "name": "unknown_262", "shape": [ 128 ], "dtype": "float32" }, { "name": "unknown_263", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_1/shape", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_2/shape", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_3/shape", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Preprocessor/mul/x", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Preprocessor/sub/y", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Preprocessor/ResizeImage/resize/ExpandDims/dim", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Preprocessor/ResizeImage/stack", "shape": [ 2 ], "dtype": "int32" }, { "name": "unknown_259", "shape": [ 1, 1, 1280, 128 ], "dtype": "float32" }, { "name": "unknown_260", "shape": [ 128 ], "dtype": "float32" }, { "name": "unknown_277", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "unknown_283", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "unknown_304", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "unknown_310", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "unknown_316", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "unknown_322", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "unknown_331", "shape": [ 3, 3, 128, 1 ], "dtype": "float32" }, { "name": "unknown_332", "shape": [ 1, 1, 128, 12 ], "dtype": "float32" }, { "name": "unknown_333", "shape": [ 12 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead/Reshape_4/shape", "shape": [ 3 ], "dtype": "int32" 
}, { "name": "StatefulPartitionedCall/concat_1/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/Slice/begin", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/Slice/size", "shape": [ 3 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Reshape/shape", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Minimum", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/iou_threshold", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/score_threshold", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/non_max_suppression_with_scores/soft_nms_sigma", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_2/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros/Reshape/shape", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/zeros/Const", "shape": [], 
"dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/concat/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Gather/GatherV2_5/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/strided_slice/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/SortByField/Gather/GatherV2_6/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_3", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/strided_slice_1", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/concat/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Greater/y", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Reshape/shape", "shape": [ 1 ], "dtype": "int32" }, { "name": 
"StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ClipToWindow/Gather/GatherV2_6/axis", "shape": [], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3/stack", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3/stack_1", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/strided_slice_3/stack_2", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1/Reshape/shape", "shape": [ 1 ], "dtype": "int32" }, { "name": "StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/ones_1/Const", "shape": [], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv1/Conv2D_weights", "shape": [ 3, 3, 3, 32 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv1/Conv2D_bn_offset", "shape": [ 32 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_depthwise/depthwise_weights", "shape": [ 3, 3, 32, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_depthwise/depthwise_bn_offset", "shape": [ 32 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_project/Conv2D_weights", "shape": [ 1, 1, 32, 16 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/expanded_conv_project/Conv2D_bn_offset", "shape": [ 16 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_expand/Conv2D_weights", "shape": [ 1, 1, 16, 96 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_expand/Conv2D_bn_offset", "shape": [ 96 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_depthwise/depthwise_weights", "shape": [ 3, 3, 96, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_depthwise/depthwise_bn_offset", "shape": [ 96 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_project/Conv2D_weights", "shape": [ 1, 1, 96, 24 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_1_project/Conv2D_bn_offset", "shape": [ 24 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_expand/Conv2D_weights", "shape": [ 1, 1, 24, 144 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_expand/Conv2D_bn_offset", "shape": [ 144 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_depthwise/depthwise_weights", "shape": [ 3, 3, 144, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_depthwise/depthwise_bn_offset", "shape": [ 144 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_project/Conv2D_weights", "shape": [ 1, 1, 144, 24 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_2_project/Conv2D_bn_offset", "shape": [ 24 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_expand/Conv2D_weights", "shape": [ 1, 1, 24, 144 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_expand/Conv2D_bn_offset", "shape": [ 144 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_depthwise/depthwise_weights", "shape": [ 3, 3, 144, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_depthwise/depthwise_bn_offset", "shape": [ 144 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_project/Conv2D_weights", "shape": [ 1, 1, 144, 32 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_3_project/Conv2D_bn_offset", "shape": [ 32 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_expand/Conv2D_weights", "shape": [ 1, 1, 32, 192 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_expand/Conv2D_bn_offset", "shape": [ 192 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_depthwise/depthwise_weights", "shape": [ 3, 3, 192, 1 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_depthwise/depthwise_bn_offset", "shape": [ 192 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_project/Conv2D_weights", "shape": [ 1, 1, 192, 32 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_4_project/Conv2D_bn_offset", "shape": [ 32 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_expand/Conv2D_weights", "shape": [ 1, 1, 32, 192 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_expand/Conv2D_bn_offset", "shape": [ 192 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_depthwise/depthwise_weights", "shape": [ 3, 3, 192, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_depthwise/depthwise_bn_offset", "shape": [ 192 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_project/Conv2D_weights", "shape": [ 1, 1, 192, 32 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_5_project/Conv2D_bn_offset", "shape": [ 32 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_expand/Conv2D_weights", "shape": [ 1, 1, 32, 192 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_expand/Conv2D_bn_offset", "shape": [ 192 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_depthwise/depthwise_weights", "shape": [ 3, 3, 192, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_depthwise/depthwise_bn_offset", "shape": [ 192 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_project/Conv2D_weights", "shape": [ 1, 1, 192, 64 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_6_project/Conv2D_bn_offset", "shape": [ 64 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_expand/Conv2D_weights", "shape": [ 1, 1, 64, 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_expand/Conv2D_bn_offset", "shape": [ 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_depthwise/depthwise_weights", "shape": [ 3, 3, 384, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_depthwise/depthwise_bn_offset", "shape": [ 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_project/Conv2D_weights", "shape": [ 1, 1, 384, 64 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_7_project/Conv2D_bn_offset", "shape": [ 64 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_expand/Conv2D_weights", "shape": [ 1, 1, 64, 384 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_expand/Conv2D_bn_offset", "shape": [ 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_depthwise/depthwise_weights", "shape": [ 3, 3, 384, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_depthwise/depthwise_bn_offset", "shape": [ 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_project/Conv2D_weights", "shape": [ 1, 1, 384, 64 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_8_project/Conv2D_bn_offset", "shape": [ 64 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_expand/Conv2D_weights", "shape": [ 1, 1, 64, 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_expand/Conv2D_bn_offset", "shape": [ 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_depthwise/depthwise_weights", "shape": [ 3, 3, 384, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_depthwise/depthwise_bn_offset", "shape": [ 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_project/Conv2D_weights", "shape": [ 1, 1, 384, 64 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_9_project/Conv2D_bn_offset", "shape": [ 64 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_expand/Conv2D_weights", "shape": [ 1, 1, 64, 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_expand/Conv2D_bn_offset", "shape": [ 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_depthwise/depthwise_weights", "shape": [ 3, 3, 384, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_depthwise/depthwise_bn_offset", "shape": [ 384 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_project/Conv2D_weights", "shape": [ 1, 1, 384, 96 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_10_project/Conv2D_bn_offset", "shape": [ 96 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_expand/Conv2D_weights", "shape": [ 1, 1, 96, 576 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_expand/Conv2D_bn_offset", "shape": [ 576 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_depthwise/depthwise_weights", "shape": [ 3, 3, 576, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_depthwise/depthwise_bn_offset", "shape": [ 576 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_project/Conv2D_weights", "shape": [ 1, 1, 576, 96 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_11_project/Conv2D_bn_offset", "shape": [ 96 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_expand/Conv2D_weights", "shape": [ 1, 1, 96, 576 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_expand/Conv2D_bn_offset", "shape": [ 576 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_depthwise/depthwise_weights", "shape": [ 3, 3, 576, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_depthwise/depthwise_bn_offset", "shape": [ 576 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_project/Conv2D_weights", "shape": [ 1, 1, 576, 96 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_12_project/Conv2D_bn_offset", "shape": [ 96 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_expand/Conv2D_weights", "shape": [ 1, 1, 96, 576 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_expand/Conv2D_bn_offset", "shape": [ 576 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_depthwise/depthwise_weights", "shape": [ 3, 3, 576, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_depthwise/depthwise_bn_offset", "shape": [ 576 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_project/Conv2D_weights", "shape": [ 1, 1, 576, 160 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_13_project/Conv2D_bn_offset", "shape": [ 160 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_expand/Conv2D_weights", "shape": [ 1, 1, 160, 960 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_expand/Conv2D_bn_offset", "shape": [ 960 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_depthwise/depthwise_weights", "shape": [ 3, 3, 960, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_depthwise/depthwise_bn_offset", "shape": [ 960 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_project/Conv2D_weights", "shape": [ 1, 1, 960, 160 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_14_project/Conv2D_bn_offset", "shape": [ 160 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_expand/Conv2D_weights", "shape": [ 1, 1, 160, 960 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_expand/Conv2D_bn_offset", "shape": [ 960 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_depthwise/depthwise_weights", "shape": [ 3, 3, 960, 1 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_depthwise/depthwise_bn_offset", "shape": [ 960 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_project/Conv2D_weights", "shape": [ 1, 1, 960, 160 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_15_project/Conv2D_bn_offset", "shape": [ 160 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_expand/Conv2D_weights", "shape": [ 1, 1, 160, 960 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_expand/Conv2D_bn_offset", "shape": [ 960 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_depthwise/depthwise_weights", "shape": [ 3, 3, 960, 1 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_depthwise/depthwise_bn_offset", "shape": [ 960 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_project/Conv2D_weights", "shape": [ 1, 1, 960, 320 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/block_16_project/Conv2D_bn_offset", "shape": [ 320 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv_1/Conv2D_weights", "shape": [ 1, 1, 320, 1280 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/functional_1/Conv_1/Conv2D_bn_offset", "shape": [ 1280 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_2_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_2_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20_depthwise_conv/separable_conv2d_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_20_depthwise_conv/separable_conv2d_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2_depthwise_conv/separable_conv2d_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_2_depthwise_conv/separable_conv2d_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_2_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_2_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_3_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_3_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21_depthwise_conv/separable_conv2d_weights", "shape": [ 1, 1, 128, 128 ], "dtype": 
"float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/bottom_up_Conv2d_21_depthwise_conv/separable_conv2d_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_1_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_1_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_2_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_2_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_3_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_3_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_4_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_4_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1_depthwise_conv/separable_conv2d_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/ssd_mobile_net_v2fpn_keras_feature_extractor/FeatureMaps/top_down/smoothing_1_depthwise_conv/separable_conv2d_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_1_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_1_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_2_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_2_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_3_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_3_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_4_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_4_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_0/separable_conv2d_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_1_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_1_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_3_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_3_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_4_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_4_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_1/separable_conv2d_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_1_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_1_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_4_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_4_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": 
"StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_2/separable_conv2d_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_weights", "shape": [ 1, 1, 128, 128 ], "dtype": "float32" }, { "name": "StatefulPartitionedCall/WeightSharedConvolutionalBoxPredictor/PredictionTower/conv2d_3/separable_conv2d_bn_offset", "shape": [ 128 ], "dtype": "float32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Preprocessor/stack_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_12_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_3/size_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_5/size_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/Slice_1/size_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_3/values_1_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_5/values_1_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/PadOrClipBoxList/stack_1/values_1_const_axis", "shape": [], "dtype": "int32" 
}, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_11_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_6_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_10_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_8_const_axis", "shape": [], "dtype": "int32" }, { "name": "ConstantFolding/StatefulPartitionedCall/Postprocessor/BatchMultiClassNonMaxSuppression/stack_7_const_axis", "shape": [], "dtype": "int32" } ] } ] } ================================================ FILE: public/robots.txt ================================================ # https://www.robotstxt.org/robotstxt.html User-agent: * Disallow: ================================================ FILE: serve.json ================================================ { "public": "./build", "rewrites": [ { "source": "/links-detector", "destination": "index.html" }, { "source": "/links-detector/", "destination": "index.html" }, { "source": "/links-detector/:a/:b?/:c?/:d?/:e?/:f?/:g?", "destination": "/:a/:b?/:c?/:d?/:e?/:f?/:g?" 
} ], "redirects": [ { "source": "/", "destination": "/links-detector/", "type": 302 } ] } ================================================ FILE: src/components/App.tsx ================================================ import React from 'react'; import { Router } from 'react-router-dom'; import { createHashHistory, Location } from 'history'; import Template from './shared/Template'; import Routes from './Routes'; import ErrorBoundary from './shared/ErrorBoundary'; import { gaPageView } from '../utils/analytics'; const history = createHashHistory(); history.listen((location: Location): void => { gaPageView(location); }); function App(): React.ReactElement { return ( ); } export default App; ================================================ FILE: src/components/Routes.tsx ================================================ import React from 'react'; import { Switch, Route } from 'react-router-dom'; import { ROUTES } from '../constants/routes'; import HomeScreen from './screens/HomeScreen'; import DetectorScreen from './screens/DetectorScreen'; import NoteFoundScreen from './screens/NotFoundScreen'; import DebugScreen from './screens/DebugScreen'; import DemoScreen from './screens/DemoScreen'; function Routes(): React.ReactElement { return ( ); } export default Routes; ================================================ FILE: src/components/elements/BoxesCanvas.tsx ================================================ import React, { useRef, useEffect, useCallback } from 'react'; import { DetectionBox } from '../../utils/graphModel'; import useLogger from '../../hooks/useLogger'; type BoxesCanvasProps = { boxes: DetectionBox[], width: number, height: number, normalized?: boolean, boxColor?: string, boxLabelColor?: string, }; const boxColorDefault = '#2fff00'; const boxFrameWidth = 1; const boxLabelFont = '10px helvetica'; const boxLabelColorDefault = '#000000'; const boxLabelPadding = 4; const BoxesCanvas = (props: BoxesCanvasProps): React.ReactElement => { const { boxes, width, 
height, normalized = true, boxColor = boxColorDefault, boxLabelColor = boxLabelColorDefault, } = props; const logger = useLogger({ context: 'DetectionBoxes' }); const canvasRef = useRef(null); const drawDetections = (): void => { if (!canvasRef.current || !boxes) { return; } const ctx: CanvasRenderingContext2D | null = canvasRef.current.getContext('2d'); if (!ctx) { logger.logError('cannot get canvas 2D context'); return; } ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height); ctx.font = boxLabelFont; ctx.textBaseline = 'top'; let normalizedBoxes: DetectionBox[] = [...boxes]; if (normalized) { normalizedBoxes = normalizedBoxes.map((box: DetectionBox) => { const { x1, y1, x2, y2, categoryId, score, } = box; const normalizeByWidth = (w: number): number => Math.floor(width * w); const normalizeByHeight = (h: number): number => Math.floor(height * h); return { x1: normalizeByWidth(x1), y1: normalizeByHeight(y1), x2: normalizeByWidth(x2), y2: normalizeByHeight(y2), categoryId, score, }; }); } logger.logDebug('drawDetections', { boxes, normalizedBoxes, }); normalizedBoxes.forEach((box: DetectionBox) => { const { x1, y1, x2, y2, score, } = box; // Draw the bounding box. ctx.strokeStyle = boxColor; ctx.lineWidth = boxFrameWidth; ctx.strokeRect(x1, y1, x2 - x1, y2 - y1); // Draw the label background. ctx.fillStyle = boxColor; const label = `${Math.floor(score * 100)}%`; const textWidth = ctx.measureText(label).width; const textHeight = parseInt(boxLabelFont, 10); // Draw top left rectangle. ctx.fillRect( x1 - boxFrameWidth, y1 - textHeight - boxLabelPadding, textWidth + boxLabelPadding, textHeight + boxLabelPadding, ); // Draw the text last to ensure it's on top. 
ctx.fillStyle = boxLabelColor; ctx.fillText( label, x1 + boxLabelPadding / 2 - boxFrameWidth, y1 - boxLabelPadding / 2 - textHeight, ); }); }; const drawDetectionsCallback = useCallback(drawDetections, [ boxes, boxColor, boxLabelColor, normalized, width, height, logger, ]); useEffect(() => { drawDetectionsCallback(); }, [drawDetectionsCallback]); return ( ); }; export default BoxesCanvas; ================================================ FILE: src/components/elements/DebugInfo.tsx ================================================ import React, { useEffect, useState } from 'react'; import { getTFInfo, isCanvasFilterSupported, isWebGLSupported, TFInfo, } from '../../utils/debug'; import useLogger from '../../hooks/useLogger'; import { DETECTION_CONFIG } from '../../configs/detectionConfig'; function DebugInfo(): React.ReactElement { const [tfInfo, setTfInfo] = useState(null); const logger = useLogger({ context: 'DebugInfo' }); useEffect(() => { logger.logDebug('useEffect'); getTFInfo({ modelURL: DETECTION_CONFIG.modelLoading.linksDetectorModelURL, }).then((info: TFInfo) => { setTfInfo(info); logger.logDebug('useEffect: then', { info }); }); }, [logger]); if (!tfInfo) { return
Loading...
; } const supported = 'YES'; const notSupported = 'NO'; return (
  • Platform name: {tfInfo.platformName}
  • Backend name: {tfInfo.backendName}
  • WebGL: {isWebGLSupported() ? supported : notSupported}
  • Canvas Filters: {isCanvasFilterSupported() ? supported : notSupported}
); } export default DebugInfo; ================================================ FILE: src/components/elements/DetectedLinks.tsx ================================================ import React, { CSSProperties } from 'react'; import { DetectedLink } from '../../hooks/useLinksDetector'; import Icon from '../shared/Icon'; import { ICON_KEYS } from '../../icons'; import { DETECTION_BACKGROUND_COLOR_CLASS, DETECTION_TEXT_COLOR_CLASS, } from '../../constants/style'; type DetectedLinksProps = { links: DetectedLink[], containerSize: number, }; function DetectedLinks(props: DetectedLinksProps): React.ReactElement | null { const { links, containerSize } = props; if (!links || !links.length) { return null; } const containerStyle: CSSProperties = { width: `${containerSize}px`, height: `${containerSize}px`, }; const linkStyle: CSSProperties = { fontSize: '12px', }; const linksElements = links.map((link: DetectedLink) => { const linkContainerStyle: CSSProperties = { marginTop: `${link.y1}px`, marginLeft: `${link.x1}px`, boxShadow: '0px 5px 10px 0px rgba(0,0,0,0.3)', }; /* eslint-disable react/jsx-no-target-blank */ return (
{link.url}
); }); return (
{ linksElements }
); } export default DetectedLinks; ================================================ FILE: src/components/elements/DetectedLinksPrefixes.tsx ================================================ import React, { CSSProperties } from 'react'; import { DetectionBox } from '../../utils/graphModel'; import { relativeToAbsolute } from '../../utils/image'; import Spinner from '../shared/Spinner'; import { DETECTION_CONFIG } from '../../configs/detectionConfig'; type DetectedLinksPrefixesProps = { boxes: DetectionBox[] | null, containerSize: number, }; function DetectedLinksPrefixes(props: DetectedLinksPrefixesProps): React.ReactElement | null { const { boxes, containerSize } = props; if (!boxes || !boxes.length) { return null; } const regionProposalPadding: number = Math.ceil( containerSize * DETECTION_CONFIG.ocr.regionProposalPadding, ); const containerStyle: CSSProperties = { width: `${containerSize}px`, height: `${containerSize}px`, display: 'block', overflow: 'hidden', }; const boxesElements = boxes.map((box: DetectionBox) => { const left: number = relativeToAbsolute(box.x1, containerSize); const top: number = relativeToAbsolute(box.y1, containerSize); const right: number = relativeToAbsolute(box.x2, containerSize); const bottom: number = relativeToAbsolute(box.y2, containerSize); const width: number = right - left; const height: number = bottom - top; const horizontalScaleFactor: number = 10; const boxStyle: CSSProperties = { marginLeft: `${left - regionProposalPadding}px`, marginTop: `${top}px`, width: `${horizontalScaleFactor * Math.max(width, height)}px`, height: `${height}px`, }; return (
); }); return (
{ boxesElements }
); } export default DetectedLinksPrefixes; ================================================ FILE: src/components/elements/LinksDetector.tsx ================================================ import React, { CSSProperties, useCallback, useEffect, useState, } from 'react'; import { Rectangle } from 'tesseract.js'; import CameraStream from '../shared/CameraStream'; import useWindowSize from '../../hooks/useWindowSize'; import { DETECTION_CONFIG } from '../../configs/detectionConfig'; import Notification, { NotificationLevel } from '../shared/Notification'; import useLogger from '../../hooks/useLogger'; import ProgressBar from '../shared/ProgressBar'; import BoxesCanvas from './BoxesCanvas'; import { isDebugMode } from '../../constants/debug'; import ErrorBoundary from '../shared/ErrorBoundary'; import PixelsCanvas from './PixelsCanvas'; import useLinksDetector, { DetectionPerformance } from '../../hooks/useLinksDetector'; import { normalizeCSSFilterParam } from '../../utils/image'; import PerformanceMonitor from './PerformanceMonitor'; import { DetectionBox } from '../../utils/graphModel'; import DetectedLinks from './DetectedLinks'; import DetectedLinksPrefixes from './DetectedLinksPrefixes'; import { FRAME_PADDING_CLASS } from '../../constants/style'; const uiVideoBrightness = normalizeCSSFilterParam( DETECTION_CONFIG.imagePreprocessing.ui.brightness, ); const uiVideoContrast = normalizeCSSFilterParam( DETECTION_CONFIG.imagePreprocessing.ui.contrast, ); const videoStyle: CSSProperties = DETECTION_CONFIG.imagePreprocessing.ui.enabled ? 
{ filter: `brightness(${uiVideoBrightness}) contrast(${uiVideoContrast}) grayscale(1)`, } : {}; type LinksDetectorProps = { onLoaded?: () => void, onError?: () => void, }; function LinksDetector(props: LinksDetectorProps): React.ReactElement | null { const { onLoaded = (): void => {}, onError = (): void => {}, } = props; const logger = useLogger({ context: 'LiveDetector' }); const windowSize = useWindowSize(); const [ detectionPerformance, setDetectionPerformance, ] = useState(null); const { detectedLinks, detectLinks, error, loadingProgress, loadingStage, httpsBoxes, regionProposals, pixels, } = useLinksDetector({ modelURL: DETECTION_CONFIG.modelLoading.linksDetectorModelURL, maxBoxesNum: DETECTION_CONFIG.httpsDetection.maxBoxesNum, scoreThreshold: DETECTION_CONFIG.httpsDetection.scoreThreshold, iouThreshold: DETECTION_CONFIG.httpsDetection.IOUThreshold, workersNum: DETECTION_CONFIG.ocr.workersNum, language: DETECTION_CONFIG.ocr.language, }); const onLoadedCallback = useCallback(onLoaded, [onLoaded]); useEffect(() => { if (loadingProgress === null || loadingProgress < 1) { return; } logger.logDebug('useEffect: onLoadedCallback', { loadingProgress }); onLoadedCallback(); }, [loadingProgress, onLoadedCallback, logger]); const onErrorCallback = useCallback(onError, [onError]); useEffect(() => { if (!error) { return; } logger.logDebug('useEffect: onErrorCallback', { error }); onErrorCallback(); }, [error, onErrorCallback, logger]); const isDebug: boolean = isDebugMode(); if (error) { return (
{error}
); } if (loadingProgress === null || loadingProgress < 1) { return ; } if (!windowSize || !windowSize.width || !windowSize.height) { return ; } const onFrame = async (video: HTMLVideoElement): Promise => { const resizeToSize: number = Math.min( video.width, DETECTION_CONFIG.imagePreprocessing.model.size, ); logger.logDebug('onFrame start', { resizeToSize }); const currentDetectionPerformance: DetectionPerformance | null = await detectLinks({ video, applyFilters: DETECTION_CONFIG.imagePreprocessing.model.enabled, videoBrightness: DETECTION_CONFIG.imagePreprocessing.model.brightness, videoContrast: DETECTION_CONFIG.imagePreprocessing.model.contrast, regionProposalsPadding: DETECTION_CONFIG.ocr.regionProposalPadding, useRegionProposals: DETECTION_CONFIG.ocr.useRegionProposals, resizeToSize, }); if (isDebug) { setDetectionPerformance(currentDetectionPerformance); } logger.logDebug('onFrame end'); }; const videoSize: number = Math.min(windowSize.width, windowSize.height); const canvasContainerStyles: CSSProperties = { marginTop: `-${videoSize}px`, }; const detectedLinksContainerStyles: CSSProperties = { marginTop: `-${videoSize}px`, width: `${videoSize}px`, height: `${videoSize}px`, }; const cameraStream = ( ); const imageCanvas = isDebug ? (
) : null; const regionProposalBoxes: DetectionBox[] = regionProposals && regionProposals.length ? regionProposals.map((regionProposal: Rectangle): DetectionBox => { return { x1: regionProposal.left, x2: regionProposal.left + regionProposal.width, y1: regionProposal.top, y2: regionProposal.top + regionProposal.height, score: 0, categoryId: 0, }; }) : []; const regionProposalsCanvas = regionProposalBoxes && regionProposalBoxes.length && isDebug ? (
) : null; const httpsBoxesCanvas = httpsBoxes && isDebug ? (
) : null; const performanceMonitor = isDebug ? (
) : null; const detectedLinksPrefixesCanvas = httpsBoxes && httpsBoxes.length ? (
) : null; const detectedLinksCanvas = detectedLinks && detectedLinks.length ? (
) : null; return (
{cameraStream} {imageCanvas} {regionProposalsCanvas} {httpsBoxesCanvas} {performanceMonitor} {detectedLinksPrefixesCanvas} {detectedLinksCanvas}
); } export default LinksDetector; ================================================ FILE: src/components/elements/PerformanceMonitor.tsx ================================================ import React, { CSSProperties } from 'react'; import { DetectionPerformance } from '../../hooks/useLinksDetector'; type DetectionPerformanceProps = { metrics: DetectionPerformance | null, }; function PerformanceMonitor(props: DetectionPerformanceProps): React.ReactElement | null { const { metrics } = props; const monitorStyles: CSSProperties = { color: 'white', backgroundColor: 'black', padding: '10px', fontSize: '10px', }; if (!metrics) { return null; } return (
proc: {metrics.processing}s
avgProc: {metrics.avgProcessing}s
https: {metrics.inference}s
avgHttps: {metrics.avgInference}s
ocr: {metrics.ocr}s
avgOcr: {metrics.avgOcr}s
total: {metrics.total}s
fps: {metrics.fps}
); } export default PerformanceMonitor; ================================================ FILE: src/components/elements/PixelsCanvas.tsx ================================================ import React, { useEffect, useRef } from 'react'; import useLogger from '../../hooks/useLogger'; import { Pixels } from '../../utils/image'; type PixelsCanvasProps = { pixels: Pixels | null, width: number, height: number, }; function PixelsCanvas(props: PixelsCanvasProps): React.ReactElement { const { width, height, pixels } = props; const logger = useLogger({ context: 'ImageCanvas' }); const canvasRef = useRef(null); useEffect(() => { if (!canvasRef.current || !pixels) { return; } const ctx: CanvasRenderingContext2D | null = canvasRef.current.getContext('2d'); if (!ctx) { return; } logger.logDebug('useEffect'); ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height); ctx.drawImage(pixels, 0, 0); }, [pixels, width, height, logger]); return ( ); } export default PixelsCanvas; ================================================ FILE: src/components/screens/DebugScreen.tsx ================================================ import React from 'react'; import DebugInfo from '../elements/DebugInfo'; import PageTitle from '../shared/PageTitle'; function DebugScreen(): React.ReactElement { return ( <> ); } export default DebugScreen; ================================================ FILE: src/components/screens/DemoScreen.tsx ================================================ import React from 'react'; import PageTitle from '../shared/PageTitle'; import Demo from '../shared/Demo'; function DemoScreen(): React.ReactElement { return ( <>
); } export default DemoScreen; ================================================ FILE: src/components/screens/DetectorScreen.tsx ================================================ import React, { useState } from 'react'; import { useHistory, useLocation } from 'react-router-dom'; import { History, Location, LocationDescriptor } from 'history'; import LinksDetector from '../elements/LinksDetector'; import Modal from '../shared/Modal'; import { RouteNames, ROUTES } from '../../constants/routes'; import PageTitle from '../shared/PageTitle'; import useLogger from '../../hooks/useLogger'; function DetectorScreen(): React.ReactElement { const logger = useLogger({ context: 'DetectorScreen' }); const history: History = useHistory(); const location: Location = useLocation(); const [loaded, setLoaded] = useState(false); const [error, setError] = useState(false); const onModalClose = (): void => { const path: LocationDescriptor = { pathname: ROUTES[RouteNames.home].path, search: location.search, hash: location.hash, }; history.push(path); }; const onLoaded = (): void => { logger.logDebug('onLoaded', { loaded }); if (!loaded) { setLoaded(true); } }; const onError = (): void => { logger.logDebug('onError', { error }); if (!error) { setError(true); } }; return ( <> ); } export default DetectorScreen; ================================================ FILE: src/components/screens/HomeScreen.tsx ================================================ import React from 'react'; import { useHistory, useLocation } from 'react-router-dom'; import { History, LocationDescriptor, Location } from 'history'; import { ROUTES } from '../../constants/routes'; import LaunchButton from '../shared/LaunchButton'; import Promo from '../shared/Promo'; import PageTitle from '../shared/PageTitle'; function HomeScreen(): React.ReactElement { const history: History = useHistory(); const location: Location = useLocation(); const onLaunch = (): void => { const path: LocationDescriptor = { pathname: 
ROUTES.detector.path, search: location.search, hash: location.hash, }; history.push(path); }; return ( <>
Scan
); } export default HomeScreen; ================================================ FILE: src/components/screens/NotFoundScreen.tsx ================================================ import React, { useEffect } from 'react'; import { Link, useLocation } from 'react-router-dom'; import { Location } from 'history'; import { HOME_ROUTE } from '../../constants/routes'; import Notification, { NotificationLevel } from '../shared/Notification'; import PageTitle from '../shared/PageTitle'; import useLogger from '../../hooks/useLogger'; function NoteFoundScreen(): React.ReactElement { const logger = useLogger({ context: 'NoteFoundScreen' }); const location: Location = useLocation(); useEffect(() => { const path: string = (location.pathname || '') + (location.search || '') + (location.hash || ''); logger.logError(`page no found: ${path}`); }, [logger, location.pathname, location.search, location.hash]); return ( <>
Page not found
Try to start from Homepage
); } export default NoteFoundScreen; ================================================ FILE: src/components/shared/CameraStream.tsx ================================================ import React, { CSSProperties, useCallback, useEffect, useRef, useState, } from 'react'; import throttle from 'lodash/throttle'; import useLogger from '../../hooks/useLogger'; import Notification, { NotificationLevel } from './Notification'; import ErrorBoundary from './ErrorBoundary'; import Grid from './Grid'; type FacingMode = 'user' | 'environment'; type CameraStreamProps = { width: number, height: number, idealFrameRate: number, onFrame: (video: HTMLVideoElement) => Promise, facingMode?: FacingMode, videoStyle?: CSSProperties, withGrid?: boolean, }; const videoFrameRate = 30; const oneSecond = 1000; const gridVerticalCells = 10; const gridHorizontalCells = 4; /* global MediaStreamConstraints */ function CameraStream(props: CameraStreamProps): React.ReactElement { const { width, height, onFrame, idealFrameRate, facingMode = 'environment', videoStyle: videoStyleOverrides = {}, withGrid = false, } = props; const frameThrottlingMs = Math.floor(oneSecond / idealFrameRate); const logger = useLogger({ context: 'CameraStream' }); const videoRef = useRef(null); const [errorMessage, setErrorMessage] = useState(null); // On iOS Safari filters add weird 1px left and bottom white borders to the video. // To hide that border the -1px shift is introduced in the styles below. 
const VIDEO_PADDING = 2; const videoWidth = width + 2 * VIDEO_PADDING; const videoHeight = height + 2 * VIDEO_PADDING; const onLocalFrame = (): void => { requestAnimationFrame(() => { logger.logDebug('onLocalFrame'); if (videoRef.current) { // eslint-disable-next-line @typescript-eslint/no-use-before-define onFrame(videoRef.current).then(throttledOnLocalFrame); } }); }; const throttledOnLocalFrame = throttle( onLocalFrame, frameThrottlingMs, { leading: false, trailing: true, }, ); // eslint-disable-next-line react-hooks/exhaustive-deps const throttledOnLocalFrameCallback = useCallback(throttledOnLocalFrame, []); useEffect((): () => void => { if (!videoRef.current) { return (): void => { }; } logger.logDebug('useEffect'); if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) { const msg = 'Your browser does not support camera access'; setErrorMessage(msg); logger.logError(msg); return (): void => { }; } let localStream: MediaStream | null = null; const userMediaConstraints: MediaStreamConstraints = { audio: false, video: { width: { ideal: videoWidth }, height: { ideal: videoHeight }, facingMode: { ideal: facingMode }, frameRate: { ideal: videoFrameRate }, }, }; // @see: https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia navigator.mediaDevices.getUserMedia(userMediaConstraints) .then((stream: MediaStream) => { localStream = stream; if (!videoRef.current) { return; } videoRef.current.srcObject = stream; videoRef.current.onloadedmetadata = (): void => { logger.logDebug('onloadedmetadata'); requestAnimationFrame(throttledOnLocalFrameCallback); }; }) .catch((error: DOMException) => { let message = 'Video cannot be started'; if (error && error.message) { message += `: ${error.message}`; } setErrorMessage(message); logger.logError(message, error); }); return (): void => { logger.logDebug('useEffect return'); // Stop animation frames. throttledOnLocalFrameCallback.cancel(); // Stop camera access. 
if (localStream) { logger.logDebug('useEffect return: Stopping the camera access'); localStream.getTracks().forEach((track: MediaStreamTrack) => { track.stop(); }); } }; }, [videoWidth, videoHeight, facingMode, logger, throttledOnLocalFrameCallback]); if (errorMessage) { return ( {errorMessage} ); } const videoWrapperStyle: CSSProperties = { width: `${width}px`, height: `${height}px`, overflow: 'hidden', }; const videoStyle: CSSProperties = { objectFit: 'cover', width: `${videoWidth}px`, minWidth: `${videoWidth}px`, height: `${videoHeight}px`, minHeight: `${videoHeight}px`, marginLeft: `-${VIDEO_PADDING}px`, ...videoStyleOverrides, }; const gridStyles: CSSProperties = { marginTop: `-${videoHeight}px`, }; const gridCanvas = withGrid ? (
) : null; return (
{gridCanvas}
); } export default CameraStream; ================================================ FILE: src/components/shared/Demo.tsx ================================================ import React, { SyntheticEvent, useState, } from 'react'; import { BASE_VIDEO_PATH } from '../../constants/routes'; import useLogger from '../../hooks/useLogger'; import Notification, { NotificationLevel } from './Notification'; const mp4DemoPath: string = `${BASE_VIDEO_PATH}/demo-black-720p.mp4`; const webmDemoPath: string = `${BASE_VIDEO_PATH}/demo-black-720p.webm`; function Demo(): React.ReactElement { const logger = useLogger({ context: 'Demo' }); const [videoError, setVideoError] = useState(null); const onVideoError = (event: SyntheticEvent): void => { const errorMessage: string = 'Video cannot be loaded'; setVideoError(errorMessage); logger.logError(errorMessage, { event }); }; if (videoError) { return ( {videoError} ); } /* eslint-disable jsx-a11y/media-has-caption */ return ( ); } export default Demo; ================================================ FILE: src/components/shared/EnhancedRow.tsx ================================================ import React from 'react'; type EnhancedRowProps = { content: React.ReactNode, contentClassName?: string, className?: string, startEnhancer?: React.ReactNode, }; function EnhancedRow(props: EnhancedRowProps): React.ReactElement { const { startEnhancer, content, contentClassName = '', className = '', } = props; const startEnhancerElement = startEnhancer ? (
{startEnhancer}
) : null; const contentElement = (
{content}
); return (
{startEnhancerElement} {contentElement}
); } export default EnhancedRow; ================================================ FILE: src/components/shared/ErrorBoundary.tsx ================================================ import React, { ErrorInfo } from 'react'; import Notification, { NotificationLevel } from './Notification'; import { buildLoggers, Loggers } from '../../utils/logger'; type ErrorBoundaryProps = { children: React.ReactNode, }; type ErrorBoundaryState = { hasError: boolean, }; class ErrorBoundary extends React.Component { private logger: Loggers; constructor(props: any) { super(props); this.state = { hasError: false, }; this.logger = buildLoggers({ context: 'ErrorBoundary' }); } static getDerivedStateFromError(): ErrorBoundaryState { return { hasError: true }; } componentDidCatch(error: Error, errorInfo: ErrorInfo): void { this.logger.logError('componentDidCatch', { error, errorInfo, }); } render(): React.ReactNode { const { hasError } = this.state; const { children } = this.props; if (hasError) { return ( Component has crashed ); } return children; } } export default ErrorBoundary; ================================================ FILE: src/components/shared/Footer.tsx ================================================ import React from 'react'; import HyperLink from './HyperLink'; import { GITHUB_BASE_URL, GITHUB_ISSUES_LINK } from '../../constants/links'; import { ICON_KEYS } from '../../icons'; import { ROUTES } from '../../constants/routes'; function Footer(): React.ReactElement { return (
About Demo Issues
); } export default Footer; ================================================ FILE: src/components/shared/Grid.tsx ================================================ import React, { CSSProperties } from 'react'; type GridProps = { vCells: number, hCells: number, width: number, height: number, }; function Grid(props: GridProps): React.ReactElement { const { vCells, hCells, width, height, } = props; const containerStyle: CSSProperties = { display: 'grid', width: `${width}px`, height: `${height}px`, gridTemplateColumns: `repeat(${hCells}, 1fr)`, gridTemplateRows: `repeat(${vCells}, 1fr)`, }; const borderStyle: string = '1px dashed rgba(0, 0, 0, 0.4)'; const cellStyle: CSSProperties = { borderLeft: borderStyle, borderBottom: borderStyle, }; const leftColumnCellStyle: CSSProperties = { borderLeft: '0px', borderBottom: borderStyle, }; const lastRowCellStyle: CSSProperties = { borderLeft: borderStyle, borderBottom: '0px', }; const leftBottomCellStyle: CSSProperties = { borderLeft: '0px', borderBottom: '0px', }; const gridItems: React.ReactNode[] = []; for (let itemIndex = 0; itemIndex < vCells * hCells; itemIndex += 1) { // Generic row style. let style: CSSProperties = cellStyle; if (itemIndex === hCells * (vCells - 1)) { // Left bottom column. style = leftBottomCellStyle; } else if (itemIndex % hCells === 0) { // Left column. style = leftColumnCellStyle; } else if (itemIndex > hCells * (vCells - 1)) { // Bottom row. style = lastRowCellStyle; } gridItems.push((
)); } return (
{gridItems}
); } export default Grid; ================================================ FILE: src/components/shared/Header.tsx ================================================ import React from 'react'; import Logo from './Logo'; function Header(): React.ReactElement { return (
); } export default Header; ================================================ FILE: src/components/shared/HyperLink.tsx ================================================ import React from 'react'; import { Link } from 'react-router-dom'; import { ICON_KEYS } from '../../icons'; import Icon from './Icon'; import { LINKS_TEXT_HOVER_COLOR_CLASS } from '../../constants/style'; type HyperLinkProps = { to: string, children: React.ReactNode, iconKey?: ICON_KEYS, className?: string, routerLink?: boolean, }; function HyperLink(props: HyperLinkProps): React.ReactElement { const { to, children, iconKey, className = '', routerLink = false, } = props; const icon = iconKey ? ( ) : null; const linkContent = icon ? ( {icon} {children} ) : children; const linkClassName: string = `underline text-sm ${className} hover:${LINKS_TEXT_HOVER_COLOR_CLASS}`; if (routerLink) { return ( {linkContent} ); } return ( {linkContent} ); } export default HyperLink; ================================================ FILE: src/components/shared/Icon.tsx ================================================ import React from 'react'; import { ICON_KEYS, ICONS } from '../../icons'; type IconProps = { iconKey: ICON_KEYS, className?: string | undefined, }; function Icon(props: IconProps): React.ReactElement | null { const { iconKey, className } = props; if (!Object.prototype.hasOwnProperty.call(ICONS, iconKey)) { return null; } const icon = ICONS[iconKey]; const IconComponent = icon.component; const fillCurrent = Object.prototype.hasOwnProperty.call(icon, 'fillCurrent') ? icon.fillCurrent : true; const fillCurrentClass = fillCurrent ? 
'fill-current' : ''; return ( ); } export default Icon; ================================================ FILE: src/components/shared/LaunchButton.tsx ================================================ import React, { CSSProperties } from 'react'; import { LAUNCH_BUTTON_BACKGROUND_HOVER_CLASS } from '../../constants/style'; // import detectionImage from '../../images/detection.gif'; type LaunchButtonProps = { onClick: () => void, children: React.ReactNode, }; function LaunchButton(props: LaunchButtonProps): React.ReactElement { const { children, onClick } = props; const buttonSizePx: number = 180; const wrapperStyles: CSSProperties = { width: `${buttonSizePx}px`, height: `${buttonSizePx}px`, overflow: 'visible', }; const buttonBackgroundStyle: CSSProperties = { width: `${buttonSizePx}px`, height: `${buttonSizePx}px`, background: 'rgba(255, 255, 255, .3)', border: '1px solid rgba(255, 255, 255, .7)', position: 'absolute', }; const buttonStyles: CSSProperties = { width: `${buttonSizePx}px`, height: `${buttonSizePx}px`, // backgroundImage: `url(${detectionImage})`, backgroundSize: 'cover', overflow: 'hidden', position: 'absolute', }; const buttonTextStyles: CSSProperties = { width: `${buttonSizePx}px`, height: `${buttonSizePx}px`, // backgroundColor: 'rgba(255, 255, 255, .7)', overflow: 'hidden', }; return (
); } export default LaunchButton; ================================================ FILE: src/components/shared/Logo.tsx ================================================ import React from 'react'; import { Link } from 'react-router-dom'; import Icon from './Icon'; import { ICON_KEYS } from '../../icons'; import EnhancedRow from './EnhancedRow'; import { HOME_ROUTE } from '../../constants/routes'; import { LINKS_TEXT_HOVER_COLOR_CLASS, THEME_BG_COLOR_CLASS } from '../../constants/style'; import { GITHUB_ISSUES_LINK } from '../../constants/links'; function Logo(): React.ReactElement { const logoIcon = ( ); const content = ( <> Links Detector alpha ); return ( ); } export default Logo; ================================================ FILE: src/components/shared/MainNavigation.tsx ================================================ import React from 'react'; import { NavLink } from 'react-router-dom'; import { ROUTES } from '../../constants/routes'; function MainNavigation(): React.ReactElement { return (
  • Debug
  • Debug
); } export default MainNavigation; ================================================ FILE: src/components/shared/Modal.tsx ================================================ import React from 'react'; import ModalCloseButton from './ModalCloseButton'; type ModalProps = { children: React.ReactNode, onClose?: () => void, disableClose?: boolean, }; function Modal(props: ModalProps): React.ReactElement { const { children, onClose = (): void => {}, disableClose = false, } = props; const bgClass = 'bg-black'; let modalContainerClasses = 'absolute left-0 top-0 z-10 w-full h-full overflow-hidden flex items-center justify-center flex-col fade-in-5'; if (bgClass) { modalContainerClasses += ` ${bgClass}`; } const iconContainerClass = 'w-8 h-8 absolute right-0 top-0 m-3 z-20'; const modalContentClass = 'w-full flex items-center justify-center flex-col'; const closeButton = !disableClose ? () : null; return (
{closeButton}
{children}
); } export default Modal; ================================================ FILE: src/components/shared/ModalCloseButton.tsx ================================================ import React from 'react'; import Icon from './Icon'; import { ICON_KEYS } from '../../icons'; type ModalCloseButtonProps = { onClick: () => void, }; function ModalCloseButton(props: ModalCloseButtonProps): React.ReactElement { const { onClick } = props; const commonClasses = 'transition duration-300 ease-in-out w-full h-full fade-in-10'; const iconButtonClass = `${commonClasses} cursor-pointer border-0 p-0 m-0 rounded-full focus:outline-none bg-black hover:bg-white`; const iconClass = `${commonClasses} text-white hover:text-black`; return ( ); } export default ModalCloseButton; ================================================ FILE: src/components/shared/Notification.tsx ================================================ import React from 'react'; import { ICON_KEYS } from '../../icons'; import Icon from './Icon'; export enum NotificationLevel { INFO, WARNING, DANGER, } type NotificationProps = { children: React.ReactNode, level?: NotificationLevel, }; function Notification(props: NotificationProps): React.ReactElement { const { children, level = NotificationLevel.INFO, } = props; let bgColor; let textColor; let borderColor; switch (level) { case NotificationLevel.DANGER: bgColor = 'bg-red-600'; textColor = 'text-white'; borderColor = bgColor; break; case NotificationLevel.WARNING: bgColor = 'bg-yellow-600'; textColor = 'text-white'; borderColor = bgColor; break; case NotificationLevel.INFO: bgColor = 'bg-blue-600'; textColor = 'text-white'; borderColor = bgColor; break; default: bgColor = 'bg-white'; textColor = 'text-black'; borderColor = 'border-black'; } return (
{children}
); } export default Notification; ================================================ FILE: src/components/shared/PageTitle.tsx ================================================ import React from 'react'; import { Helmet } from 'react-helmet'; import usePageTitle from '../../hooks/usePageTitle'; import { APP_TITLE } from '../../constants/page'; function PageTitle(): React.ReactElement | null { const { pageTitle } = usePageTitle(); return ( {pageTitle || APP_TITLE} ); } export default PageTitle; ================================================ FILE: src/components/shared/ProgressBar.tsx ================================================ import React from 'react'; import { ZeroOneRange } from '../../utils/types'; type ProgressBarProps = { progress?: ZeroOneRange | null, text?: string | null, }; const progressAnimationTimeS = 0.5; function ProgressBar(props: ProgressBarProps): React.ReactElement { const { progress, text } = props; const progressPercentage = progress !== undefined && progress !== null ? Math.max(Math.min(Math.floor(progress * 100), 100), 0) : 0; const progressLine = progressPercentage !== undefined && progressPercentage !== null ? (
) : null; const progressText = text ? (
{text}
) : null; return (
{progressLine} {progressText}
); } export default ProgressBar; ================================================ FILE: src/components/shared/Promo.tsx ================================================ import React from 'react'; function Promo(): React.ReactElement { return (
Links Detector makes printed links clickable via your smartphone camera No need to type a link in, just scan and click on it
); } export default Promo; ================================================ FILE: src/components/shared/Spinner.css ================================================ @keyframes sk-scaleout { 0% { -webkit-transform: scaleX(0); transform: scaleX(0); } 100% { -webkit-transform: scaleX(1.0); transform: scaleX(1.0); opacity: 0; } } ================================================ FILE: src/components/shared/Spinner.tsx ================================================ import React, { CSSProperties } from 'react'; import './Spinner.css'; import { DETECTION_BACKGROUND_COLOR_CLASS } from '../../constants/style'; function Spinner(): React.ReactElement { const spinnerStyles: CSSProperties = { width: '100%', height: '100%', transformOrigin: 'top left', animation: 'sk-scaleout 1.2s ease-in-out infinite', }; return (
); } export default Spinner; ================================================ FILE: src/components/shared/Template.tsx ================================================ import React, { CSSProperties } from 'react'; import Header from './Header'; import Footer from './Footer'; import { FRAME_PADDING_CLASS } from '../../constants/style'; type TemplateProps = { children?: React.ReactNode, } function Template(props: TemplateProps): React.ReactElement { const { children } = props; const headerStyles: CSSProperties = { zIndex: 1, }; const footerStyles: CSSProperties = { zIndex: 1, }; return (
{children}
); } export default Template; ================================================ FILE: src/configs/analytics.ts ================================================
// Google Analytics measurement ID consumed by the analytics utilities.
export const GOOGLE_ANALYTICS_ID = 'G-NEPEGVZ6TM';
================================================ FILE: src/configs/detectionConfig.ts ================================================
import { ZeroOneRange } from '../utils/types';
import { BASE_APP_PATH } from '../constants/routes';

// Models are served from the app's own base path (see public/models/).
export const MODELS_BASE_URL = `${BASE_APP_PATH}`;

// Shape of the whole detection-pipeline configuration.
export type DetectionConfig = {
  modelLoading: {
    // URL of the TensorFlow.js graph model that detects link prefixes.
    linksDetectorModelURL: string,
  },
  imagePreprocessing: {
    // Filters applied to the frame that is rendered for the user.
    ui: {
      enabled: boolean,
      brightness: ZeroOneRange,
      contrast: ZeroOneRange,
    },
    // Filters applied to the frame that is fed into the model.
    model: {
      enabled: boolean,
      brightness: ZeroOneRange,
      contrast: ZeroOneRange,
      size: number, // Size in pixels (0 means do not resize).
    },
  },
  videoStreaming: {
    idealFPS: number,
  },
  httpsDetection: {
    maxBoxesNum: number,
    IOUThreshold: ZeroOneRange,
    scoreThreshold: ZeroOneRange,
  },
  ocr: {
    useRegionProposals: boolean,
    workersNum: number,
    language: string,
    regionProposalPadding: ZeroOneRange,
  },
};

// Concrete configuration values consumed by the detector hooks/components.
export const DETECTION_CONFIG: DetectionConfig = {
  modelLoading: {
    linksDetectorModelURL: `${MODELS_BASE_URL}/models/links_detector/v1/model.json`,
  },
  imagePreprocessing: {
    ui: {
      enabled: true,
      brightness: 0.2,
      contrast: 0.2,
    },
    model: {
      enabled: true,
      brightness: 0.6,
      contrast: 0.7,
      size: 640,
    },
  },
  videoStreaming: {
    idealFPS: 10,
  },
  httpsDetection: {
    // @see: https://js.tensorflow.org/api/latest/#image.nonMaxSuppressionAsync
    maxBoxesNum: 2,
    IOUThreshold: 0.5,
    scoreThreshold: 0.5,
  },
  ocr: {
    // @see: https://github.com/naptha/tesseract.js/blob/master/docs/examples.md
    useRegionProposals: true,
    workersNum: 2,
    language: 'eng',
    regionProposalPadding: 0.02,
  },
};
================================================ FILE: src/configs/pwa.ts ================================================
// Feature flag and cache naming used by the PWA service worker.
export const PWA_ENABLED: boolean = true;
export const CACHE_PREFIX: string = 'links-detector';
export const CACHE_VERSION: string =
'v1'; ================================================ FILE: src/constants/debug.ts ================================================
import { DEBUG_GET_PARAM } from './routes';

// True when the current URL carries the "debug" query parameter.
export const isDebugMode = (): boolean => {
  const url = new URL(window.location.href);
  return !!url.searchParams.get(DEBUG_GET_PARAM);
};
================================================ FILE: src/constants/links.ts ================================================
// External project links: GitHub repository and its issue tracker.
export const GITHUB_BASE_URL: string = 'https://github.com/trekhleb/links-detector';
export const GITHUB_ISSUES_LINK: string = `${GITHUB_BASE_URL}/issues`;
================================================ FILE: src/constants/page.ts ================================================
// Base document title and the separator used when appending page names.
export const APP_TITLE: string = 'Links Detector';
export const APP_TITLE_SEPARATOR: string = ' | ';
================================================ FILE: src/constants/routes.ts ================================================
import { APP_TITLE, APP_TITLE_SEPARATOR } from './page';

// Base path the app is deployed under (project sub-path, not "/").
export const BASE_APP_PATH: string = '/links-detector';
export const BASE_VIDEO_PATH: string = `${BASE_APP_PATH}/videos`;

// The BASE_ROUTE_PATH may be different from BASE_APP_PATH path in case of a hash router.
// Compare /links-detector/#/home (hash router) vs /links-detector/home (history router)
export const BASE_ROUTE_PATH: string = '/';

// Query-string parameter that switches the app into debug mode.
export const DEBUG_GET_PARAM = 'debug';

// All named routes of the application.
export enum RouteNames {
  home = 'home',
  detector = 'detector',
  debug = 'debug',
  demo = 'demo',
}

// One route entry: its path and the document title to show for it.
export type RouteType = {
  path: string,
  title: string,
};

// Complete route table keyed by route name.
export type RoutesType = {
  [routeName in RouteNames]: RouteType;
};

// Builds a document title of the form "<APP_TITLE> | <pageTitle>".
const generateAppTitle = (pageTitle: string): string => {
  return `${APP_TITLE}${APP_TITLE_SEPARATOR}${pageTitle}`;
};

// Prefixes a route path with BASE_ROUTE_PATH, avoiding a doubled slash
// when either side is just "/".
const generatePath = (path: string): string => {
  if (path === '/') {
    return BASE_ROUTE_PATH;
  }
  if (BASE_ROUTE_PATH === '/') {
    return path;
  }
  return `${BASE_ROUTE_PATH}${path}`;
};

// Route table: path + page title for every named route.
export const ROUTES: RoutesType = {
  [RouteNames.home]: {
    path: generatePath('/'),
    title: generateAppTitle('Start'),
  },
  [RouteNames.detector]: {
    path: generatePath('/detector'),
    title: generateAppTitle('Scanning'),
  },
  [RouteNames.debug]: {
    path: generatePath('/debug'),
    title: generateAppTitle('Debug'),
  },
  [RouteNames.demo]: {
    path: generatePath('/demo'),
    title: generateAppTitle('Demo'),
  },
};

// Convenience alias for the default (home) route.
export const HOME_ROUTE: RouteType = ROUTES.home;
================================================ FILE: src/constants/style.ts ================================================
// @see: https://tailwindcss.com/docs/background-color#class-reference
// Tailwind theme-color parts; interpolated into full utility class names below.
const THEME_COLOR: string = 'yellow';
const THEME_COLOR_INTENSITY: number = 400;

export const DETECTION_TEXT_COLOR_CLASS: string = 'text-black';
export const DETECTION_BACKGROUND_COLOR_CLASS: string = `bg-${THEME_COLOR}-${THEME_COLOR_INTENSITY}`;
export const LINKS_TEXT_HOVER_COLOR_CLASS: string = `text-${THEME_COLOR}-${THEME_COLOR_INTENSITY}`;
export const THEME_BG_COLOR_CLASS: string = `bg-${THEME_COLOR}-${THEME_COLOR_INTENSITY}`;
export const LAUNCH_BUTTON_BACKGROUND_HOVER_CLASS: string = `bg-${THEME_COLOR}-${THEME_COLOR_INTENSITY}`;

// @see: https://tailwindcss.com/docs/padding#app
export const FRAME_PADDING_CLASS: string = 'p-5';
================================================ FILE: src/hooks/useGraphModel.ts ================================================ import { useState, useEffect, useCallback } from 'react'; import * as tf from '@tensorflow/tfjs'; import useLogger from './useLogger'; import { graphModelLoad, graphModelWarmup } from '../utils/graphModel'; import { ZeroOneRange } from '../utils/types'; import { toFloatFixed } from '../utils/numbers'; type UseGraphModelProps = { modelURL: string, warmup?: boolean, }; type UseGraphModelOutput = { model: tf.GraphModel | null, error: string | null, loadingProgress: ZeroOneRange, }; const useGraphModel = (props: UseGraphModelProps): UseGraphModelOutput => { const { modelURL, warmup = false } = props; const logger = useLogger({ context: 'useGraphModel' }); const [model, setModel] = useState(null); const [isWarm, setIsWarm] = useState(null); const [error, setError] = useState(null); const [loadingProgress, setLoadingProgress] = useState(0); const warmupGraphModel = async (): Promise => { if (!warmup || !model || isWarm) { return; } await graphModelWarmup(model); }; const warmupCallback = useCallback( warmupGraphModel, [warmup, model, isWarm], ); const calculateLoadingProgress = (progress: ZeroOneRange): ZeroOneRange => { if (!warmup) { return toFloatFixed(progress, 2); } // In case of model warm up we need to reserve some percentage of loader for warming up. const warmupLoadingRatio = 0.05; return toFloatFixed((1 - warmupLoadingRatio) * progress, 2); }; const calculateLoadingProgressCallback = useCallback(calculateLoadingProgress, [warmup]); const onLoadingProgress = (progress: ZeroOneRange): void => { logger.logDebug('onLoadingProgress', { progress }); setLoadingProgress(calculateLoadingProgressCallback(progress)); }; const onLoadingProgressCallback = useCallback( onLoadingProgress, [calculateLoadingProgressCallback, logger], ); // Effect for loading a model. 
useEffect(() => { logger.logDebug('useEffect'); if (!model) { logger.logDebug('useEffect: loading the model'); graphModelLoad(modelURL, onLoadingProgressCallback) .then((graphModel: tf.GraphModel) => { setModel(graphModel); }) .catch((e: Error) => { setError(e.message); logger.logError(`cannot load the model: ${e.message}`); }); } return (): void => { logger.logDebug('useEffect: shutdown', { model }); if (model) { try { logger.logDebug('useEffect: shutdown: disposing the model'); model.dispose(); } catch (e) { logger.logDebug('useEffect: shutdown: disposing the model: CAUGHT ERROR'); } } }; }, [modelURL, setError, setModel, logger, onLoadingProgressCallback, model]); // Effect for warming up a model. useEffect(() => { if (!warmup || !model || isWarm) { return; } logger.logDebug('useEffect: warming up the model'); warmupCallback().then(() => { setIsWarm(true); setLoadingProgress(1); }); }, [ model, warmup, isWarm, setIsWarm, warmupCallback, logger, ]); let finalModel: tf.GraphModel | null = model; if (warmup) { finalModel = isWarm ? 
model : null; } return { model: finalModel, loadingProgress, error, }; }; export default useGraphModel; ================================================ FILE: src/hooks/useLinksDetector.ts ================================================ import { GraphModel } from '@tensorflow/tfjs'; import { ConfigResult, DetectResult, Line, RecognizeOptions, RecognizeResult, Rectangle, Scheduler, } from 'tesseract.js'; import { useCallback, useEffect, useRef, useState, } from 'react'; import useGraphModel from './useGraphModel'; import useLogger from './useLogger'; import useTesseract from './useTesseract'; import { ZeroOneRange } from '../utils/types'; import { newProfiler, Profiler } from '../utils/profiler'; import { DetectionBox, graphModelExecute } from '../utils/graphModel'; import { brightnessFilter, contrastFilter, FilterFunc, greyscaleFilter, Pixels, preprocessPixels, relativeToAbsolute, } from '../utils/image'; import { JobTypes } from '../utils/tesseract'; import { toFloatFixed } from '../utils/numbers'; import { Loggers } from '../utils/logger'; export type DetectionPerformance = { processing: number, avgProcessing: number, inference: number, avgInference: number, ocr: number, avgOcr: number, total: number, avgFps: number, fps: number, }; export type DetectedLink = { url: string, x1: number, y1: number, x2: number, y2: number, }; export type UseLinkDetectorProps = { modelURL: string, maxBoxesNum: number, scoreThreshold: number, iouThreshold: number, workersNum: number, language: string, }; export type DetectProps = { video: HTMLVideoElement, videoBrightness: number, videoContrast: number, resizeToSize: number, applyFilters: boolean, regionProposalsPadding: ZeroOneRange, useRegionProposals: boolean, }; export type UseLinkDetectorOutput = { detectLinks: (props: DetectProps) => Promise, detectedLinks: DetectedLink[], error: string | null, loadingProgress: ZeroOneRange | null, loadingStage: string | null, httpsBoxes: DetectionBox[] | null, regionProposals: Rectangle[], 
pixels: Pixels | null, }; export type TesseractDetection = ConfigResult | RecognizeResult | DetectResult; // @see: https://stackoverflow.com/questions/3809401/what-is-a-good-regular-expression-to-match-a-url const URL_REG_EXP = /https?:\/\/(www\.)?[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_+.~#?&/=]*)/gi; const extractLinkFromText = (text: string): string | null => { const urls: string[] | null = text.match(URL_REG_EXP); if (!urls || !urls.length) { return null; } return urls[0]; }; const extractLinkFromDetection = ( detection: TesseractDetection | null, logger: Loggers, ): DetectedLink | null => { if (!detection || !detection.data || !detection.data.lines || !detection.data.lines.length) { logger.logDebug('extractLinkFromDetection: empty'); return null; } for (let lineIndex = 0; lineIndex < detection.data.lines.length; lineIndex += 1) { const line: Line = detection.data.lines[lineIndex]; const url: string | null = extractLinkFromText(line.text); logger.logDebug('extractLinkFromDetection: line link', { url, text: line.text }); if (url) { const detectedLink: DetectedLink = { url, x1: line.bbox.x0, y1: line.bbox.y0, x2: line.bbox.x1, y2: line.bbox.y1, }; logger.logDebug('extractLinkFromDetection: detected', { detectedLink }); return detectedLink; } } return null; }; const useLinksDetector = (props: UseLinkDetectorProps): UseLinkDetectorOutput => { const { modelURL, iouThreshold, maxBoxesNum, scoreThreshold, workersNum, language, } = props; const preprocessingProfiler = useRef(newProfiler()); const inferenceProfiler = useRef(newProfiler()); const ocrProfiler = useRef(newProfiler()); const onFrameProfiler = useRef(newProfiler()); const modelRef = useRef(null); const tesseractSchedulerRef = useRef(null); const logger: Loggers = useLogger({ context: 'useLinksDetector' }); const [detectedLinks, setDetectedLinks] = useState([]); const [pixels, setPixels] = useState(null); const [detectionError, setDetectionError] = useState(null); const [httpsBoxes, 
setHttpsBoxes] = useState(null); const [regionProposals, setRegionProposals] = useState([]); const [loadingProgress, setLoadingProgress] = useState(null); const [loadingStage, setLoadingStage] = useState(null); const { model, error: modelError, loadingProgress: modelLoadingProgress, } = useGraphModel({ modelURL, warmup: true, }); const { scheduler: tesseractScheduler, loaded: tesseractSchedulerLoaded, error: tesseractSchedulerError, loadingProgress: tesseractLoadingProgress, } = useTesseract({ workersNum, language, }); const detectLinks = async (detectProps: DetectProps): Promise => { const { video, videoBrightness, videoContrast, resizeToSize, applyFilters, regionProposalsPadding, useRegionProposals, } = detectProps; logger.logDebug('detectLinks', detectProps); if (!modelRef.current) { const errMsg = 'Model is not ready for detection yet'; logger.logError(errMsg); setDetectionError(errMsg); return null; } /* eslint-disable no-else-return */ if (!tesseractSchedulerRef.current) { const errMsg = 'Tesseract is not loaded yet'; logger.logError(errMsg); setDetectionError(errMsg); return null; } else if (tesseractSchedulerRef.current.getNumWorkers() !== workersNum) { const errMsg = 'Tesseract workers are not loaded yet'; logger.logError(errMsg); setDetectionError(errMsg); return null; } onFrameProfiler.current.start(); // Image preprocessing. const filters: FilterFunc[] = applyFilters ? 
[ brightnessFilter(videoBrightness), contrastFilter(videoContrast), greyscaleFilter(), ] : []; preprocessingProfiler.current.start(); let processedPixels: Pixels | null = null; try { processedPixels = preprocessPixels({ pixels: video, resizeToSize, filters, }); } catch (error) { const errMessage: string = (error && error.message) || 'Image preprocessing failed'; logger.logError(errMessage); setDetectionError(errMessage); } if (!processedPixels) { return null; } setPixels(processedPixels); const imageProcessingTime = preprocessingProfiler.current.stop(); // HTTPS prefixes detection model execution. inferenceProfiler.current.start(); const httpsPredictions: DetectionBox[] | null = await graphModelExecute({ model: modelRef.current, pixels: processedPixels, maxBoxesNum, scoreThreshold, iouThreshold, }); const modelExecutionTime = inferenceProfiler.current.stop(); setHttpsBoxes(httpsPredictions); // OCR execution. ocrProfiler.current.start(); const rectanglesImage: Rectangle[] = [ { left: 0, top: 0, width: processedPixels.width, height: processedPixels.height, }, ]; /* eslint-disable-next-line max-len */ const rectanglesRegions: Rectangle[] = !useRegionProposals || !httpsPredictions || !httpsPredictions.length ? [] : httpsPredictions.map((httpsPrediction: DetectionBox): Rectangle => { const { x1, y1, y2 } = httpsPrediction; const imageW: number = (processedPixels && processedPixels.width) || 0; const imageH: number = (processedPixels && processedPixels.height) || 0; const left: number = relativeToAbsolute(x1 - regionProposalsPadding, imageW); const top: number = relativeToAbsolute(y1 - regionProposalsPadding, imageH); const bottom: number = relativeToAbsolute(y2 + regionProposalsPadding, imageH); const width: number = imageW - left; const height: number = bottom - top; return { left, top, width, height, }; }); const rectangles: Rectangle[] = useRegionProposals ? 
rectanglesRegions : rectanglesImage; setRegionProposals(rectangles); logger.logDebug('detectLinks: tesseract is ready', { numWorkers: tesseractSchedulerRef.current.getNumWorkers(), rectangles: { ...rectangles }, }); if (rectangles.length) { const texts: Array = await Promise.all( rectangles.map((rectangle: Rectangle): Promise => { const recognizeOptions: Partial = { rectangle }; if (!tesseractSchedulerRef.current) { return Promise.resolve(null); } return tesseractSchedulerRef.current.addJob( JobTypes.Recognize, processedPixels, recognizeOptions, ); }), ); if (texts && texts.length) { const currentDetectedLinks: Array = texts .map((text: TesseractDetection | null): DetectedLink | null => { return extractLinkFromDetection(text, logger); }) .filter( (detection: DetectedLink | null): boolean => detection !== null, ); // @ts-ignore setDetectedLinks(currentDetectedLinks); } else { // If no links are recognized we should clear the previously recognized links. // However to give users more time to click on them even if the next recognition // round was not successful we may avoid clearing the array of links. // setDetectedLinks([]); } logger.logDebug('recognized texts', { texts }); } else { logger.logDebug('skipping the text recognition'); } const ocrExecutionTime = ocrProfiler.current.stop(); const onFrameTime = onFrameProfiler.current.stop(); // Performance summary. 
const detectionPerformance: DetectionPerformance = { processing: imageProcessingTime, avgProcessing: preprocessingProfiler.current.avg(), inference: modelExecutionTime, avgInference: inferenceProfiler.current.avg(), ocr: ocrExecutionTime, avgOcr: ocrProfiler.current.avg(), total: onFrameTime, avgFps: onFrameProfiler.current.avgFps(), fps: onFrameProfiler.current.fps(), }; logger.logDebugTable('onFrame', detectionPerformance); return detectionPerformance; }; const detectLinksCallback = useCallback(detectLinks, [ modelRef, iouThreshold, logger, maxBoxesNum, scoreThreshold, workersNum, tesseractSchedulerRef, ]); // Calculate the loading progress. useEffect(() => { const normalizedProgress: ZeroOneRange = toFloatFixed( (modelLoadingProgress + tesseractLoadingProgress) / 2, 2, ); logger.logDebug('useEffect: loading progress', { modelLoadingProgress, tesseractLoadingProgress, normalizedProgress, loadingProgress, }); setLoadingProgress(normalizedProgress); setLoadingStage('Loading links detector'); }, [loadingProgress, modelLoadingProgress, logger, tesseractLoadingProgress]); // Update model references. useEffect(() => { logger.logDebug('useEffect: Model'); modelRef.current = model; }, [model, logger]); // Update tesseract scheduler references. 
useEffect(() => { logger.logDebug('useEffect: Tesseract Scheduler'); if (!tesseractScheduler || !tesseractSchedulerLoaded) { return; } tesseractSchedulerRef.current = tesseractScheduler; }, [tesseractScheduler, logger, tesseractSchedulerLoaded]); return { detectedLinks, detectLinks: detectLinksCallback, loadingProgress, loadingStage, pixels, httpsBoxes, regionProposals, error: modelError || detectionError || tesseractSchedulerError, }; }; export default useLinksDetector; ================================================ FILE: src/hooks/useLogger.ts ================================================ import { useRef } from 'react'; import { buildLoggers, LoggerContext, Loggers } from '../utils/logger'; type UseLoggerParams = { context?: LoggerContext, }; function useLogger(params: UseLoggerParams = {}): Loggers { const { context } = params; const loggers = useRef(buildLoggers({ context })); return loggers.current; } export default useLogger; ================================================ FILE: src/hooks/usePageTitle.ts ================================================ import { useEffect, useState } from 'react'; import { useRouteMatch } from 'react-router-dom'; import { routeTitleFromPath } from '../utils/routes'; type UsePageTitleOutput = { pageTitle: string | null, }; function usePageTitle(): UsePageTitleOutput { const [pageTitle, setPageTitle] = useState(null); const routeMatch = useRouteMatch(); useEffect(() => { const detectedPageTitle: string | null = routeTitleFromPath(routeMatch.path); setPageTitle(detectedPageTitle); }, [routeMatch.path]); return { pageTitle, }; } export default usePageTitle; ================================================ FILE: src/hooks/useTesseract.ts ================================================ import { Scheduler } from 'tesseract.js'; import { useCallback, useEffect, useRef, useState, } from 'react'; import { initScheduler, InitSchedulerProps } from '../utils/tesseract'; import useLogger from './useLogger'; import { ZeroOneRange } 
from '../utils/types';

type UseSchedulerProps = InitSchedulerProps;

type UseSchedulerOutput = {
  scheduler: Scheduler | null,
  loaded: boolean,
  loadingProgress: ZeroOneRange,
  error: string | null,
};

// Owns a Tesseract.js OCR scheduler: initializes it once per
// (workersNum, language) pair, reports loading progress, surfaces
// initialization errors, and terminates the scheduler on cleanup.
const useTesseract = (props: UseSchedulerProps): UseSchedulerOutput => {
  const { workersNum, language } = props;

  const [loaded, setLoaded] = useState<boolean>(false);
  const [loadingProgress, setLoadingProgress] = useState<ZeroOneRange>(0);
  const [error, setError] = useState<string | null>(null);
  const scheduler = useRef<Scheduler | null>(null);

  const logger = useLogger({ context: 'useTesseract' });

  const onSchedulerLoading = (progress: ZeroOneRange): void => {
    logger.logDebug('onSchedulerLoading', {
      progress,
      workersNum: scheduler.current ? scheduler.current.getNumWorkers() : 0,
    });
    setLoadingProgress(progress);
  };
  const onSchedulerLoadingCallback = useCallback(onSchedulerLoading, [logger]);

  // Normalizes a string or Error-like value into a user-facing message.
  const onSchedulerError = (schedulerError: any): void => {
    let errMessage = 'Scheduler error';
    if (typeof schedulerError === 'string') {
      errMessage = schedulerError;
    } else if (schedulerError && schedulerError.message && typeof schedulerError.message === 'string') {
      errMessage = schedulerError.message;
    }
    setError(errMessage);
  };
  const onSchedulerErrorCallback = useCallback(onSchedulerError, []);

  useEffect((): () => void => {
    logger.logDebug('useEffect');
    // The ref object itself is always truthy — only .current matters here.
    if (scheduler.current) {
      logger.logDebug('useEffect: skip');
      return (): void => {};
    }
    initScheduler({
      workersNum,
      language,
      onLoading: onSchedulerLoadingCallback,
      onError: onSchedulerErrorCallback,
    })
      .then((ocrScheduler: Scheduler) => {
        logger.logDebug('useEffect: init finished', {
          ocrScheduler,
          workersNum: ocrScheduler.getNumWorkers(),
          queueLen: ocrScheduler.getQueueLen(),
        });
        scheduler.current = ocrScheduler;
        setLoaded(true);
      })
      // Surface init failures instead of leaving an unhandled promise rejection.
      .catch(onSchedulerErrorCallback);
    return (): void => {
      logger.logDebug('useEffect: shutdown', { scheduler: scheduler.current });
      if (scheduler.current) {
        logger.logDebug('useEffect: shutdown: terminating the scheduler');
        scheduler.current.terminate().then(() => {
          logger.logDebug('useEffect: shutdown: scheduler is terminated');
        });
      }
    };
  }, [workersNum, language, logger, onSchedulerErrorCallback, onSchedulerLoadingCallback]);

  return {
    scheduler: scheduler.current,
    loadingProgress,
    loaded,
    error,
  };
};

export default useTesseract;

import { useEffect, useState } from 'react';
import throttle from 'lodash/throttle';

import useLogger from './useLogger';

type WindowSize = {
  width: number | undefined,
  height: number | undefined,
};

const resizeThrottleMs = 200;

// Tracks the window size, throttling resize events to at most one state
// update per resizeThrottleMs interval (trailing edge only).
function useWindowSize(): WindowSize {
  const logger = useLogger({ context: 'useWindowSize' });

  const [windowSize, setWindowSize] = useState<WindowSize>({
    width: undefined,
    height: undefined,
  });

  useEffect((): () => void => {
    logger.logDebug('useEffect');
    const handleResize = (): void => {
      setWindowSize({
        width: window.innerWidth,
        height: window.innerHeight,
      });
    };
    const handleResizeThrottled = throttle(
      handleResize,
      resizeThrottleMs,
      {
        leading: false,
        trailing: true,
      },
    );
    window.addEventListener('resize', handleResizeThrottled);
    handleResizeThrottled();
    return (): void => {
      logger.logDebug('useEffect return');
      window.removeEventListener('resize', handleResizeThrottled);
      // Drop any pending trailing invocation so setState cannot fire after unmount.
      handleResizeThrottled.cancel();
    };
  }, [logger]);

  return windowSize;
}

export default useWindowSize;

# Icons

- [Tutorial](https://tailwindcss.com/course/working-with-svg-icons/#app) of how to style icons with Tailwind
- [SVG OMG](https://jakearchibald.github.io/svgomg/) - for icons optimization
- [SVG Icons Packs](https://tailwindcss.com/resources/) - list by Tailwind
- [Icomoon](https://icomoon.io/) - SVG icons set
- [FeatherIcons](https://feathericons.com/) - SVG icons set
- [Vectr](https://vectr.com/) - creating vector icons online
- [FaviconGenerator](https://realfavicongenerator.net/) - to
generate favicon and app icons ================================================ FILE: src/icons/index.ts ================================================ import React, { SVGProps } from 'react'; import { ReactComponent as XIcon } from './feathericons/x.svg'; import { ReactComponent as AlertCircleIcon } from './feathericons/alert-circle.svg'; import { ReactComponent as Link2Icon } from './feathericons/link-2.svg'; import { ReactComponent as LinkIcon } from './feathericons/link.svg'; import { ReactComponent as ExternalLinkIcon } from './feathericons/external-link.svg'; import { ReactComponent as GitHubIcon } from './feathericons/github.svg'; import { ReactComponent as EditIcon } from './feathericons/edit-3.svg'; import { ReactComponent as BookOpenIcon } from './feathericons/book-open.svg'; import { ReactComponent as SearchIcon } from './feathericons/search.svg'; import { ReactComponent as SmartphoneIcon } from './feathericons/smartphone.svg'; import { ReactComponent as EyeIcon } from './feathericons/eye.svg'; import { ReactComponent as YoutubeIcon } from './feathericons/youtube.svg'; import { ReactComponent as LinksDetectorLogoIcon } from './vectr/links-detector-logo.svg'; export enum ICON_KEYS { X = 'x', ALERT_CIRCLE = 'alert-circle', LINK = 'link', LINK_2 = 'link-2', EXTERNAL_LINK = 'external-link', GIT_HUB = 'github', EDIT = 'edit', LINKS_DETECTOR_LOGO = 'links-detector-logo', BOOK_OPEN = 'book-open', SEARCH = 'search', SMARTPHONE = 'smartphone', EYE = 'eye', YOUTUBE = 'youtube', } type IconType = { component: React.FunctionComponent & { title?: string | undefined; }>, fillCurrent?: boolean, } type IconsType = { [iconKey in ICON_KEYS]: IconType; } export const ICONS: IconsType = { [ICON_KEYS.X]: { component: XIcon, }, [ICON_KEYS.ALERT_CIRCLE]: { component: AlertCircleIcon, fillCurrent: false, }, [ICON_KEYS.LINK]: { component: LinkIcon, fillCurrent: false, }, [ICON_KEYS.LINK_2]: { component: Link2Icon, fillCurrent: false, }, [ICON_KEYS.EXTERNAL_LINK]: { component: 
ExternalLinkIcon, fillCurrent: false, }, [ICON_KEYS.GIT_HUB]: { component: GitHubIcon, fillCurrent: false, }, [ICON_KEYS.EDIT]: { component: EditIcon, fillCurrent: false, }, [ICON_KEYS.LINKS_DETECTOR_LOGO]: { component: LinksDetectorLogoIcon, fillCurrent: false, }, [ICON_KEYS.BOOK_OPEN]: { component: BookOpenIcon, fillCurrent: false, }, [ICON_KEYS.SEARCH]: { component: SearchIcon, fillCurrent: false, }, [ICON_KEYS.SMARTPHONE]: { component: SmartphoneIcon, fillCurrent: false, }, [ICON_KEYS.EYE]: { component: EyeIcon, fillCurrent: false, }, [ICON_KEYS.YOUTUBE]: { component: YoutubeIcon, fillCurrent: false, }, }; ================================================ FILE: src/index.tsx ================================================ import React from 'react'; import ReactDOM from 'react-dom'; import './styles/tailwind.css'; import App from './components/App'; import * as serviceWorker from './serviceWorkerRegistration'; import { PWA_ENABLED } from './configs/pwa'; ReactDOM.render( , document.getElementById('root'), ); if (PWA_ENABLED) { serviceWorker.register(); } else { serviceWorker.unregister(); } ================================================ FILE: src/react-app-env.d.ts ================================================ /// /* declare module 'glfx'; or declare namespace bananaJs { function getBanana(): string; function addBanana(n: number) void; function removeBanana(n: number) void; } */ ================================================ FILE: src/service-worker.ts ================================================ /// // This service worker can be customized! // See https://developers.google.com/web/tools/workbox/modules // for the list of available Workbox modules, or add any other code you'd like. // You can also remove this file if you'd prefer not to use a // service worker, and the Workbox build step will be skipped. 
import { clientsClaim, skipWaiting, setCacheNameDetails } from 'workbox-core'; import { ExpirationPlugin } from 'workbox-expiration'; import { registerRoute } from 'workbox-routing'; import { StaleWhileRevalidate } from 'workbox-strategies'; import { CacheableResponsePlugin } from 'workbox-cacheable-response'; import { precacheAndRoute } from 'workbox-precaching'; // import * as googleAnalytics from 'workbox-google-analytics'; import { CACHE_PREFIX, CACHE_VERSION } from './configs/pwa'; import { daysToSeconds } from './utils/numbers'; // @see: https://developers.google.com/web/tools/workbox/modules/workbox-core setCacheNameDetails({ prefix: CACHE_PREFIX, suffix: CACHE_VERSION, precache: 'precache', runtime: 'runtime', googleAnalytics: 'ga', }); // Precache all of the assets generated by your build process. // Their URLs are injected into the manifest variable below. // This variable must be present somewhere in your service worker file, // even if you decide not to use precaching. See https://cra.link/PWA // eslint-disable-next-line no-undef declare const self: ServiceWorkerGlobalScope; // googleAnalytics.initialize(); // Precache logic needs to go before registerRoute. Otherwise the caching strategy // from registerRoute will be applied instead of a CacheFirst strategy of precacheAndRoute. // @see: https://developers.google.com/web/tools/workbox/modules/workbox-precaching#serving_precached_responses // eslint-disable-next-line no-restricted-globals, no-underscore-dangle precacheAndRoute(self.__WB_MANIFEST, { ignoreURLParametersMatching: [/.*/], }); const getCacheName = (name: string): string => { return `${CACHE_PREFIX}-${name}-${CACHE_VERSION}`; }; skipWaiting(); clientsClaim(); // @see: https://developer.mozilla.org/en-US/docs/Web/API/Request // @see: https://developer.mozilla.org/en-US/docs/Web/API/RequestDestination registerRoute( ({ request, url }: { request: Request; url: URL }) => { // Assets by type. 
// eslint-disable-next-line no-undef const assetTypes: RequestDestination[] = [ 'image', 'style', 'script', 'worker', 'font', ]; if (assetTypes.includes(request.destination)) { return true; } // Assets by extension. const assetExtensions: string[] = [ '.json', // i.e. TensorFlow model summary. '.bin', // i.e. TensorFlow model data. '.gz', // i.e. Tesseract training data. ]; for (let extIndex = 0; extIndex < assetExtensions.length; extIndex += 1) { const extension: string = assetExtensions[extIndex]; if (url.href.endsWith(extension)) { return true; } } // Assets by origin. // @see: https://developers.google.com/web/tools/workbox/guides/common-recipes#google_fonts const assetOrigins: string[] = [ 'https://fonts.googleapis.com', // i.e. Google Fonts stylesheets. 'https://fonts.gstatic.com', // i.e. Google Font font files. ]; for (let originIndex = 0; originIndex < assetOrigins.length; originIndex += 1) { const origin: string = assetOrigins[originIndex]; if (url.origin === origin) { return true; } } return false; }, new StaleWhileRevalidate({ cacheName: getCacheName('assets'), plugins: [ new ExpirationPlugin({ maxAgeSeconds: daysToSeconds(30), }), // @see: https://developers.google.com/web/tools/workbox/modules/workbox-cacheable-response#caching_based_on_status_codes new CacheableResponsePlugin({ statuses: [0, 200] }), ], }), ); ================================================ FILE: src/serviceWorkerRegistration.ts ================================================ // This optional code is used to register a service worker. // register() is not called by default. // This lets the app load faster on subsequent visits in production, and gives // it offline capabilities. However, it also means that developers (and users) // will only see deployed updates on subsequent visits to a page, after all the // existing tabs open on the page have been closed, since previously cached // resources are updated in the background. 
// To learn more about the benefits of this model and instructions on how to // opt-in, read https://bit.ly/CRA-PWA import { buildLoggers } from './utils/logger'; const logger = buildLoggers({ context: 'swRegistration' }); const isServiceWorkerSupported = (): boolean => { return 'serviceWorker' in navigator; }; type Config = { onSuccess?: (registration: ServiceWorkerRegistration) => void; onUpdate?: (registration: ServiceWorkerRegistration) => void; }; function registerValidSW(swUrl: string, config?: Config): void { logger.logDebug('registerValidSW'); navigator.serviceWorker .register(swUrl) .then((registration: ServiceWorkerRegistration) => { logger.logDebug('registerValidSW: registered', { registration }); // eslint-disable-next-line no-param-reassign registration.onupdatefound = (): void => { const installingWorker = registration.installing; logger.logDebug('registerValidSW: onupdatefound', { installingWorker }); if (installingWorker == null) { return; } installingWorker.onstatechange = (): void => { if (installingWorker.state === 'installed') { if (navigator.serviceWorker.controller) { // At this point, the updated precached content has been fetched, // but the previous service worker will still serve the older // content until all client tabs are closed. // @see: https://bit.ly/CRA-PWA logger.logDebug( 'registerValidSW: New content is available and will be used when all tabs for this page are closed', { state: installingWorker.state, controller: navigator.serviceWorker.controller, }, ); // Execute callback if (config && config.onUpdate) { config.onUpdate(registration); } } else { // At this point, everything has been precached. It's the perfect time to display a // "Content is cached for offline use." message. 
logger.logDebug( 'registerValidSW: Content is cached for offline use', { state: installingWorker.state, controller: navigator.serviceWorker.controller, }, ); // Execute callback if (config && config.onSuccess) { config.onSuccess(registration); } } } }; }; }) .catch((error: Error) => { logger.logError('Error during service worker registration:', error); }); } export function register(config?: Config): void { if (!isServiceWorkerSupported()) { logger.logDebug('register: no supported'); return; } if (process.env.NODE_ENV !== 'production') { logger.logDebug('register: not a production environment'); return; } // The URL constructor is available in all browsers that support SW. const publicUrl = new URL( process.env.PUBLIC_URL, window.location.href, ); logger.logDebug('register', { publicUrlString: publicUrl.toString(), }); if (publicUrl.origin !== window.location.origin) { // Our service worker won't work if PUBLIC_URL is on a different origin // from what our page is served on. This might happen if a CDN is used to // serve assets; see https://github.com/facebook/create-react-app/issues/2374 logger.logError('register: PUBLIC_URL is on a different origin', { publicURLOrigin: publicUrl.origin, locationOrigin: window.location.origin, }); return; } window.addEventListener('load', () => { const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`; logger.logDebug('register: window loaded', { swUrl }); registerValidSW(swUrl, config); }); } export function unregister(): void { if (!isServiceWorkerSupported()) { logger.logDebug('unregister: not supported'); return; } logger.logDebug('unregister', { sw: navigator.serviceWorker }); navigator.serviceWorker.ready .then((registration: ServiceWorkerRegistration) => { logger.logDebug('unregister: starting'); registration.unregister() .then((result) => { logger.logDebug('unregister: finished', { result }); }) .catch((error) => { logger.logError('unregister: failed', { error }); }); }) .catch((error: Error) => { 
logger.logError(`unregister: failed: ${error.message}`, { error }); }); } ================================================ FILE: src/setupTests.ts ================================================ // jest-dom adds custom jest matchers for asserting on DOM nodes. // allows you to do things like: // expect(element).toHaveTextContent(/react/i) // learn more: https://github.com/testing-library/jest-dom import '@testing-library/jest-dom/extend-expect'; ================================================ FILE: src/styles/index.css ================================================ @tailwind base; @tailwind components; @tailwind utilities; ================================================ FILE: src/utils/analytics.ts ================================================ import { Location } from 'history'; import { GOOGLE_ANALYTICS_ID } from '../configs/analytics'; const getPathFromLocation = (routerLocation: Location): string => { // @ts-ignore const documentLocation: Location | null = document && document.location; const location: Location = documentLocation || routerLocation; let path = location.pathname; if (location.search) { path += location.search; } if (location.hash) { path += location.hash; } return path; }; const gTagSupported = (): boolean => { return window && window.gtag && true; }; export const gaPageView = (location: Location): void => { if (!gTagSupported()) { return; } // @see: https://developers.google.com/gtagjs/reference/api#config window.gtag('config', GOOGLE_ANALYTICS_ID, { page_path: getPathFromLocation(location), }); }; export const gaErrorLog = (errorType: string, errorMessage: string): void => { if (!gTagSupported()) { return; } // @see: https://developers.google.com/gtagjs/reference/api#config window.gtag('config', GOOGLE_ANALYTICS_ID); // @see: https://developers.google.com/gtagjs/reference/event#exception window.gtag('event', 'exception', { type: errorType, description: errorMessage, }); }; ================================================ FILE: 
src/utils/debug.ts ================================================ import * as tf from '@tensorflow/tfjs'; import { setTFBackend } from './graphModel'; type TFInfoProps = { modelURL: string, }; export type TFInfo = { platformName: string, backendName: string, }; export const getTFInfo = async (props: TFInfoProps): Promise => { const { modelURL } = props; await setTFBackend(); await tf.loadGraphModel(modelURL); return { platformName: tf.env().platformName, backendName: tf.engine().backendName, }; }; export const isWebGLSupported = (): boolean => { try { const canvas: HTMLCanvasElement = document.createElement('canvas'); return !!window.WebGLRenderingContext && ( !!canvas.getContext('webgl') || !!canvas.getContext('experimental-webgl') ); } catch (e) { return false; } }; export const isCanvasFilterSupported = (): boolean => { try { const canvas: HTMLCanvasElement = document.createElement('canvas'); const context: CanvasRenderingContext2D | null = canvas.getContext('2d'); if (!context) { return false; } if (!context.filter) { return false; } return true; } catch (e) { return false; } }; ================================================ FILE: src/utils/graphModel.ts ================================================ import * as tf from '@tensorflow/tfjs'; import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm'; import { DataType } from '@tensorflow/tfjs-core/src/types'; import { buildLoggers } from './logger'; import { Pixels } from './image'; import { newProfiler, Profiler } from './profiler'; export enum TFBackends { cpu = 'cpu', webgl = 'webgl', wasm = 'wasm', } export const setTFBackend = async ( backendName: string = TFBackends.webgl, ): Promise => { if (backendName === TFBackends.wasm) { // @see: package.json setWasmPaths({ 'tfjs-backend-wasm.wasm': '/wasm/tfjs-backend-wasm.wasm', 'tfjs-backend-wasm-simd.wasm': '/wasm/tfjs-backend-wasm-simd.wasm', 'tfjs-backend-wasm-threaded-simd.wasm': '/wasm/tfjs-backend-wasm-threaded-simd.wasm', }); } await 
tf.setBackend(backendName); await tf.ready(); }; export const graphModelLoad = async ( modelURL: string, onProgress: (progress: number) => void, ): Promise => { const logger = buildLoggers({ context: 'graphModelLoad' }); await setTFBackend(); const model: tf.GraphModel = await tf.loadGraphModel(modelURL, { onProgress }); logger.logDebug('Model is loaded', { backendName: tf.engine().backendName, platformName: tf.env().platformName, model, backend: tf.engine().backend, features: tf.env().features, }); return model; }; export const graphModelWarmup = async ( model: tf.GraphModel, ): Promise => { if (!model) { return; } const logger = buildLoggers({ context: 'graphModelWarmup' }); const inputShapeWithNulls = model.inputs[0].shape; if (!inputShapeWithNulls) { logger.logWarn('Cannot warmup the model: unknown input shape'); return; } const inputShape = inputShapeWithNulls.map((dimension: number) => { if (dimension === null || dimension === -1) { return 1; } return dimension; }); const dataType: DataType = 'int32'; const fakeInput = tf.zeros(inputShape, dataType); logger.logDebug('warmupModel', { inputShape, fakeInput }); try { await model.executeAsync(fakeInput); logger.logDebug('Model is wormed up'); } catch (error) { logger.logError(`Cannot warmup the model: ${error.message}`, { error }); } }; type ModelPredictions = { detectionsNum: number, detectionScores: number[], detectionClasses: number[], detectionBoxes: number[][], }; export type DetectionBox = { x1: number, y1: number, x2: number, y2: number, score: number, categoryId: number, }; type GraphModelExecuteProps = { model: tf.GraphModel, pixels: Pixels, maxBoxesNum: number, iouThreshold: number, scoreThreshold: number, }; export const graphModelExecute = async ( props: GraphModelExecuteProps, ): Promise => { const { model, pixels, maxBoxesNum, iouThreshold, scoreThreshold, } = props; const profiler: Profiler = newProfiler(); const logger = buildLoggers({ context: 'graphModelExecute' }); if (!model || !pixels) { 
logger.logError('executeModel: model or video is undefined'); return null; } const inputTensor: tf.Tensor3D = tf.browser.fromPixels(pixels).expandDims(0); let results: tf.Tensor | tf.Tensor[] | null = null; try { profiler.start(); results = await model.executeAsync(inputTensor); const inferenceTime = profiler.stop(); logger.logDebug('executeModel: executing', { inputTensorShape: inputTensor.shape, inferenceTime, results, }); } catch (e) { const errorMessage = (e && e.message) || 'Cannot execute the model'; logger.logError(errorMessage); } if (!results) { logger.logError('executeModel: model results are empty'); return null; } if (!Array.isArray(results)) { logger.logError('executeModel: expected an array of Tensors, got one Tensor', { results, }); return null; } const DETECTIONS_NUM_INDEX = 2; const DETECTIONS_CLASSES_INDEX = 5; const DETECTIONS_BOXES_INDEX = 0; const DETECTIONS_SCORES_INDEX = 6; const detectionsNum: number = tf.util.flatten( results[DETECTIONS_NUM_INDEX].arraySync(), )[0]; const detectionClasses: number[] = await tf.broadcastTo( tf.squeeze(results[DETECTIONS_CLASSES_INDEX]), [detectionsNum], ).array(); const detectionScores: number[] = await tf.broadcastTo( tf.squeeze(results[DETECTIONS_SCORES_INDEX]), [detectionsNum], ).array(); // Each entry is [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are // the corners of the bounding box. 
const detectionBoxes: number[][] = await tf.broadcastTo( tf.squeeze(results[DETECTIONS_BOXES_INDEX]), [detectionsNum, 4], ).array(); const importantBoxesIndicesTensor: tf.Tensor1D = await tf.image.nonMaxSuppressionAsync( detectionBoxes, detectionScores, maxBoxesNum, iouThreshold, scoreThreshold, ); const importantBoxesIndices: Int32Array = await importantBoxesIndicesTensor.data<'int32'>(); const boxes: DetectionBox[] = importantBoxesIndices.reduce( (tmpBoxes: DetectionBox[], boxIndex: number) => { tmpBoxes.push({ x1: detectionBoxes[boxIndex][1], y1: detectionBoxes[boxIndex][0], x2: detectionBoxes[boxIndex][3], y2: detectionBoxes[boxIndex][2], score: detectionScores[boxIndex], categoryId: detectionClasses[boxIndex], }); return tmpBoxes; }, [], ); const modelPredictions: ModelPredictions = { detectionsNum, detectionClasses, detectionBoxes, detectionScores, }; logger.logDebug('executeModel: parsed results', { modelPredictions, importantBoxesIndices, boxes, }); return boxes; }; ================================================ FILE: src/utils/image.ts ================================================ /* eslint-disable no-param-reassign */ import { SignedZeroOneRange, ZeroOneRange } from './types'; export type Pixels = HTMLImageElement | HTMLCanvasElement| HTMLVideoElement; export type FilterFunc = (colors: Uint8ClampedArray, shift: number) => void; const cutColor = (color: number): number => { return Math.min(Math.floor(color), 255); }; // Converts [0, 1] range to [-1, 1] range. 
const normalizeFilterParam = (zeroOneRange: ZeroOneRange): SignedZeroOneRange => { return zeroOneRange * 2 - 1; }; // @see: https://developer.mozilla.org/en-US/docs/Web/CSS/filter-function export const normalizeCSSFilterParam = (zeroOneRange: ZeroOneRange): number => { return 1 + zeroOneRange; }; // @see: http://thecryptmag.com/Online/56/imgproc_5.html // @see: https://css-tricks.com/manipulating-pixels-using-canvas/ export const contrastFilter = (contrastRange: ZeroOneRange): FilterFunc => ( colors: Uint8ClampedArray, shift: number, ): void => { const contrast = normalizeFilterParam(contrastRange) * 100; const factor = (259 * (contrast + 255)) / (255 * (259 - contrast)); for (let channel = 0; channel < 3; channel += 1) { colors[shift + channel] = cutColor(factor * (colors[shift + channel] - 128) + 128); } }; // @see: https://css-tricks.com/manipulating-pixels-using-canvas/ export const brightnessFilter = (brightness: ZeroOneRange): FilterFunc => ( colors: Uint8ClampedArray, shift: number, ): void => { const brightnessDelta = 255 * normalizeFilterParam(brightness); for (let channel = 0; channel < 3; channel += 1) { colors[shift + channel] = cutColor(colors[shift + channel] + brightnessDelta); } }; // @see: https://www.tutorialspoint.com/dip/grayscale_to_rgb_conversion.htm export const greyscaleFilter = (): FilterFunc => ( colors: Uint8ClampedArray, shift: number, ): void => { const average = cutColor( 0.3 * colors[shift] + 0.59 * colors[shift + 1] + 0.11 * colors[shift + 2], ); for (let channel = 0; channel < 3; channel += 1) { colors[shift + channel] = average; } }; export const canvasFromPixels = ( pixels: Pixels, size?: number | null, ): HTMLCanvasElement | null => { const canvas: HTMLCanvasElement = document.createElement<'canvas'>('canvas'); canvas.width = size || pixels.width; canvas.height = size || pixels.height; const context: CanvasRenderingContext2D | null = canvas.getContext('2d'); if (!context) { return null; } context.drawImage(pixels, 0, 0, 
canvas.width, canvas.height); return canvas; }; export type PreprocessPixelsProps = { pixels: Pixels, filters: FilterFunc[], resizeToSize?: number | null, }; export const preprocessPixels = (props: PreprocessPixelsProps): Pixels => { const { pixels, filters, resizeToSize } = props; const canvas: HTMLCanvasElement | null = canvasFromPixels(pixels, resizeToSize); if (!canvas) { throw new Error('Canvas cannot be created'); } const context: CanvasRenderingContext2D | null = canvas.getContext('2d'); if (!context) { throw new Error('Empty canvas context'); } const imageData: ImageData = context.getImageData(0, 0, canvas.width, canvas.height); const COLORS_IN_PIXEL = 4; // RGBA for (let shift = 0; shift < imageData.data.length; shift += COLORS_IN_PIXEL) { filters.forEach((filter: FilterFunc) => { filter(imageData.data, shift); }); } context.putImageData(imageData, 0, 0); return canvas; }; export const relativeToAbsolute = (relativeCoordinate: ZeroOneRange, imageSize: number): number => { return Math.max( Math.min( Math.round(relativeCoordinate * imageSize), imageSize, ), 0, ); }; ================================================ FILE: src/utils/logger.ts ================================================ import { isDebugMode } from '../constants/debug'; import { gaErrorLog } from './analytics'; export type LoggerContext = string | null; export type LoggerMessage = string; export type LoggerMeta = Error | Object | null; interface LinksDetectorConsole { log(...data: any[]): void; table(tabularData?: any, properties?: string[]): void; warn(...data: any[]): void; error(...data: any[]): void; } const linksDetectorConsoleName: string = 'linksDetectorConsole'; function getSystemLogger(): LinksDetectorConsole { if (Object.prototype.hasOwnProperty.call(window, linksDetectorConsoleName)) { return window[linksDetectorConsoleName]; } const linksDetectorConsole: LinksDetectorConsole = { log: window.console.log, table: window.console.table, warn: window.console.warn, error: 
window.console.error, }; Object.defineProperty(window, linksDetectorConsoleName, { value: linksDetectorConsole, writable: false, }); return linksDetectorConsole; } const logger: LinksDetectorConsole = getSystemLogger(); export type TableLogger = ( message: string, tabularData: any, properties?: string[], ) => void; export type Logger = ( message: LoggerMessage, meta?: LoggerMeta, ) => void; export type Loggers = { logDebugTable: TableLogger, logDebug: Logger, logInfo: Logger, logWarn: Logger, logError: Logger, }; const contextSeparator = '→'; type OnCallLoggerCallback = (context: LoggerContext, message: LoggerMessage, meta?: LoggerMeta) => void; type BuildLoggerProps = { loggerFunc: (message?: any, ...optionalParams: any[]) => void, onCall?: OnCallLoggerCallback, context?: LoggerContext, muted?: boolean, }; const buildLogger = ( props: BuildLoggerProps, ): Logger => (message: LoggerMessage, meta?: LoggerMeta): void => { const { loggerFunc, onCall, context, muted, } = props; if (muted) { return; } const args: (LoggerMessage | LoggerContext | LoggerMeta)[] = [message]; if (context) { const consoleColors: string[] = [ 'green', 'orange', 'blue', 'brown', 'blueviolet', 'chocolate', 'coral', 'dodgerblue', 'olive', 'teal', ]; const contextHash: number = context.length % consoleColors.length; const contextColor: string = consoleColors[contextHash]; const contextStyles: string = `background: ${contextColor}; color: white; padding: 0 3px; border-radius: 3px;`; args.unshift( `%c${context}`, contextStyles, contextSeparator, ); } if (meta) { args.push(meta); } loggerFunc(...args); if (onCall) { onCall(context || 'unknown', message, meta); } }; type BuildTableLoggerProps = { loggerFunc: (tabularData: any, properties?: string[]) => void, context?: LoggerContext, muted?: boolean, }; const buildTableLogger = ( props: BuildTableLoggerProps, ): TableLogger => (message: string, tabularData: any, properties?: string[]): void => { const { loggerFunc, muted } = props; if (muted) { 
return; } loggerFunc(tabularData, properties); }; const onGAError: OnCallLoggerCallback = ( context: LoggerContext, message: LoggerMessage, ): void => { gaErrorLog(context || 'unknownContext', message); }; type BuildLoggersParams = { context?: LoggerContext, }; export const buildLoggers = (params: BuildLoggersParams): Loggers => { const { context } = params; const muted = !isDebugMode(); return { logDebugTable: buildTableLogger({ loggerFunc: logger.table, context, muted, }), logDebug: buildLogger({ loggerFunc: logger.log, context, muted, }), logInfo: buildLogger({ loggerFunc: logger.log, context, muted, }), logWarn: buildLogger({ loggerFunc: logger.warn, context, }), logError: buildLogger({ loggerFunc: logger.error, context, onCall: onGAError, }), }; }; ================================================ FILE: src/utils/numbers.ts ================================================ export const toFloatFixed = (num: number, fractionDigits: number): number => { const leverage: number = 10 ** fractionDigits; return Math.round(num * leverage) / leverage; }; export const daysToSeconds = (days: number): number => { const secondsInDay: number = 24 * 60 * 60; return days * secondsInDay; }; ================================================ FILE: src/utils/profiler.ts ================================================ import { toFloatFixed } from './numbers'; export type Profiler = { start: () => void, stop: (inSeconds?: boolean) => number, avg: (inSeconds?: boolean) => number, fps: () => number, avgFps: () => number, }; const msToSs = (timeMs: number, fractionDigits: number = 2): number => { return toFloatFixed(timeMs / 1000, fractionDigits); }; export const newProfiler = (): Profiler => { let timeRangesSum: number = 0; let timeRangesNum: number = 0; let lastTimeRange: number = 0; let startTimeMs: number = 0; const start = (): void => { startTimeMs = Date.now(); }; const stop = (inSeconds: boolean = true): number => { const timeRange = Date.now() - startTimeMs; lastTimeRange = 
timeRange; timeRangesNum += 1; timeRangesSum += timeRange; if (inSeconds) { return msToSs(timeRange); } return timeRange; }; const avg = (inSeconds: boolean = true): number => { const average = Math.ceil(timeRangesSum / timeRangesNum); if (inSeconds) { return msToSs(average); } return average; }; const fps = (): number => { return toFloatFixed(1 / msToSs(lastTimeRange), 2); }; const avgFps = (): number => { return toFloatFixed(1 / avg(), 2); }; return { start, stop, avg, fps, avgFps, }; }; ================================================ FILE: src/utils/routes.ts ================================================ import { ROUTES, RouteType } from '../constants/routes'; export const routeFromPath = (path: string): RouteType | null => { const route: RouteType | undefined = Object.values(ROUTES) .find((currentRoute: RouteType) => currentRoute.path === path); return route || null; }; export const routeTitleFromPath = (path: string): string | null => { const route: RouteType | null = routeFromPath(path); if (route) { return route.title; } return null; }; ================================================ FILE: src/utils/tesseract.ts ================================================ import { createWorker, createScheduler, Scheduler, Worker, WorkerOptions, WorkerParams, PSM, } from 'tesseract.js'; import { buildLoggers } from './logger'; import { ZeroOneRange } from './types'; import { toFloatFixed } from './numbers'; export type InitSchedulerProps = { workersNum: number, language: string, onError?: (error: any) => void, onLoading?: (progress: ZeroOneRange) => void, }; export enum JobTypes { Recognize = 'recognize', Detect = 'detect', } export enum WorkerLoadingStatuses { LoadingCore = 'loading tesseract core', Initializing = 'initializing tesseract', Initialized = 'initialized tesseract', LoadingLanguageTrainData = 'loading language traineddata', LoadingLanguageTrainDataCached = 'loading language traineddata (from cache)', LoadedLanguageTrainData = 'loaded language 
traineddata', InitializingAPI = 'initializing api', InitializedAPI = 'initialized api', RecognizingText = 'recognizing text', } const CORE_WORKER_ID = 'core'; type WorkerLoadingProgress = { [loadingState: string]: ZeroOneRange | null, }; type WorkersLoadingProgress = { [workerId: string]: WorkerLoadingProgress, }; export type WorkerLogEvent = { status: WorkerLoadingStatuses, workerId?: string, progress?: ZeroOneRange, }; const getLoadingProgress = ( originalWorkersLoadingProgress: WorkersLoadingProgress, workersNum: number, ): ZeroOneRange => { const workersLoadingProgress: WorkersLoadingProgress = { ...originalWorkersLoadingProgress }; // Detect core loading progress. const coreNum: number = 1; // always 1 const rawCoreLoadingProgress: WorkerLoadingProgress = workersLoadingProgress[ CORE_WORKER_ID ] || {}; const coreLoadingProgress: ZeroOneRange = rawCoreLoadingProgress[ WorkerLoadingStatuses.LoadingCore ] || 0; delete workersLoadingProgress[CORE_WORKER_ID]; // Detect workers loading progress. const rawWorkerProgresses: WorkerLoadingProgress[] = Object.values( workersLoadingProgress, ); if (!rawWorkerProgresses || !rawWorkerProgresses.length) { return 0; } const workerProgresses: ZeroOneRange[] = rawWorkerProgresses.map( (rawWorkerProgress: WorkerLoadingProgress) => { const tesseractLoadingProgress: ZeroOneRange = rawWorkerProgress[ WorkerLoadingStatuses.Initialized ] || 0; const apiLoadingProgress: ZeroOneRange = rawWorkerProgress[ WorkerLoadingStatuses.InitializedAPI ] || 0; const trainDataLoadingProgress: ZeroOneRange = rawWorkerProgress[ WorkerLoadingStatuses.LoadedLanguageTrainData ] || 0; return (tesseractLoadingProgress + apiLoadingProgress + trainDataLoadingProgress) / 3; }, ); const denormalizedLoadingProgress: number = workerProgresses.reduce( (overallProgress: number, currentProgress: ZeroOneRange) => { return overallProgress + currentProgress; }, 0, ); // Calculate overall loading progress. 
return toFloatFixed( (coreLoadingProgress + denormalizedLoadingProgress) / (workersNum + coreNum), 2, ); }; export const initScheduler = async (props: InitSchedulerProps): Promise => { const { workersNum, language, onError = (): void => {}, onLoading = (): void => {}, } = props; const logger = buildLoggers({ context: 'initScheduler' }); logger.logDebug('initScheduler', { ...props }); const workerIDs: string[] = []; const workersLoadingProgress: WorkersLoadingProgress = {}; const scheduler: Scheduler = createScheduler(); const onWorkerLog = (logEvent: WorkerLogEvent): void => { // Register a new loading state in worker loading progress object. const workerID: string = logEvent.workerId || CORE_WORKER_ID; if (!workersLoadingProgress[workerID]) { workersLoadingProgress[workerID] = {}; } workersLoadingProgress[workerID][logEvent.status] = logEvent.progress || null; // Calculate overall loading progress. const progress: ZeroOneRange = getLoadingProgress(workersLoadingProgress, workersNum); logger.logDebug('worker log', { ...logEvent, overallProgress: progress, workersLoadingProgress: { ...workersLoadingProgress }, loadedWorkersNum: scheduler.getNumWorkers(), workerIDs, }); onLoading(progress); }; const onWorkerError = (error: any): void => { logger.logError('worker error', { ...error }); onError(error); }; const workerOptions: Partial = { logger: onWorkerLog, errorHandler: onWorkerError, }; const initWorker = async (): Promise => { // @see: https://github.com/naptha/tesseract.js/blob/master/docs/api.md#workersetparametersparams-jobid-promise const workerParams: Partial = { // @ts-ignore tessedit_pageseg_mode: PSM.SINGLE_LINE, tessjs_create_hocr: '0', tessjs_create_tsv: '0', }; const worker: Worker = createWorker(workerOptions); await worker.load(); await worker.loadLanguage(language); await worker.initialize(language); await worker.setParameters(workerParams); return worker; }; let workers: Worker[] = []; try { const workersPromises: Promise[] = Array(workersNum) 
.fill(null) .map(() => initWorker()); workers = await Promise.all(workersPromises); } catch (error) { logger.logError('cannot init workers', { error }); onError(error); } workers.forEach((worker: Worker) => { const workerID = scheduler.addWorker(worker); workerIDs.push(workerID); logger.logDebug('addWorker', { workerID }); }); return scheduler; }; ================================================ FILE: src/utils/types.ts ================================================ // [0, 1] export type ZeroOneRange = number; // [-1, 1] export type SignedZeroOneRange = number; ================================================ FILE: tailwind.config.js ================================================ // @see: https://tailwindcss.com/docs/configuration/ // @see: https://github.com/tailwindcss/tailwindcss/blob/master/stubs/defaultConfig.stub.js const defaultTheme = require('tailwindcss/defaultTheme'); const fontFamily = { ...defaultTheme.fontFamily }; fontFamily.sans = [ 'Roboto', 'system-ui', '-apple-system', 'BlinkMacSystemFont', '"Segoe UI"', '"Helvetica Neue"', 'Arial', '"Noto Sans"', 'sans-serif', '"Apple Color Emoji"', '"Segoe UI Emoji"', '"Segoe UI Symbol"', '"Noto Color Emoji"', ]; module.exports = { purge: [], theme: { fontFamily, extend: {}, }, variants: { margin: ['responsive', 'last'], }, plugins: [], }; ================================================ FILE: tsconfig.json ================================================ { "compilerOptions": { "target": "es5", "lib": [ "dom", "dom.iterable", "esnext" ], "allowJs": true, "skipLibCheck": true, "esModuleInterop": true, "allowSyntheticDefaultImports": true, "strict": true, "forceConsistentCasingInFileNames": true, "module": "esnext", "moduleResolution": "node", "resolveJsonModule": true, "isolatedModules": true, "noEmit": true, "jsx": "react", "noFallthroughCasesInSwitch": true, "suppressImplicitAnyIndexErrors": true }, "include": [ "src" ] }