add opencv for automated bacteria counting (hopefully - one day :))
This commit is contained in:
parent
7e7cb6770e
commit
37612b7898
50
opencv-js-4.10.0/.github/workflows/build-opencv.yml
vendored
Normal file
50
opencv-js-4.10.0/.github/workflows/build-opencv.yml
vendored
Normal file
|
|
@ -0,0 +1,50 @@
|
||||||
|
name: Build OpenCV.js
|
||||||
|
|
||||||
|
on:
|
||||||
|
# push:
|
||||||
|
# branches:
|
||||||
|
# - build-opencv
|
||||||
|
# - main
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs: {}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-opencv:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y libv4l-dev
|
||||||
|
|
||||||
|
- name: Checkout emsdk
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
repository: emscripten-core/emsdk
|
||||||
|
path: emsdk
|
||||||
|
- name: Install an emsdk version
|
||||||
|
run: |
|
||||||
|
cd emsdk
|
||||||
|
./emsdk install 2.0.10
|
||||||
|
./emsdk activate 2.0.10
|
||||||
|
|
||||||
|
- name: Checkout opencv
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
repository: opencv/opencv
|
||||||
|
ref: 4.10.0
|
||||||
|
path: opencv
|
||||||
|
- name: Build opencv.js
|
||||||
|
run: |
|
||||||
|
source emsdk/emsdk_env.sh
|
||||||
|
emcmake python opencv/platforms/js/build_js.py build_js --build_flags="-s WASM_ASYNC_COMPILATION=0"
|
||||||
|
|
||||||
|
- name: Upload opencv_js
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: opencv.js
|
||||||
|
path: build_js/bin/opencv.js
|
||||||
|
retention-days: 30
|
||||||
|
|
||||||
|
# - name: Check out repository code
|
||||||
|
# uses: actions/checkout@v4
|
||||||
49
opencv-js-4.10.0/.github/workflows/npm-publish.yml
vendored
Normal file
49
opencv-js-4.10.0/.github/workflows/npm-publish.yml
vendored
Normal file
|
|
@ -0,0 +1,49 @@
|
||||||
|
# This workflow will run tests using node and then publish a package to GitHub Packages when a release is created
|
||||||
|
# For more information see: https://help.github.com/actions/language-and-framework-guides/publishing-nodejs-packages
|
||||||
|
|
||||||
|
name: Publish NPM Package
|
||||||
|
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types: [created]
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs: {}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
# build:
|
||||||
|
# runs-on: ubuntu-latest
|
||||||
|
# steps:
|
||||||
|
# - uses: actions/checkout@v4
|
||||||
|
# - uses: actions/setup-node@v4
|
||||||
|
# with:
|
||||||
|
# node-version: "20.x"
|
||||||
|
# - run: npm ci
|
||||||
|
# - run: npm run build
|
||||||
|
|
||||||
|
publish-npm:
|
||||||
|
# needs: build
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: "20.x"
|
||||||
|
registry-url: https://registry.npmjs.org/
|
||||||
|
- run: npm ci
|
||||||
|
- run: npm publish --access=public
|
||||||
|
env:
|
||||||
|
NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
|
||||||
|
|
||||||
|
# publish-gpr:
|
||||||
|
# needs: build
|
||||||
|
# runs-on: ubuntu-latest
|
||||||
|
# steps:
|
||||||
|
# - uses: actions/checkout@v2
|
||||||
|
# - uses: actions/setup-node@v1
|
||||||
|
# with:
|
||||||
|
# node-version: 14
|
||||||
|
# registry-url: https://npm.pkg.github.com/
|
||||||
|
# - run: npm ci
|
||||||
|
# - run: npm publish
|
||||||
|
# env:
|
||||||
|
# NODE_AUTH_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||||
29
opencv-js-4.10.0/.github/workflows/unit-test.yml
vendored
Normal file
29
opencv-js-4.10.0/.github/workflows/unit-test.yml
vendored
Normal file
|
|
@ -0,0 +1,29 @@
|
||||||
|
name: "Unit Test"
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
pull_request:
|
||||||
|
types:
|
||||||
|
- opened
|
||||||
|
- synchronize
|
||||||
|
- reopened
|
||||||
|
- ready_for_review
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODE_OPTIONS: --max_old_space_size=4096
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: npm install
|
||||||
|
|
||||||
|
- name: Run test
|
||||||
|
run: npm test
|
||||||
3
opencv-js-4.10.0/.gitignore
vendored
Normal file
3
opencv-js-4.10.0/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
||||||
|
/node_modules/
|
||||||
|
/dist/src/
|
||||||
|
.idea/
|
||||||
3
opencv-js-4.10.0/.prettierrc.json
Normal file
3
opencv-js-4.10.0/.prettierrc.json
Normal file
|
|
@ -0,0 +1,3 @@
|
||||||
|
{
|
||||||
|
"trailingComma": "all"
|
||||||
|
}
|
||||||
201
opencv-js-4.10.0/LICENSE
Normal file
201
opencv-js-4.10.0/LICENSE
Normal file
|
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
62
opencv-js-4.10.0/README.md
Normal file
62
opencv-js-4.10.0/README.md
Normal file
|
|
@ -0,0 +1,62 @@
|
||||||
|
# opencv-js
|
||||||
|
|
||||||
|
OpenCV JavaScript version (NPM package) for node.js or browser. Get started guide [OpenCV.js Tutorials](https://docs.opencv.org/4.10.0/#:~:text=OpenCV%2DPython%20Tutorials-,OpenCV.js%20Tutorials,-Tutorials%20for%20contrib).
|
||||||
|
|
||||||
|
The file `opencv.js` was downloaded from https://docs.opencv.org/4.10.0/opencv.js
|
||||||
|
|
||||||
|
TypeScript is supported (thanks to `mirada`).
|
||||||
|
|
||||||
|
# Code Examples
|
||||||
|
|
||||||
|
- See code examples (React, Angular) in [opencv-js-examples](https://github.com/TechStark/opencv-js-examples)
|
||||||
|
|
||||||
|
# Live Demo
|
||||||
|
|
||||||
|
## Using in react.js project
|
||||||
|
|
||||||
|
- See [live demo and code here](https://codesandbox.io/s/techstarkopencv-js-demo-page-f7gvk?file=/src/TestPage.jsx)
|
||||||
|
<img src="https://user-images.githubusercontent.com/132509/130320696-eaa3899b-2356-4e9f-bbc9-0a969465c58e.png" height="800px" alt="Live demo screenshot" />
|
||||||
|
- Get the test image from here [Lenna.png](test/Lenna.png)
|
||||||
|
|
||||||
|
## Using in Angular project
|
||||||
|
|
||||||
|
- See [code here](https://codesandbox.io/s/techstark-opencv-js-angular-demo-hkmc1n?file=/src/app/app.component.ts)
|
||||||
|
|
||||||
|
## Real-time face detection
|
||||||
|
|
||||||
|
- See [live demo and code here](https://codesandbox.io/s/opencv-js-face-detection-i1i3u)
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
# How to Use
|
||||||
|
|
||||||
|
- `npm install @techstark/opencv-js`
|
||||||
|
- or `yarn add @techstark/opencv-js`
|
||||||
|
- `import cv from "@techstark/opencv-js"`
|
||||||
|
- for TypeScript, set `"esModuleInterop": true` in `tsconfig.json`
|
||||||
|
- or `import * as cv from "@techstark/opencv-js"`
|
||||||
|
|
||||||
|
# Webpack Configuration (for browser usage)
|
||||||
|
|
||||||
|
If you use this package for browsers, you need to set some polyfills. In the file "webpack.config.js", set
|
||||||
|
|
||||||
|
```js
|
||||||
|
module.exports = {
|
||||||
|
resolve: {
|
||||||
|
modules: [...],
|
||||||
|
fallback: {
|
||||||
|
fs: false,
|
||||||
|
path: false,
|
||||||
|
crypto: false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
# What methods and properties are available
|
||||||
|
|
||||||
|
The TypeScript type declarations may not be up to date with the latest OpenCV.js. Refer to [cvKeys.json](doc/cvKeys.json) to check the available methods and properties at runtime.
|
||||||
|
|
||||||
|
# Star History
|
||||||
|
|
||||||
|
[](https://star-history.com/#techstark/opencv-js&Date)
|
||||||
1
opencv-js-4.10.0/_config.yml
Normal file
1
opencv-js-4.10.0/_config.yml
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
theme: jekyll-theme-cayman
|
||||||
33
opencv-js-4.10.0/dist/README.md
vendored
Normal file
33
opencv-js-4.10.0/dist/README.md
vendored
Normal file
|
|
@ -0,0 +1,33 @@
|
||||||
|
## Build opencv.js
|
||||||
|
|
||||||
|
- see https://github.com/opencv/opencv/blob/4.x/platforms/js/README.md
|
||||||
|
- also https://docs.opencv.org/4.7.0/d4/da1/tutorial_js_setup.html
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cd ~/apps/emsdk
|
||||||
|
./emsdk update
|
||||||
|
./emsdk install 2.0.10
|
||||||
|
./emsdk activate 2.0.10
|
||||||
|
```
|
||||||
|
|
||||||
|
- build
|
||||||
|
|
||||||
|
```sh
|
||||||
|
source ~/apps/emsdk/emsdk_env.sh
|
||||||
|
emcmake python ./platforms/js/build_js.py build_js --build_wasm
|
||||||
|
```
|
||||||
|
|
||||||
|
## Patch opencv.js
|
||||||
|
|
||||||
|
- To create a patch for the current version of opencv.js, run:
|
||||||
|
|
||||||
|
```
|
||||||
|
git diff > temp.patch
|
||||||
|
mv temp.patch dist/opencv.js.patch
|
||||||
|
```
|
||||||
|
|
||||||
|
- To apply the patch, run:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git apply dist/opencv.js.patch
|
||||||
|
```
|
||||||
48
opencv-js-4.10.0/dist/opencv.js
vendored
Normal file
48
opencv-js-4.10.0/dist/opencv.js
vendored
Normal file
File diff suppressed because one or more lines are too long
14
opencv-js-4.10.0/dist/opencv.js.patch
vendored
Normal file
14
opencv-js-4.10.0/dist/opencv.js.patch
vendored
Normal file
|
|
@ -0,0 +1,14 @@
|
||||||
|
diff --git a/dist/opencv.js b/dist/opencv.js
|
||||||
|
index 1be6d42..9971ab6 100644
|
||||||
|
--- a/dist/opencv.js
|
||||||
|
+++ b/dist/opencv.js
|
||||||
|
@@ -42,7 +42,7 @@ if (typeof exports === 'object' && typeof module === 'object')
|
||||||
|
exports["cv"] = cv;
|
||||||
|
|
||||||
|
if (typeof Module === 'undefined')
|
||||||
|
- Module = {};
|
||||||
|
+ var Module = {};
|
||||||
|
return cv(Module);
|
||||||
|
}));
|
||||||
|
|
||||||
|
\ No newline at end of file
|
||||||
3
opencv-js-4.10.0/doc/README.md
Normal file
3
opencv-js-4.10.0/doc/README.md
Normal file
|
|
@ -0,0 +1,3 @@
|
||||||
|
## Runtime/real methods and properties on CV objects
|
||||||
|
|
||||||
|
`cvKeys.json` is generated by `test/cvKeys.test.ts`
|
||||||
1393
opencv-js-4.10.0/doc/cvKeys.json
Normal file
1393
opencv-js-4.10.0/doc/cvKeys.json
Normal file
File diff suppressed because it is too large
Load diff
5
opencv-js-4.10.0/jest.config.js
Normal file
5
opencv-js-4.10.0/jest.config.js
Normal file
|
|
@ -0,0 +1,5 @@
|
||||||
|
/** @type {import('ts-jest').JestConfigWithTsJest} */
|
||||||
|
module.exports = {
|
||||||
|
preset: 'ts-jest',
|
||||||
|
testEnvironment: 'node',
|
||||||
|
};
|
||||||
BIN
opencv-js-4.10.0/opencv.ico
Normal file
BIN
opencv-js-4.10.0/opencv.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 4.2 KiB |
8414
opencv-js-4.10.0/package-lock.json
generated
Normal file
8414
opencv-js-4.10.0/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load diff
40
opencv-js-4.10.0/package.json
Normal file
40
opencv-js-4.10.0/package.json
Normal file
|
|
@ -0,0 +1,40 @@
|
||||||
|
{
|
||||||
|
"name": "@techstark/opencv-js",
|
||||||
|
"version": "4.10.0-release.1",
|
||||||
|
"description": "OpenCV JavaScript version for node.js or browser",
|
||||||
|
"main": "dist/opencv.js",
|
||||||
|
"types": "dist/src/index.d.ts",
|
||||||
|
"files": [
|
||||||
|
"dist/",
|
||||||
|
"src/"
|
||||||
|
],
|
||||||
|
"scripts": {
|
||||||
|
"build": "tsc",
|
||||||
|
"prepack": "npm run build",
|
||||||
|
"format": "prettier --write \"src/**/*.ts\"",
|
||||||
|
"test": "jest"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"@types/jest": "^29.5.12",
|
||||||
|
"jest": "^29.7.0",
|
||||||
|
"jimp": "^0.22.12",
|
||||||
|
"prettier": "^3.3.3",
|
||||||
|
"ts-jest": "^29.2.5",
|
||||||
|
"typescript": "^5.5.4"
|
||||||
|
},
|
||||||
|
"repository": {
|
||||||
|
"type": "git",
|
||||||
|
"url": "git+https://github.com/TechStark/opencv-js.git"
|
||||||
|
},
|
||||||
|
"keywords": [
|
||||||
|
"opencv",
|
||||||
|
"javascript",
|
||||||
|
"computer vision"
|
||||||
|
],
|
||||||
|
"author": "Wilson",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"bugs": {
|
||||||
|
"url": "https://github.com/TechStark/opencv-js/issues"
|
||||||
|
},
|
||||||
|
"homepage": "https://github.com/TechStark/opencv-js#readme"
|
||||||
|
}
|
||||||
1
opencv-js-4.10.0/src/index.ts
Normal file
1
opencv-js-4.10.0/src/index.ts
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
export * from "./types/opencv";
|
||||||
6
opencv-js-4.10.0/src/types/_cv.ts
Normal file
6
opencv-js-4.10.0/src/types/_cv.ts
Normal file
|
|
@ -0,0 +1,6 @@
|
||||||
|
import type { FS } from "./emscripten";
|
||||||
|
import type { CV } from "./opencv";
|
||||||
|
|
||||||
|
declare global {
|
||||||
|
var cv: CV & { FS: FS };
|
||||||
|
}
|
||||||
286
opencv-js-4.10.0/src/types/emscripten.ts
Normal file
286
opencv-js-4.10.0/src/types/emscripten.ts
Normal file
|
|
@ -0,0 +1,286 @@
|
||||||
|
interface Lookup {
|
||||||
|
path: string;
|
||||||
|
node: FSNode;
|
||||||
|
}
|
||||||
|
interface FSStream {}
|
||||||
|
interface FSNode {}
|
||||||
|
|
||||||
|
export interface FS {
|
||||||
|
lookupPath(path: string, opts: any): Lookup;
|
||||||
|
getPath(node: FSNode): string;
|
||||||
|
|
||||||
|
isFile(mode: number): boolean;
|
||||||
|
isDir(mode: number): boolean;
|
||||||
|
isLink(mode: number): boolean;
|
||||||
|
isChrdev(mode: number): boolean;
|
||||||
|
isBlkdev(mode: number): boolean;
|
||||||
|
isFIFO(mode: number): boolean;
|
||||||
|
isSocket(mode: number): boolean;
|
||||||
|
|
||||||
|
major(dev: number): number;
|
||||||
|
minor(dev: number): number;
|
||||||
|
makedev(ma: number, mi: number): number;
|
||||||
|
registerDevice(dev: number, ops: any): void;
|
||||||
|
|
||||||
|
syncfs(populate: boolean, callback: (e: any) => any): void;
|
||||||
|
syncfs(callback: (e: any) => any, populate?: boolean): void;
|
||||||
|
mount(type: any, opts: any, mountpoint: string): any;
|
||||||
|
unmount(mountpoint: string): void;
|
||||||
|
|
||||||
|
mkdir(path: string, mode?: number): any;
|
||||||
|
mkdev(path: string, mode?: number, dev?: number): any;
|
||||||
|
symlink(oldpath: string, newpath: string): any;
|
||||||
|
rename(old_path: string, new_path: string): void;
|
||||||
|
rmdir(path: string): void;
|
||||||
|
readdir(path: string): string[];
|
||||||
|
unlink(path: string): void;
|
||||||
|
readlink(path: string): string;
|
||||||
|
stat(path: string, dontFollow?: boolean): any;
|
||||||
|
lstat(path: string): any;
|
||||||
|
chmod(path: string, mode: number, dontFollow?: boolean): void;
|
||||||
|
lchmod(path: string, mode: number): void;
|
||||||
|
fchmod(fd: number, mode: number): void;
|
||||||
|
chown(path: string, uid: number, gid: number, dontFollow?: boolean): void;
|
||||||
|
lchown(path: string, uid: number, gid: number): void;
|
||||||
|
fchown(fd: number, uid: number, gid: number): void;
|
||||||
|
truncate(path: string, len: number): void;
|
||||||
|
ftruncate(fd: number, len: number): void;
|
||||||
|
utime(path: string, atime: number, mtime: number): void;
|
||||||
|
open(
|
||||||
|
path: string,
|
||||||
|
flags: string,
|
||||||
|
mode?: number,
|
||||||
|
fd_start?: number,
|
||||||
|
fd_end?: number,
|
||||||
|
): FSStream;
|
||||||
|
close(stream: FSStream): void;
|
||||||
|
llseek(stream: FSStream, offset: number, whence: number): any;
|
||||||
|
read(
|
||||||
|
stream: FSStream,
|
||||||
|
buffer: ArrayBufferView,
|
||||||
|
offset: number,
|
||||||
|
length: number,
|
||||||
|
position?: number,
|
||||||
|
): number;
|
||||||
|
write(
|
||||||
|
stream: FSStream,
|
||||||
|
buffer: ArrayBufferView,
|
||||||
|
offset: number,
|
||||||
|
length: number,
|
||||||
|
position?: number,
|
||||||
|
canOwn?: boolean,
|
||||||
|
): number;
|
||||||
|
allocate(stream: FSStream, offset: number, length: number): void;
|
||||||
|
mmap(
|
||||||
|
stream: FSStream,
|
||||||
|
buffer: ArrayBufferView,
|
||||||
|
offset: number,
|
||||||
|
length: number,
|
||||||
|
position: number,
|
||||||
|
prot: number,
|
||||||
|
flags: number,
|
||||||
|
): any;
|
||||||
|
ioctl(stream: FSStream, cmd: any, arg: any): any;
|
||||||
|
readFile(
|
||||||
|
path: string,
|
||||||
|
opts?: { encoding: string; flags: string },
|
||||||
|
): ArrayBufferView;
|
||||||
|
writeFile(
|
||||||
|
path: string,
|
||||||
|
data: ArrayBufferView,
|
||||||
|
opts?: { encoding: string; flags: string },
|
||||||
|
): void;
|
||||||
|
writeFile(
|
||||||
|
path: string,
|
||||||
|
data: string,
|
||||||
|
opts?: { encoding: string; flags: string },
|
||||||
|
): void;
|
||||||
|
analyzePath(p: string): any;
|
||||||
|
cwd(): string;
|
||||||
|
chdir(path: string): void;
|
||||||
|
init(
|
||||||
|
input: () => number,
|
||||||
|
output: (c: number) => any,
|
||||||
|
error: (c: number) => any,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
createLazyFile(
|
||||||
|
parent: string,
|
||||||
|
name: string,
|
||||||
|
url: string,
|
||||||
|
canRead: boolean,
|
||||||
|
canWrite: boolean,
|
||||||
|
): FSNode;
|
||||||
|
createLazyFile(
|
||||||
|
parent: FSNode,
|
||||||
|
name: string,
|
||||||
|
url: string,
|
||||||
|
canRead: boolean,
|
||||||
|
canWrite: boolean,
|
||||||
|
): FSNode;
|
||||||
|
|
||||||
|
createPreloadedFile(
|
||||||
|
parent: string,
|
||||||
|
name: string,
|
||||||
|
url: string,
|
||||||
|
canRead: boolean,
|
||||||
|
canWrite: boolean,
|
||||||
|
onload?: () => void,
|
||||||
|
onerror?: () => void,
|
||||||
|
dontCreateFile?: boolean,
|
||||||
|
canOwn?: boolean,
|
||||||
|
): void;
|
||||||
|
createPreloadedFile(
|
||||||
|
parent: FSNode,
|
||||||
|
name: string,
|
||||||
|
url: string,
|
||||||
|
canRead: boolean,
|
||||||
|
canWrite: boolean,
|
||||||
|
onload?: () => void,
|
||||||
|
onerror?: () => void,
|
||||||
|
dontCreateFile?: boolean,
|
||||||
|
canOwn?: boolean,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
createDataFile(
|
||||||
|
parent: string,
|
||||||
|
name: string,
|
||||||
|
data: ArrayBufferView,
|
||||||
|
canRead: boolean,
|
||||||
|
canWrite: boolean,
|
||||||
|
canOwn: boolean,
|
||||||
|
): void;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface EmscriptenModule {
|
||||||
|
print(str: string): void;
|
||||||
|
printErr(str: string): void;
|
||||||
|
arguments: string[];
|
||||||
|
environment: EnvironmentType;
|
||||||
|
preInit: Array<{ (): void }>;
|
||||||
|
preRun: Array<{ (): void }>;
|
||||||
|
postRun: Array<{ (): void }>;
|
||||||
|
onAbort: { (what: any): void };
|
||||||
|
onRuntimeInitialized: { (): void };
|
||||||
|
preinitializedWebGLContext: WebGLRenderingContext;
|
||||||
|
noInitialRun: boolean;
|
||||||
|
noExitRuntime: boolean;
|
||||||
|
logReadFiles: boolean;
|
||||||
|
filePackagePrefixURL: string;
|
||||||
|
wasmBinary: ArrayBuffer;
|
||||||
|
|
||||||
|
destroy(object: object): void;
|
||||||
|
getPreloadedPackage(
|
||||||
|
remotePackageName: string,
|
||||||
|
remotePackageSize: number,
|
||||||
|
): ArrayBuffer;
|
||||||
|
instantiateWasm(
|
||||||
|
imports: WebAssemblyImports,
|
||||||
|
successCallback: (module: WebAssemblyModule) => void,
|
||||||
|
): WebAssemblyExports;
|
||||||
|
locateFile(url: string): string;
|
||||||
|
onCustomMessage(event: MessageEvent): void;
|
||||||
|
|
||||||
|
Runtime: any;
|
||||||
|
|
||||||
|
ccall(
|
||||||
|
ident: string,
|
||||||
|
returnType: ValueType | null,
|
||||||
|
argTypes: ValueType[],
|
||||||
|
args: TypeCompatibleWithC[],
|
||||||
|
opts?: CCallOpts,
|
||||||
|
): any;
|
||||||
|
cwrap(
|
||||||
|
ident: string,
|
||||||
|
returnType: ValueType | null,
|
||||||
|
argTypes: ValueType[],
|
||||||
|
opts?: CCallOpts,
|
||||||
|
): (...args: any[]) => any;
|
||||||
|
|
||||||
|
setValue(ptr: number, value: any, type: string, noSafe?: boolean): void;
|
||||||
|
getValue(ptr: number, type: string, noSafe?: boolean): number;
|
||||||
|
|
||||||
|
ALLOC_NORMAL: number;
|
||||||
|
ALLOC_STACK: number;
|
||||||
|
ALLOC_STATIC: number;
|
||||||
|
ALLOC_DYNAMIC: number;
|
||||||
|
ALLOC_NONE: number;
|
||||||
|
|
||||||
|
allocate(
|
||||||
|
slab: any,
|
||||||
|
types: string | string[],
|
||||||
|
allocator: number,
|
||||||
|
ptr: number,
|
||||||
|
): number;
|
||||||
|
|
||||||
|
// USE_TYPED_ARRAYS == 1
|
||||||
|
HEAP: Int32Array;
|
||||||
|
IHEAP: Int32Array;
|
||||||
|
FHEAP: Float64Array;
|
||||||
|
|
||||||
|
// USE_TYPED_ARRAYS == 2
|
||||||
|
HEAP8: Int8Array;
|
||||||
|
HEAP16: Int16Array;
|
||||||
|
HEAP32: Int32Array;
|
||||||
|
HEAPU8: Uint8Array;
|
||||||
|
HEAPU16: Uint16Array;
|
||||||
|
HEAPU32: Uint32Array;
|
||||||
|
HEAPF32: Float32Array;
|
||||||
|
HEAPF64: Float64Array;
|
||||||
|
|
||||||
|
TOTAL_STACK: number;
|
||||||
|
TOTAL_MEMORY: number;
|
||||||
|
FAST_MEMORY: number;
|
||||||
|
|
||||||
|
addOnPreRun(cb: () => any): void;
|
||||||
|
addOnInit(cb: () => any): void;
|
||||||
|
addOnPreMain(cb: () => any): void;
|
||||||
|
addOnExit(cb: () => any): void;
|
||||||
|
addOnPostRun(cb: () => any): void;
|
||||||
|
|
||||||
|
// Tools
|
||||||
|
intArrayFromString(
|
||||||
|
stringy: string,
|
||||||
|
dontAddNull?: boolean,
|
||||||
|
length?: number,
|
||||||
|
): number[];
|
||||||
|
intArrayToString(array: number[]): string;
|
||||||
|
writeStringToMemory(str: string, buffer: number, dontAddNull: boolean): void;
|
||||||
|
writeArrayToMemory(array: number[], buffer: number): void;
|
||||||
|
writeAsciiToMemory(str: string, buffer: number, dontAddNull: boolean): void;
|
||||||
|
|
||||||
|
addRunDependency(id: any): void;
|
||||||
|
removeRunDependency(id: any): void;
|
||||||
|
|
||||||
|
preloadedImages: any;
|
||||||
|
preloadedAudios: any;
|
||||||
|
|
||||||
|
_malloc(size: number): number;
|
||||||
|
_free(ptr: number): void;
|
||||||
|
}
|
||||||
|
|
||||||
|
// declare namespace Emscripten {

// Opaque marker interface for an Emscripten virtual file-system backend
// (concrete shape is supplied by the runtime; see Emscripten FS docs).
interface FileSystemType {}

// Execution environments the Emscripten runtime can detect or be forced into.
type EnvironmentType = "WEB" | "NODE" | "SHELL" | "WORKER";

// Type tags accepted by ccall/cwrap argument and return descriptors.
type ValueType = "number" | "string" | "array" | "boolean";

// JavaScript values that can be marshalled across the C/JS boundary by ccall.
type TypeCompatibleWithC = number | string | any[] | boolean;

// Descriptors of the imports the wasm module expects (passed to instantiateWasm).
type WebAssemblyImports = Array<{
  name: string;
  kind: string;
}>;

// Descriptors of the exports the wasm module provides.
type WebAssemblyExports = Array<{
  module: string;
  name: string;
  kind: string;
}>;

// Options bag for Module.ccall.
interface CCallOpts {
  // When true, the call is allowed to complete asynchronously
  // (see Emscripten ccall documentation for build requirements).
  async?: boolean;
}
// }

// declare namespace WebAssembly {
// Opaque handle to a compiled WebAssembly module, as seen by instantiateWasm.
interface WebAssemblyModule {}
// }
|
||||||
// ==== opencv-js-4.10.0/src/types/opencv/Affine3.ts (new file, 206 lines) ====
|
||||||
|
import type { float_type, int, Mat, Mat3, Mat4, Vec3 } from "./_types";
|
||||||
|
|
||||||
|
/**
 * It represents a 4x4 homogeneous transformation matrix `$T$`
 *
 * `\\[T = \\begin{bmatrix} R & t\\\\ 0 & 1\\\\ \\end{bmatrix} \\]`
 *
 * where `$R$` is a 3x3 rotation matrix and `$t$` is a 3x1 translation vector.
 *
 * You can specify `$R$` either by a 3x3 rotation matrix or by a 3x1 rotation vector, which is
 * converted to a 3x3 rotation matrix by the Rodrigues formula.
 *
 * To construct a matrix `$T$` representing first rotation around the axis `$r$` with rotation angle
 * `$|r|$` in radian (right hand rule) and then translation by the vector `$t$`, you can use
 *
 * ```cpp
 * cv::Vec3f r, t;
 * cv::Affine3f T(r, t);
 * ```
 *
 * If you already have the rotation matrix `$R$`, then you can use
 *
 * ```cpp
 * cv::Matx33f R;
 * cv::Affine3f T(R, t);
 * ```
 *
 * To extract the rotation matrix `$R$` from `$T$`, use
 *
 * ```cpp
 * cv::Matx33f R = T.rotation();
 * ```
 *
 * To extract the translation vector `$t$` from `$T$`, use
 *
 * ```cpp
 * cv::Vec3f t = T.translation();
 * ```
 *
 * To extract the rotation vector `$r$` from `$T$`, use
 *
 * ```cpp
 * cv::Vec3f r = T.rvec();
 * ```
 *
 * Note that since the mapping from rotation vectors to rotation matrices is many to one, the returned
 * rotation vector is not necessarily the one you used before to set the matrix.
 *
 * If you have two transformations `$T = T_1 * T_2$`, use
 *
 * ```cpp
 * cv::Affine3f T, T1, T2;
 * T = T2.concatenate(T1);
 * ```
 *
 * To get the inverse transform of `$T$`, use
 *
 * ```cpp
 * cv::Affine3f T, T_inv;
 * T_inv = T.inv();
 * ```
 *
 * Source:
 * [opencv2/core/affine.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/affine.hpp#L129).
 */
export declare class Affine3 {
  // The underlying 4x4 homogeneous transformation matrix.
  public matrix: Mat4;

  // Default constructor: identity transform.
  public constructor();

  // Construct from an existing 4x4 transformation matrix.
  public constructor(affine: Mat4);

  /**
   * The resulting 4x4 matrix is
   *
   * `\\[ \\begin{bmatrix} R & t\\\\ 0 & 1\\\\ \\end{bmatrix} \\]`
   *
   * @param R 3x3 rotation matrix.
   * @param t 3x1 translation vector.
   */
  public constructor(R: Mat3, t?: Vec3);

  /**
   * Rodrigues vector.
   *
   * The last row of the current matrix is set to [0,0,0,1].
   *
   * @param rvec 3x1 rotation vector. Its direction indicates the rotation axis and its length
   * indicates the rotation angle in radian (using right hand rule).
   * @param t 3x1 translation vector.
   */
  public constructor(rvec: Vec3, t?: Vec3);

  /**
   * Combines all constructors above. Supports 4x4, 3x4, 3x3, 1x3, 3x1 sizes of data matrix.
   *
   * The last row of the current matrix is set to [0,0,0,1] when data is not 4x4.
   *
   * @param data 1-channel matrix. When it is 4x4, it is copied to the current matrix and t is not
   * used. When it is 3x4, it is copied to the upper part 3x4 of the current matrix and t is not used.
   * When it is 3x3, it is copied to the upper left 3x3 part of the current matrix. When it is 3x1 or
   * 1x3, it is treated as a rotation vector and the Rodrigues formula is used to compute a 3x3
   * rotation matrix.
   * @param t 3x1 translation vector. It is used only when data is neither 4x4 nor 3x4.
   */
  public constructor(data: Mat, t?: Vec3);

  // Construct from a flat scalar initializer (generator artifact of the C++ `const float_type* vals`
  // constructor — presumably expects 16 values; verify against the binding before relying on it).
  public constructor(vals: float_type);

  // Cast to a different element type (generated from the C++ template member `cast<Y>()`).
  public cast(arg401: any): Affine3;

  // Composition: a.concatenate(affine) applies `affine` after `a` (see class docs: T = T2.concatenate(T1)).
  public concatenate(affine: Affine3): Affine3;

  /**
   * the inverse of the current matrix.
   */
  public inv(method?: int): Affine3;

  /**
   * Copy the 3x3 matrix L to the upper left part of the current matrix
   *
   * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
   *
   * @param L 3x3 matrix.
   */
  public linear(L: Mat3): Mat3;

  /**
   * the upper left 3x3 part
   */
  public linear(): Mat3;

  // Post-compose a rotation given as a 3x3 matrix.
  public rotate(R: Mat3): Affine3;

  // Post-compose a rotation given as a Rodrigues vector.
  public rotate(rvec: Vec3): Affine3;

  /**
   * Rotation matrix.
   *
   * Copy the rotation matrix to the upper left 3x3 part of the current matrix. The remaining elements
   * of the current matrix are not changed.
   *
   * @param R 3x3 rotation matrix.
   */
  public rotation(R: Mat3): Mat3;

  /**
   * Rodrigues vector.
   *
   * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
   *
   * @param rvec 3x1 rotation vector. The direction indicates the rotation axis and its length
   * indicates the rotation angle in radian (using the right thumb convention).
   */
  public rotation(rvec: Vec3): Vec3;

  /**
   * Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix.
   *
   * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
   *
   * @param data 1-channel matrix. When it is a 3x3 matrix, it sets the upper left 3x3 part of the
   * current matrix. When it is a 1x3 or 3x1 matrix, it is used as a rotation vector. The Rodrigues
   * formula is used to compute the rotation matrix and sets the upper left 3x3 part of the current
   * matrix.
   */
  public rotation(data: Mat): Mat;

  /**
   * the upper left 3x3 part
   */
  public rotation(): Mat3;

  /**
   * Rodrigues vector.
   *
   * a vector representing the upper left 3x3 rotation matrix of the current matrix.
   *
   * Since the mapping between rotation vectors and rotation matrices is many to one, this function
   * returns only one rotation vector that represents the current rotation matrix, which is not
   * necessarily the same one set by `[rotation(const Vec3& rvec)]`.
   */
  public rvec(): Vec3;

  // Post-compose a translation by t.
  public translate(t: Vec3): Affine3;

  /**
   * Copy t to the first three elements of the last column of the current matrix
   *
   * It sets the upper right 3x1 part of the matrix. The remaining part is unaffected.
   *
   * @param t 3x1 translation vector.
   */
  public translation(t: Vec3): Vec3;

  /**
   * the upper right 3x1 part
   */
  public translation(): Vec3;

  // The identity transform.
  public static Identity(): Affine3;
}
|
||||||
// ==== opencv-js-4.10.0/src/types/opencv/Algorithm.ts (new file, 126 lines) ====
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
EmscriptenEmbindInstance,
|
||||||
|
FileNode,
|
||||||
|
FileStorage,
|
||||||
|
Ptr,
|
||||||
|
} from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* especially for classes of algorithms, for which there can be multiple implementations. The examples
|
||||||
|
* are stereo correspondence (for which there are algorithms like block matching, semi-global block
|
||||||
|
* matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians
|
||||||
|
* models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck
|
||||||
|
* etc.).
|
||||||
|
*
|
||||||
|
* Here is example of [SimpleBlobDetector](#d0/d7a/classcv_1_1SimpleBlobDetector}) use in your
|
||||||
|
* application via [Algorithm](#d3/d46/classcv_1_1Algorithm}) interface:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* Ptr<Feature2D> sbd = SimpleBlobDetector::create();
|
||||||
|
* FileStorage fs_read("SimpleBlobDetector_params.xml", FileStorage::READ);
|
||||||
|
*
|
||||||
|
* if (fs_read.isOpened()) // if we have file with parameters, read them
|
||||||
|
* {
|
||||||
|
* sbd->read(fs_read.root());
|
||||||
|
* fs_read.release();
|
||||||
|
* }
|
||||||
|
* else // else modify the parameters and store them; user can later edit the file to use different
|
||||||
|
* parameters
|
||||||
|
* {
|
||||||
|
* fs_read.release();
|
||||||
|
* FileStorage fs_write("SimpleBlobDetector_params.xml", FileStorage::WRITE);
|
||||||
|
* sbd->write(fs_write);
|
||||||
|
* fs_write.release();
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* Mat result, image = imread("../data/detect_blob.png", IMREAD_COLOR);
|
||||||
|
* vector<KeyPoint> keypoints;
|
||||||
|
* sbd->detect(image, keypoints, Mat());
|
||||||
|
*
|
||||||
|
* drawKeypoints(image, keypoints, result);
|
||||||
|
* for (vector<KeyPoint>::iterator k = keypoints.begin(); k != keypoints.end(); ++k)
|
||||||
|
* circle(result, k->pt, (int)k->size, Scalar(0, 0, 255), 2);
|
||||||
|
*
|
||||||
|
* imshow("result", result);
|
||||||
|
* waitKey(0);
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/core.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core.hpp#L3077).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class Algorithm extends EmscriptenEmbindInstance {
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
public clear(): void;
|
||||||
|
|
||||||
|
public empty(): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the algorithm string identifier. This string is used as top level xml/yml node tag when
|
||||||
|
* the object is saved to a file or string.
|
||||||
|
*/
|
||||||
|
public getDefaultName(): String;
|
||||||
|
|
||||||
|
public read(fn: FileNode): FileNode;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Saves the algorithm to a file. In order to make this method work, the derived class must implement
|
||||||
|
* Algorithm::write(FileStorage& fs).
|
||||||
|
*/
|
||||||
|
public save(filename: String): String;
|
||||||
|
|
||||||
|
public write(fs: FileStorage): FileStorage;
|
||||||
|
|
||||||
|
public write(fs: Ptr, name?: String): Ptr;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is static template method of [Algorithm]. It's usage is following (in the case of SVM):
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* Ptr<SVM> svm = Algorithm::load<SVM>("my_svm_model.xml");
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* In order to make this method work, the derived class must overwrite [Algorithm::read](const
|
||||||
|
* [FileNode]& fn).
|
||||||
|
*
|
||||||
|
* @param filename Name of the file to read.
|
||||||
|
*
|
||||||
|
* @param objname The optional name of the node to read (if empty, the first top-level node will be
|
||||||
|
* used)
|
||||||
|
*/
|
||||||
|
public static load(arg0: any, filename: String, objname?: String): Ptr;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is static template method of [Algorithm]. It's usage is following (in the case of SVM):
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* Ptr<SVM> svm = Algorithm::loadFromString<SVM>(myStringModel);
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* @param strModel The string variable containing the model you want to load.
|
||||||
|
*
|
||||||
|
* @param objname The optional name of the node to read (if empty, the first top-level node will be
|
||||||
|
* used)
|
||||||
|
*/
|
||||||
|
public static loadFromString(
|
||||||
|
arg1: any,
|
||||||
|
strModel: String,
|
||||||
|
objname?: String,
|
||||||
|
): Ptr;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is static template method of [Algorithm]. It's usage is following (in the case of SVM):
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* cv::FileStorage fsRead("example.xml", FileStorage::READ);
|
||||||
|
* Ptr<SVM> svm = Algorithm::read<SVM>(fsRead.root());
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* In order to make this method work, the derived class must overwrite [Algorithm::read](const
|
||||||
|
* [FileNode]& fn) and also have static create() method without parameters (or with all the optional
|
||||||
|
* parameters)
|
||||||
|
*/
|
||||||
|
public static read(arg2: any, fn: FileNode): Ptr;
|
||||||
|
}
|
||||||
// ==== opencv-js-4.10.0/src/types/opencv/AutoBuffer.ts (new file, 50 lines) ====
|
||||||
|
import type { size_t } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The class is used for temporary buffers in functions and methods. If a temporary buffer is usually
|
||||||
|
* small (a few K's of memory), but its size depends on the parameters, it makes sense to create a
|
||||||
|
* small fixed-size array on stack and use it if it's large enough. If the required buffer size is
|
||||||
|
* larger than the fixed size, another buffer of sufficient size is allocated dynamically and released
|
||||||
|
* after the processing. Therefore, in typical cases, when the buffer size is small, there is no
|
||||||
|
* overhead associated with malloc()/free(). At the same time, there is no limit on the size of
|
||||||
|
* processed data.
|
||||||
|
*
|
||||||
|
* This is what [AutoBuffer](#d8/dd0/classcv_1_1AutoBuffer}) does. The template takes 2 parameters -
|
||||||
|
* type of the buffer elements and the number of stack-allocated elements. Here is how the class is
|
||||||
|
* used:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* void my_func(const cv::Mat& m)
|
||||||
|
* {
|
||||||
|
* cv::AutoBuffer<float> buf(1000); // create automatic buffer containing 1000 floats
|
||||||
|
*
|
||||||
|
* buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used,
|
||||||
|
* // otherwise the buffer of "m.rows" floats will be allocated
|
||||||
|
* // dynamically and deallocated in cv::AutoBuffer destructor
|
||||||
|
* ...
|
||||||
|
* }
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/core/utility.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/utility.hpp#L128).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class AutoBuffer {
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
public constructor(_size: size_t);
|
||||||
|
|
||||||
|
public constructor(buf: AutoBuffer);
|
||||||
|
|
||||||
|
public allocate(_size: size_t): void;
|
||||||
|
|
||||||
|
public data(): any;
|
||||||
|
|
||||||
|
public data(): any;
|
||||||
|
|
||||||
|
public deallocate(): void;
|
||||||
|
|
||||||
|
public resize(_size: size_t): void;
|
||||||
|
|
||||||
|
public size(): size_t;
|
||||||
|
}
|
||||||
// ==== opencv-js-4.10.0/src/types/opencv/BFMatcher.ts (new file, 37 lines) ====
|
||||||
|
import type { bool, DescriptorMatcher, int, Ptr } from "./_types";
|
||||||
|
|
||||||
|
/**
 * Brute-force descriptor matcher.
 *
 * For each descriptor in the first set, this matcher finds the closest descriptor in the second set by
 * trying each one. This descriptor matcher supports masking permissible matches of descriptor sets.
 *
 * Source:
 * [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L1140).
 */
export declare class BFMatcher extends DescriptorMatcher {
  // See create() below for the meaning of normType and crossCheck.
  public constructor(normType?: int, crossCheck?: bool);

  /**
   * @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
   * that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
   * object copy with the current parameters but with empty train data.
   */
  public clone(emptyTrainData?: bool): Ptr;

  // Whether the matcher supports masking permissible matches (the class doc states it does).
  public isMaskSupported(): bool;

  /**
   * @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are
   * preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
   * BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor
   * description).
   *
   * @param crossCheck If it is false, this is will be default BFMatcher behaviour when it finds the k
   * nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with
   * k=1 will only return pairs (i,j) such that for i-th query descriptor the j-th descriptor in the
   * matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent
   * pairs. Such technique usually produces best results with minimal number of outliers when there are
   * enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
   */
  public static create(normType?: int, crossCheck?: bool): Ptr;
}
|
||||||
// ==== opencv-js-4.10.0/src/types/opencv/BOWTrainer.ts (new file, 43 lines) ====
|
||||||
|
import type { int, Mat } from "./_types";
|
||||||
|
|
||||||
|
/**
 * Trainer for a *bag of visual words* vocabulary built from sets of descriptors.
 *
 * For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,
 * Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. :
 *
 * Source:
 * [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L1339).
 */
export declare class BOWTrainer {
  public constructor();

  /**
   * Adds descriptors to the training set.
   *
   * The training set is clustered using clustermethod to construct the vocabulary.
   *
   * @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a
   * descriptor.
   */
  public add(descriptors: Mat): Mat;

  // Removes all descriptors from the training set.
  public clear(): void;

  /**
   * This is an overloaded member function, provided for convenience. It differs from the above
   * function only in what argument(s) it accepts. Clusters the train descriptors previously added
   * via add().
   */
  public cluster(): Mat;

  /**
   * The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first
   * variant of the method, train descriptors stored in the object are clustered. In the second variant,
   * input descriptors are clustered.
   *
   * @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.
   * Descriptors are not added to the inner train descriptor set.
   */
  public cluster(descriptors: Mat): Mat;

  // Number of descriptors currently stored in the training set.
  public descriptorsCount(): int;

  // Returns the stored training set of descriptors.
  public getDescriptors(): Mat;
}
|
||||||
// ==== opencv-js-4.10.0/src/types/opencv/CascadeClassifier.ts (new file, 153 lines) ====
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
FileNode,
|
||||||
|
InputArray,
|
||||||
|
int,
|
||||||
|
Mat,
|
||||||
|
Ptr,
|
||||||
|
Size,
|
||||||
|
} from "./_types";
|
||||||
|
|
||||||
|
export declare class CascadeClassifier extends Mat {
|
||||||
|
public cc: Ptr;
|
||||||
|
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param filename Name of the file from which the classifier is loaded.
|
||||||
|
*/
|
||||||
|
public constructor(filename: String);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function is parallelized with the TBB library.
|
||||||
|
*
|
||||||
|
* (Python) A face detection example using cascade classifiers can be found at
|
||||||
|
* opencv_source_code/samples/python/facedetect.py
|
||||||
|
*
|
||||||
|
* @param image Matrix of the type CV_8U containing an image where objects are detected.
|
||||||
|
*
|
||||||
|
* @param objects Vector of rectangles where each rectangle contains the detected object, the
|
||||||
|
* rectangles may be partially outside the original image.
|
||||||
|
*
|
||||||
|
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
|
||||||
|
*
|
||||||
|
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
|
||||||
|
* to retain it.
|
||||||
|
*
|
||||||
|
* @param flags Parameter with the same meaning for an old cascade as in the function
|
||||||
|
* cvHaarDetectObjects. It is not used for a new cascade.
|
||||||
|
*
|
||||||
|
* @param minSize Minimum possible object size. Objects smaller than that are ignored.
|
||||||
|
*
|
||||||
|
* @param maxSize Maximum possible object size. Objects larger than that are ignored. If maxSize ==
|
||||||
|
* minSize model is evaluated on single scale.
|
||||||
|
*/
|
||||||
|
public detectMultiScale(
|
||||||
|
image: InputArray,
|
||||||
|
objects: any,
|
||||||
|
scaleFactor?: double,
|
||||||
|
minNeighbors?: int,
|
||||||
|
flags?: int,
|
||||||
|
minSize?: Size,
|
||||||
|
maxSize?: Size,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param image Matrix of the type CV_8U containing an image where objects are detected.
|
||||||
|
*
|
||||||
|
* @param objects Vector of rectangles where each rectangle contains the detected object, the
|
||||||
|
* rectangles may be partially outside the original image.
|
||||||
|
*
|
||||||
|
* @param numDetections Vector of detection numbers for the corresponding objects. An object's number
|
||||||
|
* of detections is the number of neighboring positively classified rectangles that were joined
|
||||||
|
* together to form the object.
|
||||||
|
*
|
||||||
|
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
|
||||||
|
*
|
||||||
|
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
|
||||||
|
* to retain it.
|
||||||
|
*
|
||||||
|
* @param flags Parameter with the same meaning for an old cascade as in the function
|
||||||
|
* cvHaarDetectObjects. It is not used for a new cascade.
|
||||||
|
*
|
||||||
|
* @param minSize Minimum possible object size. Objects smaller than that are ignored.
|
||||||
|
*
|
||||||
|
* @param maxSize Maximum possible object size. Objects larger than that are ignored. If maxSize ==
|
||||||
|
* minSize model is evaluated on single scale.
|
||||||
|
*/
|
||||||
|
public detectMultiScale(
|
||||||
|
image: InputArray,
|
||||||
|
objects: any,
|
||||||
|
numDetections: any,
|
||||||
|
scaleFactor?: double,
|
||||||
|
minNeighbors?: int,
|
||||||
|
flags?: int,
|
||||||
|
minSize?: Size,
|
||||||
|
maxSize?: Size,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts. This function allows you to retrieve the final stage
|
||||||
|
* decision certainty of classification. For this, one needs to set `outputRejectLevels` on true and
|
||||||
|
* provide the `rejectLevels` and `levelWeights` parameter. For each resulting detection,
|
||||||
|
* `levelWeights` will then contain the certainty of classification at the final stage. This value can
|
||||||
|
* then be used to separate strong from weaker classifications.
|
||||||
|
*
|
||||||
|
* A code sample on how to use it efficiently can be found below:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* Mat img;
|
||||||
|
* vector<double> weights;
|
||||||
|
* vector<int> levels;
|
||||||
|
* vector<Rect> detections;
|
||||||
|
* CascadeClassifier model("/path/to/your/model.xml");
|
||||||
|
* model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
|
||||||
|
* cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
public detectMultiScale(
|
||||||
|
image: InputArray,
|
||||||
|
objects: any,
|
||||||
|
rejectLevels: any,
|
||||||
|
levelWeights: any,
|
||||||
|
scaleFactor?: double,
|
||||||
|
minNeighbors?: int,
|
||||||
|
flags?: int,
|
||||||
|
minSize?: Size,
|
||||||
|
maxSize?: Size,
|
||||||
|
outputRejectLevels?: bool,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
public empty(): bool;
|
||||||
|
|
||||||
|
public getFeatureType(): int;
|
||||||
|
|
||||||
|
public getMaskGenerator(): Ptr;
|
||||||
|
|
||||||
|
public getOldCascade(): any;
|
||||||
|
|
||||||
|
public getOriginalWindowSize(): Size;
|
||||||
|
|
||||||
|
public isOldFormatCascade(): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param filename Name of the file from which the classifier is loaded. The file may contain an old
|
||||||
|
* HAAR classifier trained by the haartraining application or a new cascade classifier trained by the
|
||||||
|
* traincascade application.
|
||||||
|
*/
|
||||||
|
public load(filename: String): String;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The file may contain a new cascade classifier (trained traincascade application) only.
|
||||||
|
*/
|
||||||
|
public read(node: FileNode): FileNode;
|
||||||
|
|
||||||
|
public setMaskGenerator(maskGenerator: Ptr): Ptr;
|
||||||
|
|
||||||
|
public static convert(oldcascade: String, newcascade: String): String;
|
||||||
|
}
|
||||||
236
opencv-js-4.10.0/src/types/opencv/DescriptorMatcher.ts
Normal file
236
opencv-js-4.10.0/src/types/opencv/DescriptorMatcher.ts
Normal file
|
|
@ -0,0 +1,236 @@
|
||||||
|
import type {
|
||||||
|
Algorithm,
|
||||||
|
bool,
|
||||||
|
FileNode,
|
||||||
|
FileStorage,
|
||||||
|
float,
|
||||||
|
InputArray,
|
||||||
|
InputArrayOfArrays,
|
||||||
|
int,
|
||||||
|
Mat,
|
||||||
|
Ptr,
|
||||||
|
} from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* It has two groups of match methods: for matching descriptors of an image with another image or with
|
||||||
|
* an image set.
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L860).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class DescriptorMatcher extends Algorithm {
|
||||||
|
/**
|
||||||
|
* If the collection is not empty, the new descriptors are added to existing train descriptors.
|
||||||
|
*
|
||||||
|
* @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
|
||||||
|
* train image.
|
||||||
|
*/
|
||||||
|
public add(descriptors: InputArrayOfArrays): InputArrayOfArrays;
|
||||||
|
|
||||||
|
public clear(): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
|
||||||
|
* that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
|
||||||
|
* object copy with the current parameters but with empty train data.
|
||||||
|
*/
|
||||||
|
public clone(emptyTrainData?: bool): Ptr;
|
||||||
|
|
||||||
|
public empty(): bool;
|
||||||
|
|
||||||
|
public getTrainDescriptors(): Mat;
|
||||||
|
|
||||||
|
public isMaskSupported(): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* These extended variants of [DescriptorMatcher::match] methods find several best matches for each
|
||||||
|
* query descriptor. The matches are returned in the distance increasing order. See
|
||||||
|
* [DescriptorMatcher::match] for the details about query and train descriptors.
|
||||||
|
*
|
||||||
|
* @param queryDescriptors Query set of descriptors.
|
||||||
|
*
|
||||||
|
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
|
||||||
|
* collection stored in the class object.
|
||||||
|
*
|
||||||
|
* @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
|
||||||
|
*
|
||||||
|
* @param k Count of best matches found per each query descriptor or less if a query descriptor has
|
||||||
|
* less than k possible matches in total.
|
||||||
|
*
|
||||||
|
* @param mask Mask specifying permissible matches between an input query and train matrices of
|
||||||
|
* descriptors.
|
||||||
|
*
|
||||||
|
* @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
|
||||||
|
* false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
|
||||||
|
* matches vector does not contain matches for fully masked-out query descriptors.
|
||||||
|
*/
|
||||||
|
public knnMatch(
|
||||||
|
queryDescriptors: InputArray,
|
||||||
|
trainDescriptors: InputArray,
|
||||||
|
matches: any,
|
||||||
|
k: int,
|
||||||
|
mask?: InputArray,
|
||||||
|
compactResult?: bool,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param queryDescriptors Query set of descriptors.
|
||||||
|
*
|
||||||
|
* @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
|
||||||
|
*
|
||||||
|
* @param k Count of best matches found per each query descriptor or less if a query descriptor has
|
||||||
|
* less than k possible matches in total.
|
||||||
|
*
|
||||||
|
* @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
|
||||||
|
* descriptors and stored train descriptors from the i-th image trainDescCollection[i].
|
||||||
|
*
|
||||||
|
* @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
|
||||||
|
* false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
|
||||||
|
* matches vector does not contain matches for fully masked-out query descriptors.
|
||||||
|
*/
|
||||||
|
public knnMatch(
|
||||||
|
queryDescriptors: InputArray,
|
||||||
|
matches: any,
|
||||||
|
k: int,
|
||||||
|
masks?: InputArrayOfArrays,
|
||||||
|
compactResult?: bool,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* In the first variant of this method, the train descriptors are passed as an input argument. In the
|
||||||
|
* second variant of the method, train descriptors collection that was set by [DescriptorMatcher::add]
|
||||||
|
* is used. Optional mask (or masks) can be passed to specify which query and training descriptors can
|
||||||
|
* be matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
|
||||||
|
* mask.at<uchar>(i,j) is non-zero.
|
||||||
|
*
|
||||||
|
* @param queryDescriptors Query set of descriptors.
|
||||||
|
*
|
||||||
|
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
|
||||||
|
* collection stored in the class object.
|
||||||
|
*
|
||||||
|
* @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
|
||||||
|
* descriptor. So, matches size may be smaller than the query descriptors count.
|
||||||
|
*
|
||||||
|
* @param mask Mask specifying permissible matches between an input query and train matrices of
|
||||||
|
* descriptors.
|
||||||
|
*/
|
||||||
|
public match(
|
||||||
|
queryDescriptors: InputArray,
|
||||||
|
trainDescriptors: InputArray,
|
||||||
|
matches: any,
|
||||||
|
mask?: InputArray,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param queryDescriptors Query set of descriptors.
|
||||||
|
*
|
||||||
|
* @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
|
||||||
|
* descriptor. So, matches size may be smaller than the query descriptors count.
|
||||||
|
*
|
||||||
|
* @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
|
||||||
|
* descriptors and stored train descriptors from the i-th image trainDescCollection[i].
|
||||||
|
*/
|
||||||
|
public match(
|
||||||
|
queryDescriptors: InputArray,
|
||||||
|
matches: any,
|
||||||
|
masks?: InputArrayOfArrays,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* For each query descriptor, the methods find such training descriptors that the distance between
|
||||||
|
* the query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches
|
||||||
|
* are returned in the distance increasing order.
|
||||||
|
*
|
||||||
|
* @param queryDescriptors Query set of descriptors.
|
||||||
|
*
|
||||||
|
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
|
||||||
|
* collection stored in the class object.
|
||||||
|
*
|
||||||
|
* @param matches Found matches.
|
||||||
|
*
|
||||||
|
* @param maxDistance Threshold for the distance between matched descriptors. Distance means here
|
||||||
|
* metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in
|
||||||
|
* Pixels)!
|
||||||
|
*
|
||||||
|
* @param mask Mask specifying permissible matches between an input query and train matrices of
|
||||||
|
* descriptors.
|
||||||
|
*
|
||||||
|
* @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
|
||||||
|
* false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
|
||||||
|
* matches vector does not contain matches for fully masked-out query descriptors.
|
||||||
|
*/
|
||||||
|
public radiusMatch(
|
||||||
|
queryDescriptors: InputArray,
|
||||||
|
trainDescriptors: InputArray,
|
||||||
|
matches: any,
|
||||||
|
maxDistance: float,
|
||||||
|
mask?: InputArray,
|
||||||
|
compactResult?: bool,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param queryDescriptors Query set of descriptors.
|
||||||
|
*
|
||||||
|
* @param matches Found matches.
|
||||||
|
*
|
||||||
|
* @param maxDistance Threshold for the distance between matched descriptors. Distance means here
|
||||||
|
* metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in
|
||||||
|
* Pixels)!
|
||||||
|
*
|
||||||
|
* @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
|
||||||
|
* descriptors and stored train descriptors from the i-th image trainDescCollection[i].
|
||||||
|
*
|
||||||
|
* @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
|
||||||
|
* false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
|
||||||
|
* matches vector does not contain matches for fully masked-out query descriptors.
|
||||||
|
*/
|
||||||
|
public radiusMatch(
|
||||||
|
queryDescriptors: InputArray,
|
||||||
|
matches: any,
|
||||||
|
maxDistance: float,
|
||||||
|
masks?: InputArrayOfArrays,
|
||||||
|
compactResult?: bool,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
  /**
   * Reads matcher state from the named file.
   * NOTE(review): upstream read() returns void; the echoed return type looks
   * like a binding-generator artifact — confirm before using the result.
   */
  public read(fileName: String): String;
|
||||||
|
|
||||||
|
  /** Reads matcher state from an already-opened file node. */
  public read(fn: FileNode): FileNode;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
|
||||||
|
* [train()] is run every time before matching. Some descriptor matchers (for example,
|
||||||
|
* BruteForceMatcher) have an empty implementation of this method. Other matchers really train their
|
||||||
|
* inner structures (for example, [FlannBasedMatcher] trains [flann::Index] ).
|
||||||
|
*/
|
||||||
|
public train(): void;
|
||||||
|
|
||||||
|
  /**
   * Writes matcher state to the named file.
   * NOTE(review): upstream write() returns void; the echoed return type looks
   * like a binding-generator artifact — confirm before using the result.
   */
  public write(fileName: String): String;
|
||||||
|
|
||||||
|
  /** Writes matcher state to an already-opened file storage. */
  public write(fs: FileStorage): FileStorage;
|
||||||
|
|
||||||
|
  /** Writes matcher state to a file storage held by a smart pointer, under an optional node name. */
  public write(fs: Ptr, name?: String): Ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare const FLANNBASED: MatcherType; // initializer: = 1
|
||||||
|
|
||||||
|
export declare const BRUTEFORCE: MatcherType; // initializer: = 2
|
||||||
|
|
||||||
|
export declare const BRUTEFORCE_L1: MatcherType; // initializer: = 3
|
||||||
|
|
||||||
|
export declare const BRUTEFORCE_HAMMING: MatcherType; // initializer: = 4
|
||||||
|
|
||||||
|
export declare const BRUTEFORCE_HAMMINGLUT: MatcherType; // initializer: = 5
|
||||||
|
|
||||||
|
export declare const BRUTEFORCE_SL2: MatcherType; // initializer: = 6
|
||||||
|
|
||||||
|
export type MatcherType = any;
|
||||||
68
opencv-js-4.10.0/src/types/opencv/DynamicBitset.ts
Normal file
68
opencv-js-4.10.0/src/types/opencv/DynamicBitset.ts
Normal file
|
|
@ -0,0 +1,68 @@
|
||||||
|
import type { bool, size_t } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Class re-implementing the boost version of it This helps not depending on boost, it also does not do
|
||||||
|
* the bound checks and has a way to reset a block for speed
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/flann/dynamic_bitset.h](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/flann/dynamic_bitset.h#L150).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class DynamicBitset {
|
||||||
|
/**
|
||||||
|
* default constructor
|
||||||
|
*/
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* only constructor we use in our code
|
||||||
|
*
|
||||||
|
* @param sz the size of the bitset (in bits)
|
||||||
|
*/
|
||||||
|
public constructor(sz: size_t);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets all the bits to 0
|
||||||
|
*/
|
||||||
|
public clear(): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* true if the bitset is empty
|
||||||
|
*/
|
||||||
|
public empty(): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* set all the bits to 0
|
||||||
|
*/
|
||||||
|
public reset(): void;
|
||||||
|
|
||||||
|
public reset(index: size_t): void;
|
||||||
|
|
||||||
|
public reset_block(index: size_t): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* resize the bitset so that it contains at least sz bits
|
||||||
|
*/
|
||||||
|
public resize(sz: size_t): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* set a bit to true
|
||||||
|
*
|
||||||
|
* @param index the index of the bit to set to 1
|
||||||
|
*/
|
||||||
|
public set(index: size_t): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* gives the number of contained bits
|
||||||
|
*/
|
||||||
|
public size(): size_t;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* check if a bit is set
|
||||||
|
*
|
||||||
|
* true if the bit is set
|
||||||
|
*
|
||||||
|
* @param index the index of the bit to check
|
||||||
|
*/
|
||||||
|
public test(index: size_t): bool;
|
||||||
|
}
|
||||||
54
opencv-js-4.10.0/src/types/opencv/Exception.ts
Normal file
54
opencv-js-4.10.0/src/types/opencv/Exception.ts
Normal file
|
|
@ -0,0 +1,54 @@
|
||||||
|
import type { int } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This class encapsulates all or almost all necessary information about the error happened in the
|
||||||
|
* program. The exception is usually constructed and thrown implicitly via CV_Error and CV_Error_
|
||||||
|
* macros.
|
||||||
|
*
|
||||||
|
* [error](#db/de0/group__core__utils_1gacbd081fdb20423a63cf731569ba70b2b})
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/core.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core.hpp#L135).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class Exception {
|
||||||
|
/**
|
||||||
|
* CVStatus
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
public code: int;
|
||||||
|
|
||||||
|
public err: String;
|
||||||
|
|
||||||
|
public file: String;
|
||||||
|
|
||||||
|
public func: String;
|
||||||
|
|
||||||
|
public line: int;
|
||||||
|
|
||||||
|
public msg: String;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default constructor
|
||||||
|
*/
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Full constructor. Normally the constructor is not called explicitly. Instead, the macros
|
||||||
|
* [CV_Error()], [CV_Error_()] and [CV_Assert()] are used.
|
||||||
|
*/
|
||||||
|
public constructor(
|
||||||
|
_code: int,
|
||||||
|
_err: String,
|
||||||
|
_func: String,
|
||||||
|
_file: String,
|
||||||
|
_line: int,
|
||||||
|
);
|
||||||
|
|
||||||
|
public formatMessage(): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* the error description and the context as a text string.
|
||||||
|
*/
|
||||||
|
public what(): any;
|
||||||
|
}
|
||||||
20
opencv-js-4.10.0/src/types/opencv/Feature2D.ts
Normal file
20
opencv-js-4.10.0/src/types/opencv/Feature2D.ts
Normal file
|
|
@ -0,0 +1,20 @@
|
||||||
|
import type { Algorithm, KeyPointVector, Mat, OutputArray } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* https://docs.opencv.org/4.10.0/d0/d13/classcv_1_1Feature2D.html
|
||||||
|
*/
|
||||||
|
export declare class Feature2D extends Algorithm {
|
||||||
|
/**
|
||||||
|
* Detects keypoints and computes the descriptors
|
||||||
|
* @param img
|
||||||
|
* @param mask
|
||||||
|
* @param keypoints
|
||||||
|
* @param descriptors
|
||||||
|
*/
|
||||||
|
public detectAndCompute(
|
||||||
|
img: Mat,
|
||||||
|
mask: Mat,
|
||||||
|
keypoints: KeyPointVector,
|
||||||
|
descriptors: OutputArray,
|
||||||
|
): void;
|
||||||
|
}
|
||||||
50
opencv-js-4.10.0/src/types/opencv/FlannBasedMatcher.ts
Normal file
50
opencv-js-4.10.0/src/types/opencv/FlannBasedMatcher.ts
Normal file
|
|
@ -0,0 +1,50 @@
|
||||||
|
import type { bool, DescriptorMatcher, FileNode, FileStorage, InputArrayOfArrays, Ptr } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This matcher trains [cv::flann::Index](#d1/db2/classcv_1_1flann_1_1Index}) on a train descriptor
|
||||||
|
* collection and calls its nearest search methods to find the best matches. So, this matcher may be
|
||||||
|
* faster when matching a large train collection than the brute force matcher.
|
||||||
|
* [FlannBasedMatcher](#dc/de2/classcv_1_1FlannBasedMatcher}) does not support masking permissible
|
||||||
|
* matches of descriptor sets because [flann::Index](#d1/db2/classcv_1_1flann_1_1Index}) does not
|
||||||
|
* support this. :
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L1187).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class FlannBasedMatcher extends DescriptorMatcher {
|
||||||
|
public constructor(indexParams?: Ptr, searchParams?: Ptr);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If the collection is not empty, the new descriptors are added to existing train descriptors.
|
||||||
|
*
|
||||||
|
* @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
|
||||||
|
* train image.
|
||||||
|
*/
|
||||||
|
public add(descriptors: InputArrayOfArrays): InputArrayOfArrays;
|
||||||
|
|
||||||
|
public clear(): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
|
||||||
|
* that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
|
||||||
|
* object copy with the current parameters but with empty train data.
|
||||||
|
*/
|
||||||
|
public clone(emptyTrainData?: bool): Ptr;
|
||||||
|
|
||||||
|
public isMaskSupported(): bool;
|
||||||
|
|
||||||
|
public read(fn: FileNode): FileNode;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
|
||||||
|
* [train()] is run every time before matching. Some descriptor matchers (for example,
|
||||||
|
* BruteForceMatcher) have an empty implementation of this method. Other matchers really train their
|
||||||
|
* inner structures (for example, [FlannBasedMatcher] trains [flann::Index] ).
|
||||||
|
*/
|
||||||
|
public train(): void;
|
||||||
|
|
||||||
|
public write(fs: FileStorage): FileStorage;
|
||||||
|
|
||||||
|
public static create(): Ptr;
|
||||||
|
}
|
||||||
401
opencv-js-4.10.0/src/types/opencv/HOGDescriptor.ts
Normal file
401
opencv-js-4.10.0/src/types/opencv/HOGDescriptor.ts
Normal file
|
|
@ -0,0 +1,401 @@
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
FileNode,
|
||||||
|
FileStorage,
|
||||||
|
float,
|
||||||
|
InputArray,
|
||||||
|
InputOutputArray,
|
||||||
|
int,
|
||||||
|
Point,
|
||||||
|
Size,
|
||||||
|
size_t,
|
||||||
|
UMat,
|
||||||
|
} from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* the HOG descriptor algorithm introduced by Navneet Dalal and Bill Triggs Dalal2005 .
|
||||||
|
*
|
||||||
|
* useful links:
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/objdetect.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/objdetect.hpp#L377).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class HOGDescriptor {
|
||||||
|
public blockSize: Size;
|
||||||
|
|
||||||
|
public blockStride: Size;
|
||||||
|
|
||||||
|
public cellSize: Size;
|
||||||
|
|
||||||
|
public derivAperture: int;
|
||||||
|
|
||||||
|
public free_coef: float;
|
||||||
|
|
||||||
|
public gammaCorrection: bool;
|
||||||
|
|
||||||
|
public histogramNormType: any;
|
||||||
|
|
||||||
|
public L2HysThreshold: double;
|
||||||
|
|
||||||
|
public nbins: int;
|
||||||
|
|
||||||
|
public nlevels: int;
|
||||||
|
|
||||||
|
public oclSvmDetector: UMat;
|
||||||
|
|
||||||
|
public signedGradient: bool;
|
||||||
|
|
||||||
|
public svmDetector: any;
|
||||||
|
|
||||||
|
public winSigma: double;
|
||||||
|
|
||||||
|
public winSize: Size;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* aqual to [HOGDescriptor](Size(64,128), Size(16,16), Size(8,8), Size(8,8), 9 )
|
||||||
|
*/
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param _winSize sets winSize with given value.
|
||||||
|
*
|
||||||
|
* @param _blockSize sets blockSize with given value.
|
||||||
|
*
|
||||||
|
* @param _blockStride sets blockStride with given value.
|
||||||
|
*
|
||||||
|
* @param _cellSize sets cellSize with given value.
|
||||||
|
*
|
||||||
|
* @param _nbins sets nbins with given value.
|
||||||
|
*
|
||||||
|
* @param _derivAperture sets derivAperture with given value.
|
||||||
|
*
|
||||||
|
* @param _winSigma sets winSigma with given value.
|
||||||
|
*
|
||||||
|
* @param _histogramNormType sets histogramNormType with given value.
|
||||||
|
*
|
||||||
|
* @param _L2HysThreshold sets L2HysThreshold with given value.
|
||||||
|
*
|
||||||
|
* @param _gammaCorrection sets gammaCorrection with given value.
|
||||||
|
*
|
||||||
|
* @param _nlevels sets nlevels with given value.
|
||||||
|
*
|
||||||
|
* @param _signedGradient sets signedGradient with given value.
|
||||||
|
*/
|
||||||
|
public constructor(
|
||||||
|
_winSize: Size,
|
||||||
|
_blockSize: Size,
|
||||||
|
_blockStride: Size,
|
||||||
|
_cellSize: Size,
|
||||||
|
_nbins: int,
|
||||||
|
_derivAperture?: int,
|
||||||
|
_winSigma?: double,
|
||||||
|
_histogramNormType?: any,
|
||||||
|
_L2HysThreshold?: double,
|
||||||
|
_gammaCorrection?: bool,
|
||||||
|
_nlevels?: int,
|
||||||
|
_signedGradient?: bool,
|
||||||
|
);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param filename The file name containing HOGDescriptor properties and coefficients for the linear
|
||||||
|
* SVM classifier.
|
||||||
|
*/
|
||||||
|
public constructor(filename: String);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param d the HOGDescriptor which cloned to create a new one.
|
||||||
|
*/
|
||||||
|
public constructor(d: HOGDescriptor);
|
||||||
|
|
||||||
|
public checkDetectorSize(): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param img Matrix of the type CV_8U containing an image where HOG features will be calculated.
|
||||||
|
*
|
||||||
|
* @param descriptors Matrix of the type CV_32F
|
||||||
|
*
|
||||||
|
* @param winStride Window stride. It must be a multiple of block stride.
|
||||||
|
*
|
||||||
|
* @param padding Padding
|
||||||
|
*
|
||||||
|
* @param locations Vector of Point
|
||||||
|
*/
|
||||||
|
public compute(
|
||||||
|
img: InputArray,
|
||||||
|
descriptors: any,
|
||||||
|
winStride?: Size,
|
||||||
|
padding?: Size,
|
||||||
|
locations?: Point,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param img Matrix contains the image to be computed
|
||||||
|
*
|
||||||
|
* @param grad Matrix of type CV_32FC2 contains computed gradients
|
||||||
|
*
|
||||||
|
* @param angleOfs Matrix of type CV_8UC2 contains quantized gradient orientations
|
||||||
|
*
|
||||||
|
* @param paddingTL Padding from top-left
|
||||||
|
*
|
||||||
|
* @param paddingBR Padding from bottom-right
|
||||||
|
*/
|
||||||
|
public computeGradient(
|
||||||
|
img: InputArray,
|
||||||
|
grad: InputOutputArray,
|
||||||
|
angleOfs: InputOutputArray,
|
||||||
|
paddingTL?: Size,
|
||||||
|
paddingBR?: Size,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param c cloned HOGDescriptor
|
||||||
|
*/
|
||||||
|
public copyTo(c: HOGDescriptor): HOGDescriptor;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
|
||||||
|
*
|
||||||
|
* @param foundLocations Vector of point where each point contains left-top corner point of detected
|
||||||
|
* object boundaries.
|
||||||
|
*
|
||||||
|
* @param weights Vector that will contain confidence values for each detected object.
|
||||||
|
*
|
||||||
|
* @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually
|
||||||
|
* it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if
|
||||||
|
* the free coefficient is omitted (which is allowed), you can specify it manually here.
|
||||||
|
*
|
||||||
|
* @param winStride Window stride. It must be a multiple of block stride.
|
||||||
|
*
|
||||||
|
* @param padding Padding
|
||||||
|
*
|
||||||
|
* @param searchLocations Vector of Point includes set of requested locations to be evaluated.
|
||||||
|
*/
|
||||||
|
public detect(
|
||||||
|
img: InputArray,
|
||||||
|
foundLocations: any,
|
||||||
|
weights: any,
|
||||||
|
hitThreshold?: double,
|
||||||
|
winStride?: Size,
|
||||||
|
padding?: Size,
|
||||||
|
searchLocations?: Point,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
|
||||||
|
*
|
||||||
|
* @param foundLocations Vector of point where each point contains left-top corner point of detected
|
||||||
|
* object boundaries.
|
||||||
|
*
|
||||||
|
* @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually
|
||||||
|
* it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if
|
||||||
|
* the free coefficient is omitted (which is allowed), you can specify it manually here.
|
||||||
|
*
|
||||||
|
* @param winStride Window stride. It must be a multiple of block stride.
|
||||||
|
*
|
||||||
|
* @param padding Padding
|
||||||
|
*
|
||||||
|
* @param searchLocations Vector of Point includes locations to search.
|
||||||
|
*/
|
||||||
|
public detect(
|
||||||
|
img: InputArray,
|
||||||
|
foundLocations: any,
|
||||||
|
hitThreshold?: double,
|
||||||
|
winStride?: Size,
|
||||||
|
padding?: Size,
|
||||||
|
searchLocations?: Point,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
|
||||||
|
*
|
||||||
|
* @param foundLocations Vector of rectangles where each rectangle contains the detected object.
|
||||||
|
*
|
||||||
|
* @param foundWeights Vector that will contain confidence values for each detected object.
|
||||||
|
*
|
||||||
|
* @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually
|
||||||
|
* it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if
|
||||||
|
* the free coefficient is omitted (which is allowed), you can specify it manually here.
|
||||||
|
*
|
||||||
|
* @param winStride Window stride. It must be a multiple of block stride.
|
||||||
|
*
|
||||||
|
* @param padding Padding
|
||||||
|
*
|
||||||
|
* @param scale Coefficient of the detection window increase.
|
||||||
|
*
|
||||||
|
* @param finalThreshold Final threshold
|
||||||
|
*
|
||||||
|
* @param useMeanshiftGrouping indicates grouping algorithm
|
||||||
|
*/
|
||||||
|
public detectMultiScale(
|
||||||
|
img: InputArray,
|
||||||
|
foundLocations: any,
|
||||||
|
foundWeights: any,
|
||||||
|
hitThreshold?: double,
|
||||||
|
winStride?: Size,
|
||||||
|
padding?: Size,
|
||||||
|
scale?: double,
|
||||||
|
finalThreshold?: double,
|
||||||
|
useMeanshiftGrouping?: bool,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
|
||||||
|
*
|
||||||
|
* @param foundLocations Vector of rectangles where each rectangle contains the detected object.
|
||||||
|
*
|
||||||
|
* @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually
|
||||||
|
* it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if
|
||||||
|
* the free coefficient is omitted (which is allowed), you can specify it manually here.
|
||||||
|
*
|
||||||
|
* @param winStride Window stride. It must be a multiple of block stride.
|
||||||
|
*
|
||||||
|
* @param padding Padding
|
||||||
|
*
|
||||||
|
* @param scale Coefficient of the detection window increase.
|
||||||
|
*
|
||||||
|
* @param finalThreshold Final threshold
|
||||||
|
*
|
||||||
|
* @param useMeanshiftGrouping indicates grouping algorithm
|
||||||
|
*/
|
||||||
|
public detectMultiScale(
|
||||||
|
img: InputArray,
|
||||||
|
foundLocations: any,
|
||||||
|
hitThreshold?: double,
|
||||||
|
winStride?: Size,
|
||||||
|
padding?: Size,
|
||||||
|
scale?: double,
|
||||||
|
finalThreshold?: double,
|
||||||
|
useMeanshiftGrouping?: bool,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
|
||||||
|
*
|
||||||
|
* @param foundLocations Vector of rectangles where each rectangle contains the detected object.
|
||||||
|
*
|
||||||
|
* @param locations Vector of DetectionROI
|
||||||
|
*
|
||||||
|
* @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually
|
||||||
|
* it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if
|
||||||
|
* the free coefficient is omitted (which is allowed), you can specify it manually here.
|
||||||
|
*
|
||||||
|
* @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a
|
||||||
|
* group of rectangles to retain it.
|
||||||
|
*/
|
||||||
|
public detectMultiScaleROI(
|
||||||
|
img: InputArray,
|
||||||
|
foundLocations: any,
|
||||||
|
locations: any,
|
||||||
|
hitThreshold?: double,
|
||||||
|
groupThreshold?: int,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
|
||||||
|
*
|
||||||
|
* @param locations Vector of Point
|
||||||
|
*
|
||||||
|
* @param foundLocations Vector of Point where each Point is detected object's top-left point.
|
||||||
|
*
|
||||||
|
* @param confidences confidences
|
||||||
|
*
|
||||||
|
* @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually
|
||||||
|
* it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if
|
||||||
|
* the free coefficient is omitted (which is allowed), you can specify it manually here
|
||||||
|
*
|
||||||
|
* @param winStride winStride
|
||||||
|
*
|
||||||
|
* @param padding padding
|
||||||
|
*/
|
||||||
|
public detectROI(
|
||||||
|
img: InputArray,
|
||||||
|
locations: any,
|
||||||
|
foundLocations: any,
|
||||||
|
confidences: any,
|
||||||
|
hitThreshold?: double,
|
||||||
|
winStride?: any,
|
||||||
|
padding?: any,
|
||||||
|
): InputArray;
|
||||||
|
|
||||||
|
public getDescriptorSize(): size_t;
|
||||||
|
|
||||||
|
public getWinSigma(): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param rectList Input/output vector of rectangles. Output vector includes retained and grouped
|
||||||
|
* rectangles. (The Python list is not modified in place.)
|
||||||
|
*
|
||||||
|
* @param weights Input/output vector of weights of rectangles. Output vector includes weights of
|
||||||
|
* retained and grouped rectangles. (The Python list is not modified in place.)
|
||||||
|
*
|
||||||
|
* @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a
|
||||||
|
* group of rectangles to retain it.
|
||||||
|
*
|
||||||
|
* @param eps Relative difference between sides of the rectangles to merge them into a group.
|
||||||
|
*/
|
||||||
|
public groupRectangles(
|
||||||
|
rectList: any,
|
||||||
|
weights: any,
|
||||||
|
groupThreshold: int,
|
||||||
|
eps: double,
|
||||||
|
): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param filename Path of the file to read.
|
||||||
|
*
|
||||||
|
* @param objname The optional name of the node to read (if empty, the first top-level node will be
|
||||||
|
* used).
|
||||||
|
*/
|
||||||
|
public load(filename: String, objname?: String): String;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param fn File node
|
||||||
|
*/
|
||||||
|
public read(fn: FileNode): FileNode;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param filename File name
|
||||||
|
*
|
||||||
|
* @param objname Object name
|
||||||
|
*/
|
||||||
|
public save(filename: String, objname?: String): String;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param svmdetector coefficients for the linear SVM classifier.
|
||||||
|
*/
|
||||||
|
public setSVMDetector(svmdetector: InputArray): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param fs File storage
|
||||||
|
*
|
||||||
|
* @param objname Object name
|
||||||
|
*/
|
||||||
|
public write(fs: FileStorage, objname: String): FileStorage;
|
||||||
|
|
||||||
|
public static getDaimlerPeopleDetector(): any;
|
||||||
|
|
||||||
|
public static getDefaultPeopleDetector(): any;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare const DEFAULT_NLEVELS: any; // initializer: = 64
|
||||||
|
|
||||||
|
export declare const DESCR_FORMAT_COL_BY_COL: DescriptorStorageFormat; // initializer:
|
||||||
|
|
||||||
|
export declare const DESCR_FORMAT_ROW_BY_ROW: DescriptorStorageFormat; // initializer:
|
||||||
|
|
||||||
|
export declare const L2Hys: HistogramNormType; // initializer: = 0
|
||||||
|
|
||||||
|
export type DescriptorStorageFormat = any;
|
||||||
|
|
||||||
|
export type HistogramNormType = any;
|
||||||
34
opencv-js-4.10.0/src/types/opencv/Logger.ts
Normal file
34
opencv-js-4.10.0/src/types/opencv/Logger.ts
Normal file
|
|
@ -0,0 +1,34 @@
|
||||||
|
import type { int } from "./_types";
|
||||||
|
|
||||||
|
export declare class Logger {
|
||||||
|
public static error(fmt: any, arg121: any): int;
|
||||||
|
|
||||||
|
public static fatal(fmt: any, arg122: any): int;
|
||||||
|
|
||||||
|
public static info(fmt: any, arg123: any): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Print log message
|
||||||
|
*
|
||||||
|
* @param level Log level
|
||||||
|
*
|
||||||
|
* @param fmt Message format
|
||||||
|
*/
|
||||||
|
public static log(level: int, fmt: any, arg124: any): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the logging destination
|
||||||
|
*
|
||||||
|
* @param name Filename or NULL for console
|
||||||
|
*/
|
||||||
|
public static setDestination(name: any): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sets the logging level. All messages with lower priority will be ignored.
|
||||||
|
*
|
||||||
|
* @param level Logging level
|
||||||
|
*/
|
||||||
|
public static setLevel(level: int): void;
|
||||||
|
|
||||||
|
public static warn(fmt: any, arg125: any): int;
|
||||||
|
}
|
||||||
81
opencv-js-4.10.0/src/types/opencv/LshTable.ts
Normal file
81
opencv-js-4.10.0/src/types/opencv/LshTable.ts
Normal file
|
|
@ -0,0 +1,81 @@
|
||||||
|
import type { Bucket, BucketKey, LshStats, Matrix, size_t } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Lsh hash table. As its key is a sub-feature, and as usually the size of it is pretty small, we keep
|
||||||
|
* it as a continuous memory array. The value is an index in the corpus of features (we keep it as an
|
||||||
|
* unsigned int for pure memory reasons, it could be a size_t)
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/flann/lsh_table.h](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/flann/lsh_table.h#L261).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class LshTable {
|
||||||
|
/**
|
||||||
|
* Default constructor
|
||||||
|
*/
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default constructor Create the mask and allocate the memory
|
||||||
|
*
|
||||||
|
* @param feature_size is the size of the feature (considered as a ElementType[])
|
||||||
|
*
|
||||||
|
* @param key_size is the number of bits that are turned on in the feature
|
||||||
|
*/
|
||||||
|
public constructor(feature_size: any, key_size: any);
|
||||||
|
|
||||||
|
public constructor(feature_size: any, subsignature_size: any);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a feature to the table
|
||||||
|
*
|
||||||
|
* @param value the value to store for that feature
|
||||||
|
*
|
||||||
|
* @param feature the feature itself
|
||||||
|
*/
|
||||||
|
public add(value: any, feature: any): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a set of features to the table
|
||||||
|
*
|
||||||
|
* @param dataset the values to store
|
||||||
|
*/
|
||||||
|
public add(dataset: Matrix): Matrix;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a bucket given the key
|
||||||
|
*/
|
||||||
|
public getBucketFromKey(key: BucketKey): Bucket;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compute the sub-signature of a feature
|
||||||
|
*/
|
||||||
|
public getKey(arg50: any): size_t;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return the Subsignature of a feature
|
||||||
|
*
|
||||||
|
* @param feature the feature to analyze
|
||||||
|
*/
|
||||||
|
public getKey(feature: any): size_t;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get statistics about the table
|
||||||
|
*/
|
||||||
|
public getStats(): LshStats;
|
||||||
|
|
||||||
|
public getStats(): LshStats;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare const kArray: SpeedLevel; // initializer:
|
||||||
|
|
||||||
|
export declare const kBitsetHash: SpeedLevel; // initializer:
|
||||||
|
|
||||||
|
export declare const kHash: SpeedLevel; // initializer:
|
||||||
|
|
||||||
|
/**
|
||||||
|
* defines the speed fo the implementation kArray uses a vector for storing data kBitsetHash uses a
|
||||||
|
* hash map but checks for the validity of a key with a bitset kHash uses a hash map only
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export type SpeedLevel = any;
|
||||||
1791
opencv-js-4.10.0/src/types/opencv/Mat.ts
Normal file
1791
opencv-js-4.10.0/src/types/opencv/Mat.ts
Normal file
File diff suppressed because it is too large
Load diff
107
opencv-js-4.10.0/src/types/opencv/MatExpr.ts
Normal file
107
opencv-js-4.10.0/src/types/opencv/MatExpr.ts
Normal file
|
|
@ -0,0 +1,107 @@
|
||||||
|
import type { double, int, Mat, MatOp, Scalar } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* <a name="d1/d10/classcv_1_1MatExpr_1MatrixExpressions"></a>This is a list of implemented matrix
|
||||||
|
* operations that can be combined in arbitrary complex expressions (here A, B stand for matrices (
|
||||||
|
* [Mat](#d3/d63/classcv_1_1Mat}) ), s for a scalar ( Scalar ), alpha for a real-valued scalar ( double
|
||||||
|
* )):
|
||||||
|
*
|
||||||
|
* Addition, subtraction, negation: `A+B`, `A-B`, `A+s`, `A-s`, `s+A`, `s-A`, `-A`
|
||||||
|
* Scaling: `A*alpha`
|
||||||
|
* Per-element multiplication and division: `A.mul(B)`, `A/B`, `alpha/A`
|
||||||
|
* Matrix multiplication: `A*B`
|
||||||
|
* Transposition: `A.t()` (means A)
|
||||||
|
* Matrix inversion and pseudo-inversion, solving linear systems and least-squares problems:
|
||||||
|
* `A.inv([method]) (~ A<sup>-1</sup>)`, `A.inv([method])*B (~ X: AX=B)`
|
||||||
|
* Comparison: `A cmpop B`, `A cmpop alpha`, `alpha cmpop A`, where *cmpop* is one of `>`, `>=`, `==`,
|
||||||
|
* `!=`, `<=`, `<`. The result of comparison is an 8-bit single channel mask whose elements are set to
|
||||||
|
* 255 (if the particular element or pair of elements satisfy the condition) or 0.
|
||||||
|
* Bitwise logical operations: `A logicop B`, `A logicop s`, `s logicop A`, `~A`, where *logicop* is
|
||||||
|
* one of `&`, `|`, `^`.
|
||||||
|
* Element-wise minimum and maximum: `min(A, B)`, `min(A, alpha)`, `max(A, B)`, `max(A, alpha)`
|
||||||
|
* Element-wise absolute value: `abs(A)`
|
||||||
|
* Cross-product, dot-product: `A.cross(B)`, `A.dot(B)`
|
||||||
|
* Any function of matrix or matrices and scalars that returns a matrix or a scalar, such as norm,
|
||||||
|
* mean, sum, countNonZero, trace, determinant, repeat, and others.
|
||||||
|
* Matrix initializers ( [Mat::eye()](#d3/d63/classcv_1_1Mat_1a2cf9b9acde7a9852542bbc20ef851ed2}),
|
||||||
|
* [Mat::zeros()](#d3/d63/classcv_1_1Mat_1a0b57b6a326c8876d944d188a46e0f556}),
|
||||||
|
* [Mat::ones()](#d3/d63/classcv_1_1Mat_1a69ae0402d116fc9c71908d8508dc2f09}) ), matrix comma-separated
|
||||||
|
* initializers, matrix constructors and operators that extract sub-matrices (see
|
||||||
|
* [Mat](#d3/d63/classcv_1_1Mat}) description).
|
||||||
|
* Mat_<destination_type>() constructors to cast the result to the proper type.
|
||||||
|
*
|
||||||
|
* Comma-separated initializers and probably some other operations may require additional explicit
|
||||||
|
* Mat() or Mat_<T>() constructor calls to resolve a possible ambiguity.
|
||||||
|
* Here are examples of matrix expressions:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* // compute pseudo-inverse of A, equivalent to A.inv(DECOMP_SVD)
|
||||||
|
* SVD svd(A);
|
||||||
|
* Mat pinvA = svd.vt.t()*Mat::diag(1./svd.w)*svd.u.t();
|
||||||
|
*
|
||||||
|
* // compute the new vector of parameters in the Levenberg-Marquardt algorithm
|
||||||
|
* x -= (A.t()*A + lambda*Mat::eye(A.cols,A.cols,A.type())).inv(DECOMP_CHOLESKY)*(A.t()*err);
|
||||||
|
*
|
||||||
|
* // sharpen image using "unsharp mask" algorithm
|
||||||
|
* Mat blurred; double sigma = 1, threshold = 5, amount = 1;
|
||||||
|
* GaussianBlur(img, blurred, Size(), sigma, sigma);
|
||||||
|
* Mat lowContrastMask = abs(img - blurred) < threshold;
|
||||||
|
* Mat sharpened = img*(1+amount) + blurred*(-amount);
|
||||||
|
* img.copyTo(sharpened, lowContrastMask);
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/core/mat.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/mat.hpp#L3557).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class MatExpr extends Mat {
|
||||||
|
public a: Mat;
|
||||||
|
|
||||||
|
public alpha: double;
|
||||||
|
|
||||||
|
public b: Mat;
|
||||||
|
|
||||||
|
public beta: double;
|
||||||
|
|
||||||
|
public c: Mat;
|
||||||
|
|
||||||
|
public flags: int;
|
||||||
|
|
||||||
|
public op: MatOp;
|
||||||
|
|
||||||
|
public s: Scalar;
|
||||||
|
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
public constructor(m: Mat);
|
||||||
|
|
||||||
|
public constructor(
|
||||||
|
_op: MatOp,
|
||||||
|
_flags: int,
|
||||||
|
_a?: Mat,
|
||||||
|
_b?: Mat,
|
||||||
|
_c?: Mat,
|
||||||
|
_alpha?: double,
|
||||||
|
_beta?: double,
|
||||||
|
_s?: Scalar,
|
||||||
|
);
|
||||||
|
|
||||||
|
public col(x: int): MatExpr;
|
||||||
|
|
||||||
|
public cross(m: Mat): Mat;
|
||||||
|
|
||||||
|
public diag(d?: int): MatExpr;
|
||||||
|
|
||||||
|
public dot(m: Mat): Mat;
|
||||||
|
|
||||||
|
public inv(method?: int): MatExpr;
|
||||||
|
|
||||||
|
public mul(e: MatExpr, scale?: double): MatExpr;
|
||||||
|
|
||||||
|
public mul(m: Mat, scale?: double): MatExpr;
|
||||||
|
|
||||||
|
public row(y: int): MatExpr;
|
||||||
|
public t(): MatExpr;
|
||||||
|
|
||||||
|
public type(): int;
|
||||||
|
}
|
||||||
70
opencv-js-4.10.0/src/types/opencv/MatOp.ts
Normal file
70
opencv-js-4.10.0/src/types/opencv/MatOp.ts
Normal file
|
|
@ -0,0 +1,70 @@
|
||||||
|
import type { double, int, Mat, MatExpr, Scalar, Size } from "./_types";
|
||||||
|
|
||||||
|
export declare class MatOp {
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
public abs(expr: MatExpr, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public add(expr1: MatExpr, expr2: MatExpr, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public add(expr1: MatExpr, s: Scalar, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public assign(expr: MatExpr, m: Mat, type?: int): MatExpr;
|
||||||
|
|
||||||
|
public augAssignAdd(expr: MatExpr, m: Mat): MatExpr;
|
||||||
|
|
||||||
|
public augAssignAnd(expr: MatExpr, m: Mat): MatExpr;
|
||||||
|
|
||||||
|
public augAssignDivide(expr: MatExpr, m: Mat): MatExpr;
|
||||||
|
|
||||||
|
public augAssignMultiply(expr: MatExpr, m: Mat): MatExpr;
|
||||||
|
|
||||||
|
public augAssignOr(expr: MatExpr, m: Mat): MatExpr;
|
||||||
|
|
||||||
|
public augAssignSubtract(expr: MatExpr, m: Mat): MatExpr;
|
||||||
|
|
||||||
|
public augAssignXor(expr: MatExpr, m: Mat): MatExpr;
|
||||||
|
|
||||||
|
public diag(expr: MatExpr, d: int, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public divide(
|
||||||
|
expr1: MatExpr,
|
||||||
|
expr2: MatExpr,
|
||||||
|
res: MatExpr,
|
||||||
|
scale?: double,
|
||||||
|
): MatExpr;
|
||||||
|
|
||||||
|
public divide(s: double, expr: MatExpr, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public elementWise(expr: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public invert(expr: MatExpr, method: int, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public matmul(expr1: MatExpr, expr2: MatExpr, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public multiply(
|
||||||
|
expr1: MatExpr,
|
||||||
|
expr2: MatExpr,
|
||||||
|
res: MatExpr,
|
||||||
|
scale?: double,
|
||||||
|
): MatExpr;
|
||||||
|
|
||||||
|
public multiply(expr1: MatExpr, s: double, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public roi(
|
||||||
|
expr: MatExpr,
|
||||||
|
rowRange: Range,
|
||||||
|
colRange: Range,
|
||||||
|
res: MatExpr,
|
||||||
|
): MatExpr;
|
||||||
|
|
||||||
|
public size(expr: MatExpr): Size;
|
||||||
|
|
||||||
|
public subtract(expr1: MatExpr, expr2: MatExpr, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public subtract(s: Scalar, expr: MatExpr, res: MatExpr): Scalar;
|
||||||
|
|
||||||
|
public transpose(expr: MatExpr, res: MatExpr): MatExpr;
|
||||||
|
|
||||||
|
public type(expr: MatExpr): MatExpr;
|
||||||
|
}
|
||||||
228
opencv-js-4.10.0/src/types/opencv/Matx.ts
Normal file
228
opencv-js-4.10.0/src/types/opencv/Matx.ts
Normal file
|
|
@ -0,0 +1,228 @@
|
||||||
|
import type {
|
||||||
|
diag_type,
|
||||||
|
int,
|
||||||
|
Matx_AddOp,
|
||||||
|
Matx_DivOp,
|
||||||
|
Matx_MatMulOp,
|
||||||
|
Matx_MulOp,
|
||||||
|
Matx_ScaleOp,
|
||||||
|
Matx_SubOp,
|
||||||
|
Matx_TOp,
|
||||||
|
Vec,
|
||||||
|
_T2,
|
||||||
|
_Tp,
|
||||||
|
} from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If you need a more flexible type, use [Mat](#d3/d63/classcv_1_1Mat}) . The elements of the matrix M
|
||||||
|
* are accessible using the M(i,j) notation. Most of the common matrix operations (see also
|
||||||
|
* [MatrixExpressions](#d1/d10/classcv_1_1MatExpr_1MatrixExpressions}) ) are available. To do an
|
||||||
|
* operation on [Matx](#de/de1/classcv_1_1Matx}) that is not implemented, you can easily convert the
|
||||||
|
* matrix to [Mat](#d3/d63/classcv_1_1Mat}) and backwards:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* Matx33f m(1, 2, 3,
|
||||||
|
* 4, 5, 6,
|
||||||
|
* 7, 8, 9);
|
||||||
|
* cout << sum(Mat(m*m.t())) << endl;
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* Except of the plain constructor which takes a list of elements, [Matx](#de/de1/classcv_1_1Matx})
|
||||||
|
* can be initialized from a C-array:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* float values[] = { 1, 2, 3};
|
||||||
|
* Matx31f m(values);
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* In case if C++11 features are available, std::initializer_list can be also used to initialize
|
||||||
|
* [Matx](#de/de1/classcv_1_1Matx}):
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* Matx31f m = { 1, 2, 3};
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/core/matx.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/matx.hpp#L1185).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class Matx {
|
||||||
|
public val: _Tp;
|
||||||
|
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
public constructor(v0: _Tp);
|
||||||
|
|
||||||
|
public constructor(v0: _Tp, v1: _Tp);
|
||||||
|
|
||||||
|
public constructor(v0: _Tp, v1: _Tp, v2: _Tp);
|
||||||
|
|
||||||
|
public constructor(v0: _Tp, v1: _Tp, v2: _Tp, v3: _Tp);
|
||||||
|
|
||||||
|
public constructor(v0: _Tp, v1: _Tp, v2: _Tp, v3: _Tp, v4: _Tp);
|
||||||
|
|
||||||
|
public constructor(v0: _Tp, v1: _Tp, v2: _Tp, v3: _Tp, v4: _Tp, v5: _Tp);
|
||||||
|
|
||||||
|
public constructor(
|
||||||
|
v0: _Tp,
|
||||||
|
v1: _Tp,
|
||||||
|
v2: _Tp,
|
||||||
|
v3: _Tp,
|
||||||
|
v4: _Tp,
|
||||||
|
v5: _Tp,
|
||||||
|
v6: _Tp,
|
||||||
|
);
|
||||||
|
|
||||||
|
public constructor(
|
||||||
|
v0: _Tp,
|
||||||
|
v1: _Tp,
|
||||||
|
v2: _Tp,
|
||||||
|
v3: _Tp,
|
||||||
|
v4: _Tp,
|
||||||
|
v5: _Tp,
|
||||||
|
v6: _Tp,
|
||||||
|
v7: _Tp,
|
||||||
|
);
|
||||||
|
|
||||||
|
public constructor(
|
||||||
|
v0: _Tp,
|
||||||
|
v1: _Tp,
|
||||||
|
v2: _Tp,
|
||||||
|
v3: _Tp,
|
||||||
|
v4: _Tp,
|
||||||
|
v5: _Tp,
|
||||||
|
v6: _Tp,
|
||||||
|
v7: _Tp,
|
||||||
|
v8: _Tp,
|
||||||
|
);
|
||||||
|
|
||||||
|
public constructor(
|
||||||
|
v0: _Tp,
|
||||||
|
v1: _Tp,
|
||||||
|
v2: _Tp,
|
||||||
|
v3: _Tp,
|
||||||
|
v4: _Tp,
|
||||||
|
v5: _Tp,
|
||||||
|
v6: _Tp,
|
||||||
|
v7: _Tp,
|
||||||
|
v8: _Tp,
|
||||||
|
v9: _Tp,
|
||||||
|
);
|
||||||
|
|
||||||
|
public constructor(
|
||||||
|
v0: _Tp,
|
||||||
|
v1: _Tp,
|
||||||
|
v2: _Tp,
|
||||||
|
v3: _Tp,
|
||||||
|
v4: _Tp,
|
||||||
|
v5: _Tp,
|
||||||
|
v6: _Tp,
|
||||||
|
v7: _Tp,
|
||||||
|
v8: _Tp,
|
||||||
|
v9: _Tp,
|
||||||
|
v10: _Tp,
|
||||||
|
v11: _Tp,
|
||||||
|
);
|
||||||
|
|
||||||
|
public constructor(
|
||||||
|
v0: _Tp,
|
||||||
|
v1: _Tp,
|
||||||
|
v2: _Tp,
|
||||||
|
v3: _Tp,
|
||||||
|
v4: _Tp,
|
||||||
|
v5: _Tp,
|
||||||
|
v6: _Tp,
|
||||||
|
v7: _Tp,
|
||||||
|
v8: _Tp,
|
||||||
|
v9: _Tp,
|
||||||
|
v10: _Tp,
|
||||||
|
v11: _Tp,
|
||||||
|
v12: _Tp,
|
||||||
|
v13: _Tp,
|
||||||
|
);
|
||||||
|
|
||||||
|
public constructor(
|
||||||
|
v0: _Tp,
|
||||||
|
v1: _Tp,
|
||||||
|
v2: _Tp,
|
||||||
|
v3: _Tp,
|
||||||
|
v4: _Tp,
|
||||||
|
v5: _Tp,
|
||||||
|
v6: _Tp,
|
||||||
|
v7: _Tp,
|
||||||
|
v8: _Tp,
|
||||||
|
v9: _Tp,
|
||||||
|
v10: _Tp,
|
||||||
|
v11: _Tp,
|
||||||
|
v12: _Tp,
|
||||||
|
v13: _Tp,
|
||||||
|
v14: _Tp,
|
||||||
|
v15: _Tp,
|
||||||
|
);
|
||||||
|
|
||||||
|
public constructor(vals: any);
|
||||||
|
|
||||||
|
public constructor(arg334: any);
|
||||||
|
|
||||||
|
public constructor(a: Matx, b: Matx, arg335: Matx_AddOp);
|
||||||
|
|
||||||
|
public constructor(a: Matx, b: Matx, arg336: Matx_SubOp);
|
||||||
|
|
||||||
|
public constructor(arg337: any, a: Matx, alpha: _T2, arg338: Matx_ScaleOp);
|
||||||
|
|
||||||
|
public constructor(a: Matx, b: Matx, arg339: Matx_MulOp);
|
||||||
|
|
||||||
|
public constructor(a: Matx, b: Matx, arg340: Matx_DivOp);
|
||||||
|
|
||||||
|
public constructor(l: int, a: Matx, b: Matx, arg341: Matx_MatMulOp);
|
||||||
|
|
||||||
|
public constructor(a: Matx, arg342: Matx_TOp);
|
||||||
|
|
||||||
|
public col(i: int): Matx;
|
||||||
|
|
||||||
|
public ddot(v: Matx): Matx;
|
||||||
|
|
||||||
|
public diag(): diag_type;
|
||||||
|
|
||||||
|
public div(a: Matx): Matx;
|
||||||
|
|
||||||
|
public dot(v: Matx): Matx;
|
||||||
|
|
||||||
|
public get_minor(m1: int, n1: int, base_row: int, base_col: int): Matx;
|
||||||
|
|
||||||
|
public inv(method?: int, p_is_ok?: any): Matx;
|
||||||
|
|
||||||
|
public mul(a: Matx): Matx;
|
||||||
|
|
||||||
|
public reshape(m1: int, n1: int): Matx;
|
||||||
|
|
||||||
|
public row(i: int): Matx;
|
||||||
|
|
||||||
|
public solve(l: int, rhs: Matx, flags?: int): Matx;
|
||||||
|
|
||||||
|
public solve(rhs: Vec, method: int): Vec;
|
||||||
|
|
||||||
|
public t(): Matx;
|
||||||
|
|
||||||
|
public static all(alpha: _Tp): Matx;
|
||||||
|
|
||||||
|
public static diag(d: diag_type): Matx;
|
||||||
|
|
||||||
|
public static eye(): Matx;
|
||||||
|
|
||||||
|
public static ones(): Matx;
|
||||||
|
|
||||||
|
public static randn(a: _Tp, b: _Tp): Matx;
|
||||||
|
|
||||||
|
public static randu(a: _Tp, b: _Tp): Matx;
|
||||||
|
|
||||||
|
public static zeros(): Matx;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare const rows: any; // initializer: = m
|
||||||
|
|
||||||
|
export declare const cols: any; // initializer: = n
|
||||||
|
|
||||||
|
export declare const channels: any; // initializer: = rows*cols
|
||||||
|
|
||||||
|
export declare const shortdim: any; // initializer: = (m < n ? m : n)
|
||||||
33
opencv-js-4.10.0/src/types/opencv/Node.ts
Normal file
33
opencv-js-4.10.0/src/types/opencv/Node.ts
Normal file
|
|
@ -0,0 +1,33 @@
|
||||||
|
import type { double, int } from "./_types";
|
||||||
|
|
||||||
|
export declare class Node {
|
||||||
|
/**
|
||||||
|
* Class index normalized to 0..class_count-1 range and assigned to the node. It is used internally
|
||||||
|
* in classification trees and tree ensembles.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
public classIdx: int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default direction where to go (-1: left or +1: right). It helps in the case of missing values.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
public defaultDir: int;
|
||||||
|
|
||||||
|
public left: int;
|
||||||
|
|
||||||
|
public parent: int;
|
||||||
|
|
||||||
|
public right: int;
|
||||||
|
|
||||||
|
public split: int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Value at the node: a class label in case of classification or estimated function value in case of
|
||||||
|
* regression.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
public value: double;
|
||||||
|
|
||||||
|
public constructor();
|
||||||
|
}
|
||||||
22
opencv-js-4.10.0/src/types/opencv/ORB.ts
Normal file
22
opencv-js-4.10.0/src/types/opencv/ORB.ts
Normal file
|
|
@ -0,0 +1,22 @@
|
||||||
|
import type { Feature2D, float, int } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* https://docs.opencv.org/4.10.0/db/d95/classcv_1_1ORB.html
|
||||||
|
*/
|
||||||
|
export declare class ORB extends Feature2D {
|
||||||
|
public constructor(
|
||||||
|
nfeatures?: int,
|
||||||
|
scaleFactor?: float,
|
||||||
|
nlevels?: int,
|
||||||
|
edgeThreshold?: int,
|
||||||
|
firstLevel?: int,
|
||||||
|
WTA_K?: int,
|
||||||
|
scoreType?: ORBScoreType,
|
||||||
|
patchSize?: int,
|
||||||
|
fastThreshold?: int,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
type ORBScoreType = int;
|
||||||
|
export declare const ORB_HARRIS_SCORE: ORBScoreType;
|
||||||
|
export declare const ORB_FAST_SCORE: ORBScoreType;
|
||||||
204
opencv-js-4.10.0/src/types/opencv/PCA.ts
Normal file
204
opencv-js-4.10.0/src/types/opencv/PCA.ts
Normal file
|
|
@ -0,0 +1,204 @@
|
||||||
|
import type {
|
||||||
|
double,
|
||||||
|
FileNode,
|
||||||
|
FileStorage,
|
||||||
|
InputArray,
|
||||||
|
int,
|
||||||
|
Mat,
|
||||||
|
OutputArray,
|
||||||
|
} from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The class is used to calculate a special basis for a set of vectors. The basis will consist of
|
||||||
|
* eigenvectors of the covariance matrix calculated from the input set of vectors. The class PCA can
|
||||||
|
* also transform vectors to/from the new coordinate space defined by the basis. Usually, in this new
|
||||||
|
* coordinate system, each vector from the original set (and any linear combination of such vectors)
|
||||||
|
* can be quite accurately approximated by taking its first few components, corresponding to the
|
||||||
|
* eigenvectors of the largest eigenvalues of the covariance matrix. Geometrically it means that you
|
||||||
|
* calculate a projection of the vector to a subspace formed by a few eigenvectors corresponding to the
|
||||||
|
* dominant eigenvalues of the covariance matrix. And usually such a projection is very close to the
|
||||||
|
* original vector. So, you can represent the original vector from a high-dimensional space with a much
|
||||||
|
* shorter vector consisting of the projected vector's coordinates in the subspace. Such a
|
||||||
|
* transformation is also known as Karhunen-Loeve Transform, or KLT. See
|
||||||
|
*
|
||||||
|
* The sample below is the function that takes two matrices. The first function stores a set of vectors
|
||||||
|
* (a row per vector) that is used to calculate [PCA](#d3/d8d/classcv_1_1PCA}). The second function
|
||||||
|
* stores another "test" set of vectors (a row per vector). First, these vectors are compressed with
|
||||||
|
* [PCA](#d3/d8d/classcv_1_1PCA}), then reconstructed back, and then the reconstruction error norm is
|
||||||
|
* computed and printed for each vector. :
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* using namespace cv;
|
||||||
|
*
|
||||||
|
* PCA compressPCA(const Mat& pcaset, int maxComponents,
|
||||||
|
* const Mat& testset, Mat& compressed)
|
||||||
|
* {
|
||||||
|
* PCA pca(pcaset, // pass the data
|
||||||
|
* Mat(), // we do not have a pre-computed mean vector,
|
||||||
|
* // so let the PCA engine to compute it
|
||||||
|
* PCA::DATA_AS_ROW, // indicate that the vectors
|
||||||
|
* // are stored as matrix rows
|
||||||
|
* // (use PCA::DATA_AS_COL if the vectors are
|
||||||
|
* // the matrix columns)
|
||||||
|
* maxComponents // specify, how many principal components to retain
|
||||||
|
* );
|
||||||
|
* // if there is no test data, just return the computed basis, ready-to-use
|
||||||
|
* if( !testset.data )
|
||||||
|
* return pca;
|
||||||
|
* CV_Assert( testset.cols == pcaset.cols );
|
||||||
|
*
|
||||||
|
* compressed.create(testset.rows, maxComponents, testset.type());
|
||||||
|
*
|
||||||
|
* Mat reconstructed;
|
||||||
|
* for( int i = 0; i < testset.rows; i++ )
|
||||||
|
* {
|
||||||
|
* Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed;
|
||||||
|
* // compress the vector, the result will be stored
|
||||||
|
* // in the i-th row of the output matrix
|
||||||
|
* pca.project(vec, coeffs);
|
||||||
|
* // and then reconstruct it
|
||||||
|
* pca.backProject(coeffs, reconstructed);
|
||||||
|
* // and measure the error
|
||||||
|
* printf("%d. diff = %g\\n", i, norm(vec, reconstructed, NORM_L2));
|
||||||
|
* }
|
||||||
|
* return pca;
|
||||||
|
* }
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* [calcCovarMatrix](#d2/de8/group__core__array_1gae6ffa9354633f984246945d52823165d}),
|
||||||
|
* [mulTransposed](#d2/de8/group__core__array_1gadc4e49f8f7a155044e3be1b9e3b270ab}),
|
||||||
|
* [SVD](#df/df7/classcv_1_1SVD}),
|
||||||
|
* [dft](#d2/de8/group__core__array_1gadd6cf9baf2b8b704a11b5f04aaf4f39d}),
|
||||||
|
* [dct](#d2/de8/group__core__array_1ga85aad4d668c01fbd64825f589e3696d4})
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/core.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core.hpp#L2393).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class PCA {
|
||||||
|
public eigenvalues: Mat;
|
||||||
|
|
||||||
|
public eigenvectors: Mat;
|
||||||
|
|
||||||
|
public mean: Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The default constructor initializes an empty PCA structure. The other constructors initialize the
|
||||||
|
* structure and call [PCA::operator()()].
|
||||||
|
*/
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param data input samples stored as matrix rows or matrix columns.
|
||||||
|
*
|
||||||
|
* @param mean optional mean value; if the matrix is empty (noArray()), the mean is computed from the
|
||||||
|
* data.
|
||||||
|
*
|
||||||
|
* @param flags operation flags; currently the parameter is only used to specify the data layout
|
||||||
|
* (PCA::Flags)
|
||||||
|
*
|
||||||
|
* @param maxComponents maximum number of components that PCA should retain; by default, all the
|
||||||
|
* components are retained.
|
||||||
|
*/
|
||||||
|
public constructor(
|
||||||
|
data: InputArray,
|
||||||
|
mean: InputArray,
|
||||||
|
flags: int,
|
||||||
|
maxComponents?: int,
|
||||||
|
);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param data input samples stored as matrix rows or matrix columns.
|
||||||
|
*
|
||||||
|
* @param mean optional mean value; if the matrix is empty (noArray()), the mean is computed from the
|
||||||
|
* data.
|
||||||
|
*
|
||||||
|
* @param flags operation flags; currently the parameter is only used to specify the data layout
|
||||||
|
* (PCA::Flags)
|
||||||
|
*
|
||||||
|
* @param retainedVariance Percentage of variance that PCA should retain. Using this parameter will
|
||||||
|
* let the PCA decided how many components to retain but it will always keep at least 2.
|
||||||
|
*/
|
||||||
|
public constructor(
|
||||||
|
data: InputArray,
|
||||||
|
mean: InputArray,
|
||||||
|
flags: int,
|
||||||
|
retainedVariance: double,
|
||||||
|
);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The methods are inverse operations to [PCA::project]. They take PC coordinates of projected
|
||||||
|
* vectors and reconstruct the original vectors. Unless all the principal components have been
|
||||||
|
* retained, the reconstructed vectors are different from the originals. But typically, the difference
|
||||||
|
* is small if the number of components is large enough (but still much smaller than the original
|
||||||
|
* vector dimensionality). As a result, [PCA] is used.
|
||||||
|
*
|
||||||
|
* @param vec coordinates of the vectors in the principal component subspace, the layout and size are
|
||||||
|
* the same as of PCA::project output vectors.
|
||||||
|
*/
|
||||||
|
public backProject(vec: InputArray): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param vec coordinates of the vectors in the principal component subspace, the layout and size are
|
||||||
|
* the same as of PCA::project output vectors.
|
||||||
|
*
|
||||||
|
* @param result reconstructed vectors; the layout and size are the same as of PCA::project input
|
||||||
|
* vectors.
|
||||||
|
*/
|
||||||
|
public backProject(vec: InputArray, result: OutputArray): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The methods project one or more vectors to the principal component subspace, where each vector
|
||||||
|
* projection is represented by coefficients in the principal component basis. The first form of the
|
||||||
|
* method returns the matrix that the second form writes to the result. So the first form can be used
|
||||||
|
* as a part of expression while the second form can be more efficient in a processing loop.
|
||||||
|
*
|
||||||
|
* @param vec input vector(s); must have the same dimensionality and the same layout as the input
|
||||||
|
* data used at PCA phase, that is, if DATA_AS_ROW are specified, then vec.cols==data.cols (vector
|
||||||
|
* dimensionality) and vec.rows is the number of vectors to project, and the same is true for the
|
||||||
|
* PCA::DATA_AS_COL case.
|
||||||
|
*/
|
||||||
|
public project(vec: InputArray): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above
|
||||||
|
* function only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param vec input vector(s); must have the same dimensionality and the same layout as the input
|
||||||
|
* data used at PCA phase, that is, if DATA_AS_ROW are specified, then vec.cols==data.cols (vector
|
||||||
|
* dimensionality) and vec.rows is the number of vectors to project, and the same is true for the
|
||||||
|
* PCA::DATA_AS_COL case.
|
||||||
|
*
|
||||||
|
* @param result output vectors; in case of PCA::DATA_AS_COL, the output matrix has as many columns
|
||||||
|
* as the number of input vectors, this means that result.cols==vec.cols and the number of rows match
|
||||||
|
* the number of principal components (for example, maxComponents parameter passed to the constructor).
|
||||||
|
*/
|
||||||
|
public project(vec: InputArray, result: OutputArray): InputArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Loads [eigenvalues] [eigenvectors] and [mean] from specified [FileNode]
|
||||||
|
*/
|
||||||
|
public read(fn: FileNode): FileNode;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Writes [eigenvalues] [eigenvectors] and [mean] to specified [FileStorage]
|
||||||
|
*/
|
||||||
|
public write(fs: FileStorage): FileStorage;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare const DATA_AS_ROW: Flags; // initializer: = 0
|
||||||
|
|
||||||
|
export declare const DATA_AS_COL: Flags; // initializer: = 1
|
||||||
|
|
||||||
|
export declare const USE_AVG: Flags; // initializer: = 2
|
||||||
|
|
||||||
|
export type Flags = any;
|
||||||
72
opencv-js-4.10.0/src/types/opencv/RotatedRect.ts
Normal file
72
opencv-js-4.10.0/src/types/opencv/RotatedRect.ts
Normal file
|
|
@ -0,0 +1,72 @@
|
||||||
|
import type { float, Point2f, Rect, Rect_, Size2f } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Each rectangle is specified by the center point (mass center), length of each side (represented by
|
||||||
|
* [Size2f](#dc/d84/group__core__basic_1gab34496d2466b5f69930ab74c70f117d4}) structure) and the
|
||||||
|
* rotation angle in degrees.
|
||||||
|
*
|
||||||
|
* The sample below demonstrates how to use [RotatedRect](#db/dd6/classcv_1_1RotatedRect}):
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* Mat test_image(200, 200, CV_8UC3, Scalar(0));
|
||||||
|
* RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);
|
||||||
|
*
|
||||||
|
* Point2f vertices[4];
|
||||||
|
* rRect.points(vertices);
|
||||||
|
* for (int i = 0; i < 4; i++)
|
||||||
|
* line(test_image, vertices[i], vertices[(i+1)%4], Scalar(0,255,0), 2);
|
||||||
|
*
|
||||||
|
* Rect brect = rRect.boundingRect();
|
||||||
|
* rectangle(test_image, brect, Scalar(255,0,0), 2);
|
||||||
|
*
|
||||||
|
* imshow("rectangles", test_image);
|
||||||
|
* waitKey(0);
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* [CamShift](#dc/d6b/group__video__track_1gaef2bd39c8356f423124f1fe7c44d54a1}),
|
||||||
|
* [fitEllipse](#d3/dc0/group__imgproc__shape_1gaf259efaad93098103d6c27b9e4900ffa}),
|
||||||
|
* [minAreaRect](#d3/dc0/group__imgproc__shape_1ga3d476a3417130ae5154aea421ca7ead9}), CvBox2D
|
||||||
|
*
|
||||||
|
* Source:
|
||||||
|
* [opencv2/core/types.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/types.hpp#L534).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare class RotatedRect {
|
||||||
|
public angle: float;
|
||||||
|
|
||||||
|
public center: Point2f;
|
||||||
|
|
||||||
|
public size: Size2f;
|
||||||
|
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* full constructor
|
||||||
|
*
|
||||||
|
* @param center The rectangle mass center.
|
||||||
|
*
|
||||||
|
* @param size Width and height of the rectangle.
|
||||||
|
*
|
||||||
|
* @param angle The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc.,
|
||||||
|
* the rectangle becomes an up-right rectangle.
|
||||||
|
*/
|
||||||
|
public constructor(center: Point2f, size: Size2f, angle: float);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Any 3 end points of the [RotatedRect]. They must be given in order (either clockwise or
|
||||||
|
* anticlockwise).
|
||||||
|
*/
|
||||||
|
public constructor(point1: Point2f, point2: Point2f, point3: Point2f);
|
||||||
|
|
||||||
|
public boundingRect(): Rect;
|
||||||
|
|
||||||
|
public boundingRect2f(): Rect_;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* returns 4 vertices of the rectangle
|
||||||
|
*
|
||||||
|
* @param pts The points array for storing rectangle vertices. The order is bottomLeft, topLeft,
|
||||||
|
* topRight, bottomRight.
|
||||||
|
*/
|
||||||
|
public points(pts: Point2f): Point2f;
|
||||||
|
}
|
||||||
1
opencv-js-4.10.0/src/types/opencv/Tracker.ts
Normal file
1
opencv-js-4.10.0/src/types/opencv/Tracker.ts
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
export declare class Tracker {}
|
||||||
3
opencv-js-4.10.0/src/types/opencv/TrackerMIL.ts
Normal file
3
opencv-js-4.10.0/src/types/opencv/TrackerMIL.ts
Normal file
|
|
@ -0,0 +1,3 @@
|
||||||
|
import type { Tracker } from "./_types";
|
||||||
|
|
||||||
|
export declare class TrackerMIL extends Tracker {}
|
||||||
327
opencv-js-4.10.0/src/types/opencv/_hacks.ts
Normal file
327
opencv-js-4.10.0/src/types/opencv/_hacks.ts
Normal file
|
|
@ -0,0 +1,327 @@
|
||||||
|
// Scalar, Point, Rect, etc are defined by opencv.js (helpers.js) and we need to declare them manually:
|
||||||
|
|
||||||
|
export declare class Range {
|
||||||
|
public start: number;
|
||||||
|
public end: number;
|
||||||
|
public constructor(start: number, end: number);
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare class Scalar extends Array<number> {
|
||||||
|
public static all(...v: number[]): Scalar;
|
||||||
|
}
|
||||||
|
// Hack: expose Mat super classes like Mat_, InputArray, Vector, OutputArray we make them alias of Mat to simplify and make it work
|
||||||
|
export { Mat as InputArray };
|
||||||
|
export { Mat as InputOutputArray };
|
||||||
|
export { Mat as OutputArray };
|
||||||
|
export { MatVector as InputArrayOfArrays };
|
||||||
|
export { MatVector as InputOutputArrayOfArrays };
|
||||||
|
export { MatVector as OutputArrayOfArrays };
|
||||||
|
export { Scalar as GScalar };
|
||||||
|
export { Point as Point2f };
|
||||||
|
export { Point as KeyPoint };
|
||||||
|
export { Point as Point2l };
|
||||||
|
export { Size as Point2d };
|
||||||
|
export { Size as Size2d };
|
||||||
|
export { Size as Size2f };
|
||||||
|
export { Size as Size2l };
|
||||||
|
export { Rect as Rect_ };
|
||||||
|
|
||||||
|
export declare class Point {
|
||||||
|
public constructor(x: number, y: number);
|
||||||
|
public x: number;
|
||||||
|
public y: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare class Circle {
|
||||||
|
public constructor(center: Point, radius: number);
|
||||||
|
public center: Point;
|
||||||
|
public radius: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare class Size {
|
||||||
|
public constructor(width: number, height: number);
|
||||||
|
public width: number;
|
||||||
|
public height: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare class Rect {
|
||||||
|
public constructor();
|
||||||
|
public constructor(point: Point, size: Size);
|
||||||
|
public constructor(x: number, y: number, width: number, height: number);
|
||||||
|
public x: number;
|
||||||
|
public y: number;
|
||||||
|
public width: number;
|
||||||
|
public height: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare class TermCriteria {
|
||||||
|
public type: number;
|
||||||
|
public maxCount: number;
|
||||||
|
public epsilon: number;
|
||||||
|
public constructor();
|
||||||
|
public constructor(type: number, maxCount: number, epsilon: number);
|
||||||
|
}
|
||||||
|
export declare const TermCriteria_EPS: any;
|
||||||
|
export declare const TermCriteria_COUNT: any;
|
||||||
|
export declare const TermCriteria_MAX_ITER: any;
|
||||||
|
|
||||||
|
export declare class MinMaxLoc {
|
||||||
|
public minVal: number;
|
||||||
|
public maxVal: number;
|
||||||
|
public minLoc: Point;
|
||||||
|
public maxLoc: Point;
|
||||||
|
public constructor();
|
||||||
|
public constructor(
|
||||||
|
minVal: number,
|
||||||
|
maxVal: number,
|
||||||
|
minLoc: Point,
|
||||||
|
maxLoc: Point,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// expose emscripten / opencv.js specifics
|
||||||
|
|
||||||
|
export declare function exceptionFromPtr(err: number): any;
|
||||||
|
export declare function onRuntimeInitialized(): any;
|
||||||
|
export declare function FS_createDataFile(
|
||||||
|
arg0: string,
|
||||||
|
path: string,
|
||||||
|
data: Uint8Array,
|
||||||
|
arg3: boolean,
|
||||||
|
arg4: boolean,
|
||||||
|
arg5: boolean,
|
||||||
|
): any;
|
||||||
|
|
||||||
|
import { Algorithm, type LineTypes, Mat, type NormTypes, RotatedRect } from ".";
|
||||||
|
import "../_cv";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base class for Contrast Limited Adaptive Histogram Equalization.
|
||||||
|
*/
|
||||||
|
export declare class CLAHE extends Algorithm {
|
||||||
|
/**
|
||||||
|
* @param clipLimit Threshold for contrast limiting. Default. 40.0,
|
||||||
|
* @param totalGridSize Size of grid for histogram equalization. Input image will be divided into equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column. Default: Size(8, 8)
|
||||||
|
*/
|
||||||
|
constructor(clipLimit?: double, totalGridSize?: Size);
|
||||||
|
/**
|
||||||
|
* Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
|
||||||
|
* @param src Source image of type CV_8UC1 or CV_16UC1.
|
||||||
|
* @param dst Destination image.
|
||||||
|
*/
|
||||||
|
apply(src: Mat, dst: Mat): void;
|
||||||
|
collectGarbage(): void;
|
||||||
|
/**
|
||||||
|
* Returns threshold value for contrast limiting.
|
||||||
|
*/
|
||||||
|
getClipLimit(): double;
|
||||||
|
/**
|
||||||
|
* Returns Size defines the number of tiles in row and column.
|
||||||
|
*/
|
||||||
|
getTilesGridSize(): Size;
|
||||||
|
/**
|
||||||
|
* Sets threshold for contrast limiting.
|
||||||
|
*/
|
||||||
|
setClipLimit(clipLimit: double): void;
|
||||||
|
/**
|
||||||
|
* Sets size of grid for histogram equalization. Input image will be divided into equally sized rectangular tiles.
|
||||||
|
* @param tileGridSize defines the number of tiles in row and column.
|
||||||
|
*/
|
||||||
|
setTilesGridSize(tileGridSize: Size): void;
|
||||||
|
}
|
||||||
|
|
||||||
|
// emscripten embind internals
|
||||||
|
export declare function getInheritedInstanceCount(...a: any[]): any;
|
||||||
|
export declare function getLiveInheritedInstances(...a: any[]): any;
|
||||||
|
export declare function flushPendingDeletes(...a: any[]): any;
|
||||||
|
export declare function setDelayFunction(...a: any[]): any;
|
||||||
|
|
||||||
|
export declare class EmscriptenEmbindInstance {
|
||||||
|
isAliasOf(other: any): bool;
|
||||||
|
clone(): any;
|
||||||
|
delete(): any;
|
||||||
|
isDeleted(): boolean;
|
||||||
|
deleteLater(): any;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare class InternalError extends Error {}
|
||||||
|
export declare class BindingError extends Error {}
|
||||||
|
export declare class UnboundTypeError extends Error {}
|
||||||
|
export declare class PureVirtualError extends Error {}
|
||||||
|
|
||||||
|
export declare class Vector<T> extends EmscriptenEmbindInstance {
|
||||||
|
get(i: number): T;
|
||||||
|
get(i: number, j: number, data: any): T;
|
||||||
|
set(i: number, t: T): void;
|
||||||
|
put(i: number, j: number, data: any): any;
|
||||||
|
size(): number;
|
||||||
|
push_back(n: T): any;
|
||||||
|
resize(count: number, value?: T): void;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare class Vec3d extends Vector<any> {}
|
||||||
|
export declare class IntVector extends Vector<number> {}
|
||||||
|
export declare class FloatVector extends Vector<number> {}
|
||||||
|
export declare class DoubleVector extends Vector<number> {}
|
||||||
|
export declare class PointVector extends Vector<Point> {}
|
||||||
|
export declare class KeyPointVector extends Vector<any> {}
|
||||||
|
export declare class DMatchVector extends Vector<any> {}
|
||||||
|
export declare class DMatchVectorVector extends Vector<Vector<any>> {}
|
||||||
|
export declare class MatVector extends Vector<Mat> {}
|
||||||
|
|
||||||
|
export declare class RectVector extends Rect implements Vector<Rect> {
|
||||||
|
get(i: number): Rect;
|
||||||
|
isAliasOf(...a: any[]): any;
|
||||||
|
clone(...a: any[]): any;
|
||||||
|
delete(...a: any[]): any;
|
||||||
|
isDeleted(...a: any[]): any;
|
||||||
|
deleteLater(...a: any[]): any;
|
||||||
|
set(i: number, t: Rect): void;
|
||||||
|
put(i: number, j: number, data: any): any;
|
||||||
|
size(): number;
|
||||||
|
push_back(n: Rect): void;
|
||||||
|
resize(count: number, value?: Rect | undefined): void;
|
||||||
|
delete(): void;
|
||||||
|
}
|
||||||
|
|
||||||
|
export declare class VideoCapture {
|
||||||
|
public constructor(videoSource: HTMLVideoElement | string);
|
||||||
|
public read(m: Mat): any;
|
||||||
|
public video: HTMLVideoElement;
|
||||||
|
}
|
||||||
|
|
||||||
|
export type MatSize = () => Size;
|
||||||
|
|
||||||
|
export declare function matFromImageData(imageData: ImageData): Mat;
|
||||||
|
export declare function matFromArray(
|
||||||
|
rows: number,
|
||||||
|
cols: number,
|
||||||
|
type: any,
|
||||||
|
array: number[] | ArrayBufferView,
|
||||||
|
): Mat;
|
||||||
|
|
||||||
|
export declare class ImageData {
|
||||||
|
data: ArrayBufferView;
|
||||||
|
width: number;
|
||||||
|
height: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO this types should be exposed by the tool - want to make it work:
|
||||||
|
export declare const CV_8U: CVDataType;
|
||||||
|
export declare const CV_8UC1: CVDataType;
|
||||||
|
export declare const CV_8UC2: CVDataType;
|
||||||
|
export declare const CV_8UC3: CVDataType;
|
||||||
|
export declare const CV_8UC4: CVDataType;
|
||||||
|
export declare const CV_8S: CVDataType;
|
||||||
|
export declare const CV_8SC1: CVDataType;
|
||||||
|
export declare const CV_8SC2: CVDataType;
|
||||||
|
export declare const CV_8SC3: CVDataType;
|
||||||
|
export declare const CV_8SC4: CVDataType;
|
||||||
|
export declare const CV_16U: CVDataType;
|
||||||
|
export declare const CV_16UC1: CVDataType;
|
||||||
|
export declare const CV_16UC2: CVDataType;
|
||||||
|
export declare const CV_16UC3: CVDataType;
|
||||||
|
export declare const CV_16UC4: CVDataType;
|
||||||
|
export declare const CV_16S: CVDataType;
|
||||||
|
export declare const CV_16SC1: CVDataType;
|
||||||
|
export declare const CV_16SC2: CVDataType;
|
||||||
|
export declare const CV_16SC3: CVDataType;
|
||||||
|
export declare const CV_16SC4: CVDataType;
|
||||||
|
export declare const CV_32S: CVDataType;
|
||||||
|
export declare const CV_32SC1: CVDataType;
|
||||||
|
export declare const CV_32SC2: CVDataType;
|
||||||
|
export declare const CV_32SC3: CVDataType;
|
||||||
|
export declare const CV_32SC4: CVDataType;
|
||||||
|
export declare const CV_32F: CVDataType;
|
||||||
|
export declare const CV_32FC1: CVDataType;
|
||||||
|
export declare const CV_32FC2: CVDataType;
|
||||||
|
export declare const CV_32FC3: CVDataType;
|
||||||
|
export declare const CV_32FC4: CVDataType;
|
||||||
|
export declare const CV_64F: CVDataType;
|
||||||
|
export declare const CV_64FC1: CVDataType;
|
||||||
|
export declare const CV_64FC2: CVDataType;
|
||||||
|
export declare const CV_64FC3: CVDataType;
|
||||||
|
export declare const CV_64FC4: CVDataType;
|
||||||
|
|
||||||
|
export type CVDataType = any;
|
||||||
|
|
||||||
|
export declare function ellipse1(
|
||||||
|
dst: Mat,
|
||||||
|
rotatedRect: RotatedRect,
|
||||||
|
ellipseColor: Scalar,
|
||||||
|
arg0: number,
|
||||||
|
line: LineTypes,
|
||||||
|
): void;
|
||||||
|
export declare function imread(
|
||||||
|
canvasOrImageHtmlElement: HTMLElement | string,
|
||||||
|
): Mat;
|
||||||
|
export declare function norm1(a: Mat, b: Mat, type: NormTypes): number;
|
||||||
|
export declare function imshow(
|
||||||
|
canvasSource: HTMLElement | string,
|
||||||
|
mat: Mat,
|
||||||
|
): void;
|
||||||
|
export declare function matFromArray(
|
||||||
|
rows: number,
|
||||||
|
cols: number,
|
||||||
|
type: any,
|
||||||
|
array: any,
|
||||||
|
): Mat;
|
||||||
|
|
||||||
|
// Missing imports:
|
||||||
|
export type Mat4 = any;
|
||||||
|
export type Mat3 = any;
|
||||||
|
export type Vec3 = any;
|
||||||
|
export type float_type = any;
|
||||||
|
export type int = number;
|
||||||
|
export type bool = boolean;
|
||||||
|
export type FileNode = any;
|
||||||
|
export type FileStorage = any;
|
||||||
|
export type Ptr = any;
|
||||||
|
export type size_t = any;
|
||||||
|
export type double = number;
|
||||||
|
export type float = number;
|
||||||
|
export type UMat = any;
|
||||||
|
export type Matrix = any;
|
||||||
|
export type BucketKey = any;
|
||||||
|
export type Bucket = any;
|
||||||
|
export type LshStats = any;
|
||||||
|
export type MatAllocator = any;
|
||||||
|
export type uchar = any;
|
||||||
|
export type MatStep = any;
|
||||||
|
export type UMatData = any;
|
||||||
|
export type typename = any;
|
||||||
|
export type Vec = any;
|
||||||
|
export type Point_ = any;
|
||||||
|
export type Point3_ = any;
|
||||||
|
export type MatCommaInitializer_ = any;
|
||||||
|
export type MatIterator_ = any;
|
||||||
|
export type MatConstIterator_ = any;
|
||||||
|
export type AccessFlag = any;
|
||||||
|
export type UMatUsageFlags = any;
|
||||||
|
export type _Tp = any;
|
||||||
|
export type Matx_AddOp = any;
|
||||||
|
export type Matx_SubOp = any;
|
||||||
|
export type _T2 = any;
|
||||||
|
export type Matx_ScaleOp = any;
|
||||||
|
export type Matx_MulOp = any;
|
||||||
|
export type Matx_DivOp = any;
|
||||||
|
export type Matx_MatMulOp = any;
|
||||||
|
export type Matx_TOp = any;
|
||||||
|
export type diag_type = any;
|
||||||
|
export type _EqPredicate = any;
|
||||||
|
export type cvhalDFT = any;
|
||||||
|
export type schar = any;
|
||||||
|
export type ushort = any;
|
||||||
|
export type short = any;
|
||||||
|
export type int64 = any;
|
||||||
|
export type ErrorCallback = any;
|
||||||
|
export type unsigned = any;
|
||||||
|
export type uint64 = any;
|
||||||
|
export type float16_t = any;
|
||||||
|
export type AsyncArray = any;
|
||||||
|
export type Net = any;
|
||||||
|
export type Moments = any;
|
||||||
|
export type uint64_t = any;
|
||||||
|
export type uint32_t = any;
|
||||||
|
export type int32_t = any;
|
||||||
|
export type int64_t = any;
|
||||||
47
opencv-js-4.10.0/src/types/opencv/_types.ts
Normal file
47
opencv-js-4.10.0/src/types/opencv/_types.ts
Normal file
|
|
@ -0,0 +1,47 @@
|
||||||
|
export * from "./Affine3";
|
||||||
|
export * from "./Algorithm";
|
||||||
|
export * from "./AutoBuffer";
|
||||||
|
export * from "./BFMatcher";
|
||||||
|
export * from "./BOWTrainer";
|
||||||
|
export * from "./calib3d";
|
||||||
|
export * from "./CascadeClassifier";
|
||||||
|
export * from "./core_array";
|
||||||
|
export * from "./core_cluster";
|
||||||
|
export * from "./core_hal_interface";
|
||||||
|
export * from "./core_utils";
|
||||||
|
export * from "./DescriptorMatcher";
|
||||||
|
export * from "./dnn";
|
||||||
|
export * from "./DynamicBitset";
|
||||||
|
export * from "./Exception";
|
||||||
|
export * from "./Feature2D";
|
||||||
|
export * from "./features2d_draw";
|
||||||
|
export * from "./fisheye";
|
||||||
|
export * from "./FlannBasedMatcher";
|
||||||
|
export * from "./HOGDescriptor";
|
||||||
|
export * from "./imgproc_color_conversions";
|
||||||
|
export * from "./imgproc_draw";
|
||||||
|
export * from "./imgproc_feature";
|
||||||
|
export * from "./imgproc_filter";
|
||||||
|
export * from "./imgproc_hist";
|
||||||
|
export * from "./imgproc_misc";
|
||||||
|
export * from "./imgproc_object";
|
||||||
|
export * from "./imgproc_shape";
|
||||||
|
export * from "./imgproc_transform";
|
||||||
|
export * from "./Logger";
|
||||||
|
export * from "./LshTable";
|
||||||
|
export * from "./Mat";
|
||||||
|
export * from "./MatExpr";
|
||||||
|
export * from "./MatOp";
|
||||||
|
export * from "./Matx";
|
||||||
|
export * from "./Node";
|
||||||
|
export * from "./objdetect";
|
||||||
|
export * from "./ORB";
|
||||||
|
export * from "./PCA";
|
||||||
|
export * from "./photo_inpaint";
|
||||||
|
export * from "./RotatedRect";
|
||||||
|
export * from "./softdouble";
|
||||||
|
export * from "./softfloat";
|
||||||
|
export * from "./video_track";
|
||||||
|
export * from "./_hacks";
|
||||||
|
export * from "./Tracker";
|
||||||
|
export * from "./TrackerMIL";
|
||||||
2937
opencv-js-4.10.0/src/types/opencv/calib3d.ts
Normal file
2937
opencv-js-4.10.0/src/types/opencv/calib3d.ts
Normal file
File diff suppressed because it is too large
Load diff
3102
opencv-js-4.10.0/src/types/opencv/core_array.ts
Normal file
3102
opencv-js-4.10.0/src/types/opencv/core_array.ts
Normal file
File diff suppressed because it is too large
Load diff
81
opencv-js-4.10.0/src/types/opencv/core_cluster.ts
Normal file
81
opencv-js-4.10.0/src/types/opencv/core_cluster.ts
Normal file
|
|
@ -0,0 +1,81 @@
|
||||||
|
import type {
|
||||||
|
double,
|
||||||
|
InputArray,
|
||||||
|
InputOutputArray,
|
||||||
|
int,
|
||||||
|
OutputArray,
|
||||||
|
TermCriteria,
|
||||||
|
_EqPredicate,
|
||||||
|
} from "./_types";
|
||||||
|
/*
|
||||||
|
* # Clustering
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* The function kmeans implements a k-means algorithm that finds the centers of cluster_count clusters
|
||||||
|
* and groups the input samples around the clusters. As an output, `$\\texttt{bestLabels}_i$` contains
|
||||||
|
* a 0-based cluster index for the sample stored in the `$i^{th}$` row of the samples matrix.
|
||||||
|
*
|
||||||
|
* (Python) An example on K-means clustering can be found at
|
||||||
|
* opencv_source_code/samples/python/kmeans.py
|
||||||
|
*
|
||||||
|
* The function returns the compactness measure that is computed as `\\[\\sum _i \\| \\texttt{samples}
|
||||||
|
* _i - \\texttt{centers} _{ \\texttt{labels} _i} \\| ^2\\]` after every attempt. The best (minimum)
|
||||||
|
* value is chosen and the corresponding labels and the compactness value are returned by the function.
|
||||||
|
* Basically, you can use only the core of the function, set the number of attempts to 1, initialize
|
||||||
|
* labels each time using a custom algorithm, pass them with the ( flags = [KMEANS_USE_INITIAL_LABELS]
|
||||||
|
* ) flag, and then choose the best (most-compact) clustering.
|
||||||
|
*
|
||||||
|
* @param data Data for clustering. An array of N-Dimensional points with float coordinates is needed.
|
||||||
|
* Examples of this array can be:
|
||||||
|
* Mat points(count, 2, CV_32F);Mat points(count, 1, CV_32FC2);Mat points(1, count,
|
||||||
|
* CV_32FC2);std::vector<cv::Point2f> points(sampleCount);
|
||||||
|
*
|
||||||
|
* @param K Number of clusters to split the set by.
|
||||||
|
*
|
||||||
|
* @param bestLabels Input/output integer array that stores the cluster indices for every sample.
|
||||||
|
*
|
||||||
|
* @param criteria The algorithm termination criteria, that is, the maximum number of iterations and/or
|
||||||
|
* the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster
|
||||||
|
* centers moves by less than criteria.epsilon on some iteration, the algorithm stops.
|
||||||
|
*
|
||||||
|
* @param attempts Flag to specify the number of times the algorithm is executed using different
|
||||||
|
* initial labellings. The algorithm returns the labels that yield the best compactness (see the last
|
||||||
|
* function parameter).
|
||||||
|
*
|
||||||
|
* @param flags Flag that can take values of cv::KmeansFlags
|
||||||
|
*
|
||||||
|
* @param centers Output matrix of the cluster centers, one row per each cluster center.
|
||||||
|
*/
|
||||||
|
export declare function kmeans(
|
||||||
|
data: InputArray,
|
||||||
|
K: int,
|
||||||
|
bestLabels: InputOutputArray,
|
||||||
|
criteria: TermCriteria,
|
||||||
|
attempts: int,
|
||||||
|
flags: int,
|
||||||
|
centers?: OutputArray,
|
||||||
|
): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The generic function partition implements an `$O(N^2)$` algorithm for splitting a set of `$N$`
|
||||||
|
* elements into one or more equivalency classes, as described in . The function returns the number of
|
||||||
|
* equivalency classes.
|
||||||
|
*
|
||||||
|
* @param _vec Set of elements stored as a vector.
|
||||||
|
*
|
||||||
|
* @param labels Output vector of labels. It contains as many elements as vec. Each label labels[i] is
|
||||||
|
* a 0-based cluster index of vec[i].
|
||||||
|
*
|
||||||
|
* @param predicate Equivalence predicate (pointer to a boolean function of two arguments or an
|
||||||
|
* instance of the class that has the method bool operator()(const _Tp& a, const _Tp& b) ). The
|
||||||
|
* predicate returns true when the elements are certainly in the same class, and returns false if they
|
||||||
|
* may or may not be in the same class.
|
||||||
|
*/
|
||||||
|
export declare function partition(
|
||||||
|
arg119: any,
|
||||||
|
arg120: any,
|
||||||
|
_vec: any,
|
||||||
|
labels: any,
|
||||||
|
predicate?: _EqPredicate,
|
||||||
|
): any;
|
||||||
159
opencv-js-4.10.0/src/types/opencv/core_hal_interface.ts
Normal file
159
opencv-js-4.10.0/src/types/opencv/core_hal_interface.ts
Normal file
|
|
@ -0,0 +1,159 @@
|
||||||
|
import type { cvhalDFT, int, size_t, uchar } from "./_types";
|
||||||
|
/*
|
||||||
|
* # Interface
|
||||||
|
* Define your functions to override default implementations:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* #undef hal_add8u
|
||||||
|
* #define hal_add8u my_add8u
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* @param context pointer to context storing all necessary data
|
||||||
|
*
|
||||||
|
* @param src_data source image data and step
|
||||||
|
*
|
||||||
|
* @param dst_data destination image data and step
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_dct2D(
|
||||||
|
context: cvhalDFT,
|
||||||
|
src_data: uchar,
|
||||||
|
src_step: size_t,
|
||||||
|
dst_data: uchar,
|
||||||
|
dst_step: size_t,
|
||||||
|
): cvhalDFT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param context pointer to context storing all necessary data
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_dctFree2D(context: cvhalDFT): cvhalDFT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param context double pointer to context storing all necessary data
|
||||||
|
*
|
||||||
|
* @param width image dimensions
|
||||||
|
*
|
||||||
|
* @param depth image type (CV_32F or CV64F)
|
||||||
|
*
|
||||||
|
* @param flags algorithm options (combination of CV_HAL_DFT_INVERSE, ...)
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_dctInit2D(
|
||||||
|
context: cvhalDFT,
|
||||||
|
width: int,
|
||||||
|
height: int,
|
||||||
|
depth: int,
|
||||||
|
flags: int,
|
||||||
|
): cvhalDFT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param context pointer to context storing all necessary data
|
||||||
|
*
|
||||||
|
* @param src source data
|
||||||
|
*
|
||||||
|
* @param dst destination data
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_dft1D(
|
||||||
|
context: cvhalDFT,
|
||||||
|
src: uchar,
|
||||||
|
dst: uchar,
|
||||||
|
): cvhalDFT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param context pointer to context storing all necessary data
|
||||||
|
*
|
||||||
|
* @param src_data source image data and step
|
||||||
|
*
|
||||||
|
* @param dst_data destination image data and step
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_dft2D(
|
||||||
|
context: cvhalDFT,
|
||||||
|
src_data: uchar,
|
||||||
|
src_step: size_t,
|
||||||
|
dst_data: uchar,
|
||||||
|
dst_step: size_t,
|
||||||
|
): cvhalDFT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param context pointer to context storing all necessary data
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_dftFree1D(context: cvhalDFT): cvhalDFT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param context pointer to context storing all necessary data
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_dftFree2D(context: cvhalDFT): cvhalDFT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param context double pointer to context storing all necessary data
|
||||||
|
*
|
||||||
|
* @param len transformed array length
|
||||||
|
*
|
||||||
|
* @param count estimated transformation count
|
||||||
|
*
|
||||||
|
* @param depth array type (CV_32F or CV_64F)
|
||||||
|
*
|
||||||
|
* @param flags algorithm options (combination of CV_HAL_DFT_INVERSE, CV_HAL_DFT_SCALE, ...)
|
||||||
|
*
|
||||||
|
* @param needBuffer pointer to boolean variable, if valid pointer provided, then variable value should
|
||||||
|
* be set to true to signal that additional memory buffer is needed for operations
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_dftInit1D(
|
||||||
|
context: cvhalDFT,
|
||||||
|
len: int,
|
||||||
|
count: int,
|
||||||
|
depth: int,
|
||||||
|
flags: int,
|
||||||
|
needBuffer: any,
|
||||||
|
): cvhalDFT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param context double pointer to context storing all necessary data
|
||||||
|
*
|
||||||
|
* @param width image dimensions
|
||||||
|
*
|
||||||
|
* @param depth image type (CV_32F or CV64F)
|
||||||
|
*
|
||||||
|
* @param src_channels number of channels in input image
|
||||||
|
*
|
||||||
|
* @param dst_channels number of channels in output image
|
||||||
|
*
|
||||||
|
* @param flags algorithm options (combination of CV_HAL_DFT_INVERSE, ...)
|
||||||
|
*
|
||||||
|
* @param nonzero_rows number of nonzero rows in image, can be used for optimization
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_dftInit2D(
|
||||||
|
context: cvhalDFT,
|
||||||
|
width: int,
|
||||||
|
height: int,
|
||||||
|
depth: int,
|
||||||
|
src_channels: int,
|
||||||
|
dst_channels: int,
|
||||||
|
flags: int,
|
||||||
|
nonzero_rows: int,
|
||||||
|
): cvhalDFT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param src_data Source image
|
||||||
|
*
|
||||||
|
* @param width Source image dimensions
|
||||||
|
*
|
||||||
|
* @param depth Depth of source image
|
||||||
|
*
|
||||||
|
* @param minVal Pointer to the returned global minimum and maximum in an array.
|
||||||
|
*
|
||||||
|
* @param minIdx Pointer to the returned minimum and maximum location.
|
||||||
|
*
|
||||||
|
* @param mask Specified array region.
|
||||||
|
*/
|
||||||
|
export declare function hal_ni_minMaxIdx(
|
||||||
|
src_data: uchar,
|
||||||
|
src_step: size_t,
|
||||||
|
width: int,
|
||||||
|
height: int,
|
||||||
|
depth: int,
|
||||||
|
minVal: any,
|
||||||
|
maxVal: any,
|
||||||
|
minIdx: any,
|
||||||
|
maxIdx: any,
|
||||||
|
mask: uchar,
|
||||||
|
): uchar;
|
||||||
748
opencv-js-4.10.0/src/types/opencv/core_utils.ts
Normal file
748
opencv-js-4.10.0/src/types/opencv/core_utils.ts
Normal file
|
|
@ -0,0 +1,748 @@
|
||||||
|
import type {
|
||||||
|
AsyncArray,
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
ErrorCallback,
|
||||||
|
float,
|
||||||
|
float16_t,
|
||||||
|
InputArray,
|
||||||
|
InputArrayOfArrays,
|
||||||
|
InputOutputArray,
|
||||||
|
InputOutputArrayOfArrays,
|
||||||
|
int,
|
||||||
|
int64,
|
||||||
|
schar,
|
||||||
|
short,
|
||||||
|
size_t,
|
||||||
|
uchar,
|
||||||
|
uint64,
|
||||||
|
unsigned,
|
||||||
|
ushort,
|
||||||
|
_Tp,
|
||||||
|
} from "./_types";
|
||||||
|
/*
|
||||||
|
* # Utility and system functions and macros
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* The function returns the aligned pointer of the same type as the input pointer:
|
||||||
|
* `\\[\\texttt{(_Tp*)(((size_t)ptr + n-1) & -n)}\\]`
|
||||||
|
*
|
||||||
|
* @param ptr Aligned pointer.
|
||||||
|
*
|
||||||
|
* @param n Alignment size that must be a power of two.
|
||||||
|
*/
|
||||||
|
export declare function alignPtr(arg92: any, ptr: any, n?: int): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function returns the minimum number that is greater than or equal to sz and is divisible by n :
|
||||||
|
* `\\[\\texttt{(sz + n-1) & -n}\\]`
|
||||||
|
*
|
||||||
|
* @param sz Buffer size to align.
|
||||||
|
*
|
||||||
|
* @param n Alignment size that must be a power of two.
|
||||||
|
*/
|
||||||
|
export declare function alignSize(sz: size_t, n: int): size_t;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function returns true if the host hardware supports the specified feature. When user calls
|
||||||
|
* setUseOptimized(false), the subsequent calls to [checkHardwareSupport()] will return false until
|
||||||
|
* setUseOptimized(true) is called. This way user can dynamically switch on and off the optimized code
|
||||||
|
* in OpenCV.
|
||||||
|
*
|
||||||
|
* @param feature The feature of interest, one of cv::CpuFeatures
|
||||||
|
*/
|
||||||
|
export declare function checkHardwareSupport(feature: int): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* proxy for hal::Cholesky
|
||||||
|
*/
|
||||||
|
export declare function Cholesky(
|
||||||
|
A: any,
|
||||||
|
astep: size_t,
|
||||||
|
m: int,
|
||||||
|
b: any,
|
||||||
|
bstep: size_t,
|
||||||
|
n: int,
|
||||||
|
): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* proxy for hal::Cholesky
|
||||||
|
*/
|
||||||
|
export declare function Cholesky(
|
||||||
|
A: any,
|
||||||
|
astep: size_t,
|
||||||
|
m: int,
|
||||||
|
b: any,
|
||||||
|
bstep: size_t,
|
||||||
|
n: int,
|
||||||
|
): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function cubeRoot computes `$\\sqrt[3]{\\texttt{val}}$`. Negative arguments are handled
|
||||||
|
* correctly. NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for
|
||||||
|
* single-precision data.
|
||||||
|
*
|
||||||
|
* @param val A function argument.
|
||||||
|
*/
|
||||||
|
export declare function cubeRoot(val: float): float;
|
||||||
|
|
||||||
|
export declare function cv_abs(arg93: any, x: _Tp): any;
|
||||||
|
|
||||||
|
export declare function cv_abs(x: uchar): uchar;
|
||||||
|
|
||||||
|
export declare function cv_abs(x: schar): schar;
|
||||||
|
|
||||||
|
export declare function cv_abs(x: ushort): ushort;
|
||||||
|
|
||||||
|
export declare function cv_abs(x: short): int;
|
||||||
|
|
||||||
|
export declare function CV_XADD(addr: any, delta: int): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function computes an integer i such that: `\\[i \\le \\texttt{value} < i+1\\]`
|
||||||
|
*
|
||||||
|
* @param value floating-point number. If the value is outside of INT_MIN ... INT_MAX range, the result
|
||||||
|
* is not defined.
|
||||||
|
*/
|
||||||
|
export declare function cvCeil(value: double): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function cvCeil(value: float): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function cvCeil(value: int): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function computes an integer i such that: `\\[i \\le \\texttt{value} < i+1\\]`
|
||||||
|
*
|
||||||
|
* @param value floating-point number. If the value is outside of INT_MIN ... INT_MAX range, the result
|
||||||
|
* is not defined.
|
||||||
|
*/
|
||||||
|
export declare function cvFloor(value: double): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function cvFloor(value: float): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function cvFloor(value: int): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function returns 1 if the argument is a plus or minus infinity (as defined by IEEE754 standard)
|
||||||
|
* and 0 otherwise.
|
||||||
|
*
|
||||||
|
* @param value The input floating-point value
|
||||||
|
*/
|
||||||
|
export declare function cvIsInf(value: double): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function cvIsInf(value: float): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function returns 1 if the argument is Not A Number (as defined by IEEE754 standard), 0
|
||||||
|
* otherwise.
|
||||||
|
*
|
||||||
|
* @param value The input floating-point value
|
||||||
|
*/
|
||||||
|
export declare function cvIsNaN(value: double): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function cvIsNaN(value: float): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param value floating-point number. If the value is outside of INT_MIN ... INT_MAX range, the result
|
||||||
|
* is not defined.
|
||||||
|
*/
|
||||||
|
export declare function cvRound(value: double): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function cvRound(value: float): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function cvRound(value: int): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Use this function instead of `ceil((float)a / b)` expressions.
|
||||||
|
*
|
||||||
|
* [alignSize]
|
||||||
|
*/
|
||||||
|
export declare function divUp(a: int, b: any): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function divUp(a: size_t, b: any): size_t;
|
||||||
|
|
||||||
|
export declare function dumpInputArray(argument: InputArray): String;
|
||||||
|
|
||||||
|
export declare function dumpInputArrayOfArrays(
|
||||||
|
argument: InputArrayOfArrays,
|
||||||
|
): String;
|
||||||
|
|
||||||
|
export declare function dumpInputOutputArray(
|
||||||
|
argument: InputOutputArray,
|
||||||
|
): String;
|
||||||
|
|
||||||
|
export declare function dumpInputOutputArrayOfArrays(
|
||||||
|
argument: InputOutputArrayOfArrays,
|
||||||
|
): String;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* By default the function prints information about the error to stderr, then it either stops if
|
||||||
|
* [cv::setBreakOnError()] had been called before or raises the exception. It is possible to alternate
|
||||||
|
* error processing by using [redirectError()].
|
||||||
|
*
|
||||||
|
* @param exc the exception raisen.
|
||||||
|
*/
|
||||||
|
export declare function error(exc: any): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* By default the function prints information about the error to stderr, then it either stops if
|
||||||
|
* [setBreakOnError()] had been called before or raises the exception. It is possible to alternate
|
||||||
|
* error processing by using [redirectError()].
|
||||||
|
*
|
||||||
|
* [CV_Error], [CV_Error_], [CV_Assert], [CV_DbgAssert]
|
||||||
|
*
|
||||||
|
* @param _code - error code (Error::Code)
|
||||||
|
*
|
||||||
|
* @param _err - error description
|
||||||
|
*
|
||||||
|
* @param _func - function name. Available only when the compiler supports getting it
|
||||||
|
*
|
||||||
|
* @param _file - source file name where the error has occurred
|
||||||
|
*
|
||||||
|
* @param _line - line number in the source file where the error has occurred
|
||||||
|
*/
|
||||||
|
export declare function error(
|
||||||
|
_code: int,
|
||||||
|
_err: any,
|
||||||
|
_func: any,
|
||||||
|
_file: any,
|
||||||
|
_line: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function fastAtan2 calculates the full-range angle of an input 2D vector. The angle is measured
|
||||||
|
* in degrees and varies from 0 to 360 degrees. The accuracy is about 0.3 degrees.
|
||||||
|
*
|
||||||
|
* @param y y-coordinate of the vector.
|
||||||
|
*
|
||||||
|
* @param x x-coordinate of the vector.
|
||||||
|
*/
|
||||||
|
export declare function fastAtan2(y: float, x: float): float;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function deallocates the buffer allocated with fastMalloc . If NULL pointer is passed, the
|
||||||
|
* function does nothing. C version of the function clears the pointer *pptr* to avoid problems with
|
||||||
|
* double memory deallocation.
|
||||||
|
*
|
||||||
|
* @param ptr Pointer to the allocated buffer.
|
||||||
|
*/
|
||||||
|
export declare function fastFree(ptr: any): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function allocates the buffer of the specified size and returns it. When the buffer size is 16
|
||||||
|
* bytes or more, the returned buffer is aligned to 16 bytes.
|
||||||
|
*
|
||||||
|
* @param bufSize Allocated buffer size.
|
||||||
|
*/
|
||||||
|
export declare function fastMalloc(bufSize: size_t): any;
|
||||||
|
|
||||||
|
export declare function forEach_impl(
|
||||||
|
arg94: any,
|
||||||
|
arg95: any,
|
||||||
|
operation: any,
|
||||||
|
): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returned value is raw cmake output including version control system revision, compiler version,
|
||||||
|
* compiler flags, enabled modules and third party libraries, etc. Output format depends on target
|
||||||
|
* architecture.
|
||||||
|
*/
|
||||||
|
export declare function getBuildInformation(): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returned value is a string containing space separated list of CPU features with following markers:
|
||||||
|
*
|
||||||
|
* no markers - baseline features
|
||||||
|
* prefix `*` - features enabled in dispatcher
|
||||||
|
* suffix `?` - features enabled but not available in HW
|
||||||
|
*
|
||||||
|
* Example: `SSE SSE2 SSE3 *SSE4.1 *SSE4.2 *FP16 *AVX *AVX2 *AVX512-SKX?`
|
||||||
|
*/
|
||||||
|
export declare function getCPUFeaturesLine(): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function returns the current number of CPU ticks on some architectures (such as x86, x64,
|
||||||
|
* PowerPC). On other platforms the function is equivalent to getTickCount. It can also be used for
|
||||||
|
* very accurate time measurements, as well as for [RNG] initialization. Note that in case of multi-CPU
|
||||||
|
* systems a thread, from which getCPUTickCount is called, can be suspended and resumed at another CPU
|
||||||
|
* with its own counter. So, theoretically (and practically) the subsequent calls to the function do
|
||||||
|
* not necessary return the monotonously increasing values. Also, since a modern CPU varies the CPU
|
||||||
|
* frequency depending on the load, the number of CPU clocks spent in some code cannot be directly
|
||||||
|
* converted to time units. Therefore, getTickCount is generally a preferable solution for measuring
|
||||||
|
* execution time.
|
||||||
|
*/
|
||||||
|
export declare function getCPUTickCount(): int64;
|
||||||
|
|
||||||
|
export declare function getElemSize(type: int): size_t;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns empty string if feature is not defined
|
||||||
|
*/
|
||||||
|
export declare function getHardwareFeatureName(feature: int): String;
|
||||||
|
|
||||||
|
export declare function getNumberOfCPUs(): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Always returns 1 if OpenCV is built without threading support.
|
||||||
|
*
|
||||||
|
* The exact meaning of return value depends on the threading framework used by OpenCV library:
|
||||||
|
*
|
||||||
|
* `TBB` - The number of threads, that OpenCV will try to use for parallel regions. If there is any
|
||||||
|
* tbb::thread_scheduler_init in user code conflicting with OpenCV, then function returns default
|
||||||
|
* number of threads used by TBB library.
|
||||||
|
* `OpenMP` - An upper bound on the number of threads that could be used to form a new team.
|
||||||
|
* `Concurrency` - The number of threads, that OpenCV will try to use for parallel regions.
|
||||||
|
* `GCD` - Unsupported; returns the GCD thread pool limit (512) for compatibility.
|
||||||
|
* `C=` - The number of threads, that OpenCV will try to use for parallel regions, if before called
|
||||||
|
* setNumThreads with threads > 0, otherwise returns the number of logical CPUs, available for the
|
||||||
|
* process.
|
||||||
|
*
|
||||||
|
* [setNumThreads], [getThreadNum]
|
||||||
|
*/
|
||||||
|
export declare function getNumThreads(): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The exact meaning of the return value depends on the threading framework used by OpenCV library:
|
||||||
|
*
|
||||||
|
* `TBB` - Unsupported with current 4.1 TBB release. Maybe will be supported in future.
|
||||||
|
* `OpenMP` - The thread number, within the current team, of the calling thread.
|
||||||
|
* `Concurrency` - An ID for the virtual processor that the current context is executing on (0 for
|
||||||
|
* master thread and unique number for others, but not necessary 1,2,3,...).
|
||||||
|
* `GCD` - System calling thread's ID. Never returns 0 inside parallel region.
|
||||||
|
* `C=` - The index of the current parallel task.
|
||||||
|
*
|
||||||
|
* [setNumThreads], [getNumThreads]
|
||||||
|
*/
|
||||||
|
export declare function getThreadNum(): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function returns the number of ticks after the certain event (for example, when the machine was
|
||||||
|
* turned on). It can be used to initialize [RNG] or to measure a function execution time by reading
|
||||||
|
* the tick count before and after the function call.
|
||||||
|
*
|
||||||
|
* [getTickFrequency], [TickMeter]
|
||||||
|
*/
|
||||||
|
export declare function getTickCount(): int64;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function returns the number of ticks per second. That is, the following code computes the
|
||||||
|
* execution time in seconds:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* double t = (double)getTickCount();
|
||||||
|
* // do something ...
|
||||||
|
* t = ((double)getTickCount() - t)/getTickFrequency();
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* [getTickCount], [TickMeter]
|
||||||
|
*/
|
||||||
|
export declare function getTickFrequency(): double;
|
||||||
|
|
||||||
|
export declare function getVersionMajor(): int;
|
||||||
|
|
||||||
|
export declare function getVersionMinor(): int;
|
||||||
|
|
||||||
|
export declare function getVersionRevision(): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* For example "3.4.1-dev".
|
||||||
|
*
|
||||||
|
* getMajorVersion, getMinorVersion, getRevisionVersion
|
||||||
|
*/
|
||||||
|
export declare function getVersionString(): String;
|
||||||
|
|
||||||
|
export declare function glob(
|
||||||
|
pattern: String,
|
||||||
|
result: any,
|
||||||
|
recursive?: bool,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* proxy for hal::LU
|
||||||
|
*/
|
||||||
|
export declare function LU(
|
||||||
|
A: any,
|
||||||
|
astep: size_t,
|
||||||
|
m: int,
|
||||||
|
b: any,
|
||||||
|
bstep: size_t,
|
||||||
|
n: int,
|
||||||
|
): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* proxy for hal::LU
|
||||||
|
*/
|
||||||
|
export declare function LU(
|
||||||
|
A: any,
|
||||||
|
astep: size_t,
|
||||||
|
m: int,
|
||||||
|
b: any,
|
||||||
|
bstep: size_t,
|
||||||
|
n: int,
|
||||||
|
): int;
|
||||||
|
|
||||||
|
export declare function normInf(arg96: any, arg97: any, a: any, n: int): any;
|
||||||
|
|
||||||
|
export declare function normInf(
|
||||||
|
arg98: any,
|
||||||
|
arg99: any,
|
||||||
|
a: any,
|
||||||
|
b: any,
|
||||||
|
n: int,
|
||||||
|
): any;
|
||||||
|
|
||||||
|
export declare function normL1(arg100: any, arg101: any, a: any, n: int): any;
|
||||||
|
|
||||||
|
export declare function normL1(
|
||||||
|
arg102: any,
|
||||||
|
arg103: any,
|
||||||
|
a: any,
|
||||||
|
b: any,
|
||||||
|
n: int,
|
||||||
|
): any;
|
||||||
|
|
||||||
|
export declare function normL1(a: any, b: any, n: int): float;
|
||||||
|
|
||||||
|
export declare function normL1(a: uchar, b: uchar, n: int): uchar;
|
||||||
|
|
||||||
|
export declare function normL2Sqr(
|
||||||
|
arg104: any,
|
||||||
|
arg105: any,
|
||||||
|
a: any,
|
||||||
|
n: int,
|
||||||
|
): any;
|
||||||
|
|
||||||
|
export declare function normL2Sqr(
|
||||||
|
arg106: any,
|
||||||
|
arg107: any,
|
||||||
|
a: any,
|
||||||
|
b: any,
|
||||||
|
n: int,
|
||||||
|
): any;
|
||||||
|
|
||||||
|
export declare function normL2Sqr(a: any, b: any, n: int): float;
|
||||||
|
|
||||||
|
export declare function parallel_for_(
|
||||||
|
range: any,
|
||||||
|
body: any,
|
||||||
|
nstripes?: double,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
export declare function parallel_for_(
|
||||||
|
range: any,
|
||||||
|
functor: any,
|
||||||
|
nstripes?: double,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function sets the new error handler, called from [cv::error()].
|
||||||
|
*
|
||||||
|
* the previous error handler
|
||||||
|
*
|
||||||
|
* @param errCallback the new error handler. If NULL, the default error handler is used.
|
||||||
|
*
|
||||||
|
* @param userdata the optional user data pointer, passed to the callback.
|
||||||
|
*
|
||||||
|
* @param prevUserdata the optional output parameter where the previous user data pointer is stored
|
||||||
|
*/
|
||||||
|
export declare function redirectError(
|
||||||
|
errCallback: ErrorCallback,
|
||||||
|
userdata?: any,
|
||||||
|
prevUserdata?: any,
|
||||||
|
): ErrorCallback;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Use this function instead of `ceil((float)a / b) * b` expressions.
|
||||||
|
*
|
||||||
|
* [divUp]
|
||||||
|
*/
|
||||||
|
export declare function roundUp(a: int, b: any): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function roundUp(a: size_t, b: any): size_t;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function saturate_cast resembles the standard C++ cast operations, such as static_cast<T>() and
|
||||||
|
* others. It perform an efficient and accurate conversion from one primitive type to another (see the
|
||||||
|
* introduction chapter). saturate in the name means that when the input value v is out of the range of
|
||||||
|
* the target type, the result is not formed just by taking low bits of the input, but instead the
|
||||||
|
* value is clipped. For example:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* uchar a = saturate_cast<uchar>(-100); // a = 0 (UCHAR_MIN)
|
||||||
|
* short b = saturate_cast<short>(33333.33333); // b = 32767 (SHRT_MAX)
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* Such clipping is done when the target type is unsigned char , signed char , unsigned short or
|
||||||
|
* signed short . For 32-bit integers, no clipping is done.
|
||||||
|
*
|
||||||
|
* When the parameter is a floating-point value and the target type is an integer (8-, 16- or 32-bit),
|
||||||
|
* the floating-point value is first rounded to the nearest integer and then clipped if needed (when
|
||||||
|
* the target type is 8- or 16-bit).
|
||||||
|
*
|
||||||
|
* This operation is used in the simplest or most complex image processing functions in OpenCV.
|
||||||
|
*
|
||||||
|
* [add], [subtract], [multiply], [divide], [Mat::convertTo]
|
||||||
|
*
|
||||||
|
* @param v Function parameter.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg108: any, v: uchar): uchar;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg109: any, v: schar): schar;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg110: any, v: ushort): ushort;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg111: any, v: short): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg112: any, v: unsigned): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg113: any, v: int): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg114: any, v: float): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg115: any, v: double): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg116: any, v: int64): int64;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg117: any, v: uint64): uint64;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function saturate_cast(arg118: any, v: float16_t): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* When the break-on-error mode is set, the default error handler issues a hardware exception, which
|
||||||
|
* can make debugging more convenient.
|
||||||
|
*
|
||||||
|
* the previous state
|
||||||
|
*/
|
||||||
|
export declare function setBreakOnError(flag: bool): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If threads == 0, OpenCV will disable threading optimizations and run all it's functions
|
||||||
|
* sequentially. Passing threads < 0 will reset threads number to system default. This function must be
|
||||||
|
* called outside of parallel region.
|
||||||
|
*
|
||||||
|
* OpenCV will try to run its functions with specified threads number, but some behaviour differs from
|
||||||
|
* framework:
|
||||||
|
*
|
||||||
|
* `TBB` - User-defined parallel constructions will run with the same threads number, if another is not
|
||||||
|
* specified. If later on user creates his own scheduler, OpenCV will use it.
|
||||||
|
* `OpenMP` - No special defined behaviour.
|
||||||
|
* `Concurrency` - If threads == 1, OpenCV will disable threading optimizations and run its functions
|
||||||
|
* sequentially.
|
||||||
|
* `GCD` - Supports only values <= 0.
|
||||||
|
* `C=` - No special defined behaviour.
|
||||||
|
*
|
||||||
|
* [getNumThreads], [getThreadNum]
|
||||||
|
*
|
||||||
|
* @param nthreads Number of threads used by OpenCV.
|
||||||
|
*/
|
||||||
|
export declare function setNumThreads(nthreads: int): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function can be used to dynamically turn on and off optimized dispatched code (code that uses
|
||||||
|
* SSE4.2, AVX/AVX2, and other instructions on the platforms that support it). It sets a global flag
|
||||||
|
* that is further checked by OpenCV functions. Since the flag is not checked in the inner OpenCV
|
||||||
|
* loops, it is only safe to call the function on the very top level in your application where you can
|
||||||
|
* be sure that no other OpenCV function is currently executed.
|
||||||
|
*
|
||||||
|
* By default, the optimized code is enabled unless you disable it in CMake. The current status can be
|
||||||
|
* retrieved using useOptimized.
|
||||||
|
*
|
||||||
|
* @param onoff The boolean flag specifying whether the optimized code should be used (onoff=true) or
|
||||||
|
* not (onoff=false).
|
||||||
|
*/
|
||||||
|
export declare function setUseOptimized(onoff: bool): void;
|
||||||
|
|
||||||
|
export declare function tempfile(suffix?: any): String;
|
||||||
|
|
||||||
|
export declare function testAsyncArray(argument: InputArray): AsyncArray;
|
||||||
|
|
||||||
|
export declare function testAsyncException(): AsyncArray;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function returns true if the optimized code is enabled. Otherwise, it returns false.
|
||||||
|
*/
|
||||||
|
export declare function useOptimized(): bool;
|
||||||
|
|
||||||
|
export declare const CPU_MMX: CpuFeatures; // initializer: = 1
|
||||||
|
|
||||||
|
export declare const CPU_SSE: CpuFeatures; // initializer: = 2
|
||||||
|
|
||||||
|
export declare const CPU_SSE2: CpuFeatures; // initializer: = 3
|
||||||
|
|
||||||
|
export declare const CPU_SSE3: CpuFeatures; // initializer: = 4
|
||||||
|
|
||||||
|
export declare const CPU_SSSE3: CpuFeatures; // initializer: = 5
|
||||||
|
|
||||||
|
export declare const CPU_SSE4_1: CpuFeatures; // initializer: = 6
|
||||||
|
|
||||||
|
export declare const CPU_SSE4_2: CpuFeatures; // initializer: = 7
|
||||||
|
|
||||||
|
export declare const CPU_POPCNT: CpuFeatures; // initializer: = 8
|
||||||
|
|
||||||
|
export declare const CPU_FP16: CpuFeatures; // initializer: = 9
|
||||||
|
|
||||||
|
export declare const CPU_AVX: CpuFeatures; // initializer: = 10
|
||||||
|
|
||||||
|
export declare const CPU_AVX2: CpuFeatures; // initializer: = 11
|
||||||
|
|
||||||
|
export declare const CPU_FMA3: CpuFeatures; // initializer: = 12
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512F: CpuFeatures; // initializer: = 13
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512BW: CpuFeatures; // initializer: = 14
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512CD: CpuFeatures; // initializer: = 15
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512DQ: CpuFeatures; // initializer: = 16
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512ER: CpuFeatures; // initializer: = 17
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512IFMA512: CpuFeatures; // initializer: = 18
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512IFMA: CpuFeatures; // initializer: = 18
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512PF: CpuFeatures; // initializer: = 19
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512VBMI: CpuFeatures; // initializer: = 20
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512VL: CpuFeatures; // initializer: = 21
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512VBMI2: CpuFeatures; // initializer: = 22
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512VNNI: CpuFeatures; // initializer: = 23
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512BITALG: CpuFeatures; // initializer: = 24
|
||||||
|
|
||||||
|
export declare const CPU_AVX_512VPOPCNTDQ: CpuFeatures; // initializer: = 25
|
||||||
|
|
||||||
|
export declare const CPU_AVX_5124VNNIW: CpuFeatures; // initializer: = 26
|
||||||
|
|
||||||
|
export declare const CPU_AVX_5124FMAPS: CpuFeatures; // initializer: = 27
|
||||||
|
|
||||||
|
export declare const CPU_NEON: CpuFeatures; // initializer: = 100
|
||||||
|
|
||||||
|
export declare const CPU_VSX: CpuFeatures; // initializer: = 200
|
||||||
|
|
||||||
|
export declare const CPU_VSX3: CpuFeatures; // initializer: = 201
|
||||||
|
|
||||||
|
export declare const CPU_AVX512_SKX: CpuFeatures; // initializer: = 256
|
||||||
|
|
||||||
|
export declare const CPU_AVX512_COMMON: CpuFeatures; // initializer: = 257
|
||||||
|
|
||||||
|
export declare const CPU_AVX512_KNL: CpuFeatures; // initializer: = 258
|
||||||
|
|
||||||
|
export declare const CPU_AVX512_KNM: CpuFeatures; // initializer: = 259
|
||||||
|
|
||||||
|
export declare const CPU_AVX512_CNL: CpuFeatures; // initializer: = 260
|
||||||
|
|
||||||
|
export declare const CPU_AVX512_CEL: CpuFeatures; // initializer: = 261
|
||||||
|
|
||||||
|
export declare const CPU_AVX512_ICL: CpuFeatures; // initializer: = 262
|
||||||
|
|
||||||
|
export declare const CPU_MAX_FEATURE: CpuFeatures; // initializer: = 512
|
||||||
|
|
||||||
|
export declare const SORT_EVERY_ROW: SortFlags; // initializer: = 0
|
||||||
|
|
||||||
|
/**
|
||||||
|
* each matrix column is sorted independently; this flag and the previous one are mutually exclusive.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const SORT_EVERY_COLUMN: SortFlags; // initializer: = 1
|
||||||
|
|
||||||
|
/**
|
||||||
|
* each matrix row is sorted in the ascending order.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const SORT_ASCENDING: SortFlags; // initializer: = 0
|
||||||
|
|
||||||
|
/**
|
||||||
|
* each matrix row is sorted in the descending order; this flag and the previous one are also mutually
|
||||||
|
* exclusive.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const SORT_DESCENDING: SortFlags; // initializer: = 16
|
||||||
|
|
||||||
|
export type CpuFeatures = any;
|
||||||
|
|
||||||
|
export type SortFlags = any;
|
||||||
505
opencv-js-4.10.0/src/types/opencv/dnn.ts
Normal file
505
opencv-js-4.10.0/src/types/opencv/dnn.ts
Normal file
|
|
@ -0,0 +1,505 @@
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
InputArray,
|
||||||
|
InputArrayOfArrays,
|
||||||
|
int,
|
||||||
|
Mat,
|
||||||
|
Net,
|
||||||
|
OutputArray,
|
||||||
|
OutputArrayOfArrays,
|
||||||
|
Size,
|
||||||
|
size_t,
|
||||||
|
uchar,
|
||||||
|
} from "./_types";
|
||||||
|
/*
|
||||||
|
* # Deep Neural Network module
|
||||||
|
* This module contains:
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* * API for new layers creation, layers are building bricks of neural networks;
|
||||||
|
* * set of built-in most-useful Layers;
|
||||||
|
* * API to construct and modify comprehensive neural networks from layers;
|
||||||
|
* * functionality for loading serialized networks models from different frameworks.
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* Functionality of this module is designed only for forward pass computations (i.e. network testing). A network training is in principle not supported.
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* if `crop` is true, input image is resized so one side after resize is equal to corresponding
|
||||||
|
* dimension in `size` and another one is equal or larger. Then, crop from the center is performed. If
|
||||||
|
* `crop` is false, direct resize without cropping and preserving aspect ratio is performed.
|
||||||
|
*
|
||||||
|
* 4-dimensional [Mat] with NCHW dimensions order.
|
||||||
|
*
|
||||||
|
* @param image input image (with 1-, 3- or 4-channels).
|
||||||
|
*
|
||||||
|
* @param scalefactor multiplier for image values.
|
||||||
|
*
|
||||||
|
* @param size spatial size for output image
|
||||||
|
*
|
||||||
|
* @param mean scalar with mean values which are subtracted from channels. Values are intended to be in
|
||||||
|
* (mean-R, mean-G, mean-B) order if image has BGR ordering and swapRB is true.
|
||||||
|
*
|
||||||
|
* @param swapRB flag which indicates that swap first and last channels in 3-channel image is
|
||||||
|
* necessary.
|
||||||
|
*
|
||||||
|
* @param crop flag which indicates whether image will be cropped after resize or not
|
||||||
|
*
|
||||||
|
* @param ddepth Depth of output blob. Choose CV_32F or CV_8U.
|
||||||
|
*/
|
||||||
|
export declare function blobFromImage(
|
||||||
|
image: InputArray,
|
||||||
|
scalefactor?: double,
|
||||||
|
size?: any,
|
||||||
|
mean?: any,
|
||||||
|
swapRB?: bool,
|
||||||
|
crop?: bool,
|
||||||
|
ddepth?: int,
|
||||||
|
): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function blobFromImage(
|
||||||
|
image: InputArray,
|
||||||
|
blob: OutputArray,
|
||||||
|
scalefactor?: double,
|
||||||
|
size?: any,
|
||||||
|
mean?: any,
|
||||||
|
swapRB?: bool,
|
||||||
|
crop?: bool,
|
||||||
|
ddepth?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* if `crop` is true, input image is resized so one side after resize is equal to corresponding
|
||||||
|
* dimension in `size` and another one is equal or larger. Then, crop from the center is performed. If
|
||||||
|
* `crop` is false, direct resize without cropping and preserving aspect ratio is performed.
|
||||||
|
*
|
||||||
|
* 4-dimensional [Mat] with NCHW dimensions order.
|
||||||
|
*
|
||||||
|
* @param images input images (all with 1-, 3- or 4-channels).
|
||||||
|
*
|
||||||
|
* @param scalefactor multiplier for images values.
|
||||||
|
*
|
||||||
|
* @param size spatial size for output image
|
||||||
|
*
|
||||||
|
* @param mean scalar with mean values which are subtracted from channels. Values are intended to be in
|
||||||
|
* (mean-R, mean-G, mean-B) order if image has BGR ordering and swapRB is true.
|
||||||
|
*
|
||||||
|
* @param swapRB flag which indicates that swap first and last channels in 3-channel image is
|
||||||
|
* necessary.
|
||||||
|
*
|
||||||
|
* @param crop flag which indicates whether image will be cropped after resize or not
|
||||||
|
*
|
||||||
|
* @param ddepth Depth of output blob. Choose CV_32F or CV_8U.
|
||||||
|
*/
|
||||||
|
export declare function blobFromImages(
|
||||||
|
images: InputArrayOfArrays,
|
||||||
|
scalefactor?: double,
|
||||||
|
size?: Size,
|
||||||
|
mean?: any,
|
||||||
|
swapRB?: bool,
|
||||||
|
crop?: bool,
|
||||||
|
ddepth?: int,
|
||||||
|
): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function blobFromImages(
|
||||||
|
images: InputArrayOfArrays,
|
||||||
|
blob: OutputArray,
|
||||||
|
scalefactor?: double,
|
||||||
|
size?: Size,
|
||||||
|
mean?: any,
|
||||||
|
swapRB?: bool,
|
||||||
|
crop?: bool,
|
||||||
|
ddepth?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
export declare function getAvailableBackends(): any;
|
||||||
|
|
||||||
|
export declare function getAvailableTargets(be: Backend): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param blob_ 4 dimensional array (images, channels, height, width) in floating point precision
|
||||||
|
* (CV_32F) from which you would like to extract the images.
|
||||||
|
*
|
||||||
|
* @param images_ array of 2D Mat containing the images extracted from the blob in floating point
|
||||||
|
* precision (CV_32F). They are non normalized neither mean added. The number of returned images equals
|
||||||
|
* the first dimension of the blob (batch size). Every image has a number of channels equals to the
|
||||||
|
* second dimension of the blob (depth).
|
||||||
|
*/
|
||||||
|
export declare function imagesFromBlob(
|
||||||
|
blob_: any,
|
||||||
|
images_: OutputArrayOfArrays,
|
||||||
|
): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param bboxes a set of bounding boxes to apply NMS.
|
||||||
|
*
|
||||||
|
* @param scores a set of corresponding confidences.
|
||||||
|
*
|
||||||
|
* @param score_threshold a threshold used to filter boxes by score.
|
||||||
|
*
|
||||||
|
* @param nms_threshold a threshold used in non maximum suppression.
|
||||||
|
*
|
||||||
|
* @param indices the kept indices of bboxes after NMS.
|
||||||
|
*
|
||||||
|
* @param eta a coefficient in adaptive threshold formula: $nms\_threshold_{i+1}=eta\cdot
|
||||||
|
* nms\_threshold_i$.
|
||||||
|
*
|
||||||
|
* @param top_k if >0, keep at most top_k picked indices.
|
||||||
|
*/
|
||||||
|
export declare function NMSBoxes(
|
||||||
|
bboxes: any,
|
||||||
|
scores: any,
|
||||||
|
score_threshold: any,
|
||||||
|
nms_threshold: any,
|
||||||
|
indices: any,
|
||||||
|
eta?: any,
|
||||||
|
top_k?: any,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
export declare function NMSBoxes(
|
||||||
|
bboxes: any,
|
||||||
|
scores: any,
|
||||||
|
score_threshold: any,
|
||||||
|
nms_threshold: any,
|
||||||
|
indices: any,
|
||||||
|
eta?: any,
|
||||||
|
top_k?: any,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
export declare function NMSBoxes(
|
||||||
|
bboxes: any,
|
||||||
|
scores: any,
|
||||||
|
score_threshold: any,
|
||||||
|
nms_threshold: any,
|
||||||
|
indices: any,
|
||||||
|
eta?: any,
|
||||||
|
top_k?: any,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net] object.
|
||||||
|
* This function automatically detects an origin framework of trained model and calls an appropriate
|
||||||
|
* function such [readNetFromCaffe], [readNetFromTensorflow], [readNetFromTorch] or
|
||||||
|
* [readNetFromDarknet]. An order of `model` and `config` arguments does not matter.
|
||||||
|
*
|
||||||
|
* @param model Binary file contains trained weights. The following file extensions are expected for
|
||||||
|
* models from different frameworks:
|
||||||
|
* .caffemodel (Caffe, http://caffe.berkeleyvision.org/)*.pb (TensorFlow,
|
||||||
|
* https://www.tensorflow.org/)*.t7 | *.net (Torch, http://torch.ch/)*.weights (Darknet,
|
||||||
|
* https://pjreddie.com/darknet/)*.bin (DLDT, https://software.intel.com/openvino-toolkit)*.onnx (ONNX,
|
||||||
|
* https://onnx.ai/)
|
||||||
|
*
|
||||||
|
* @param config Text file contains network configuration. It could be a file with the following
|
||||||
|
* extensions:
|
||||||
|
* .prototxt (Caffe, http://caffe.berkeleyvision.org/)*.pbtxt (TensorFlow,
|
||||||
|
* https://www.tensorflow.org/)*.cfg (Darknet, https://pjreddie.com/darknet/)*.xml (DLDT,
|
||||||
|
* https://software.intel.com/openvino-toolkit)
|
||||||
|
*
|
||||||
|
* @param framework Explicit framework name tag to determine a format.
|
||||||
|
*/
|
||||||
|
export declare function readNet(model: any, config?: any, framework?: any): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* @param framework Name of origin framework.
|
||||||
|
*
|
||||||
|
* @param bufferModel A buffer with a content of binary file with weights
|
||||||
|
*
|
||||||
|
* @param bufferConfig A buffer with a content of text file contains network configuration.
|
||||||
|
*/
|
||||||
|
export declare function readNet(
|
||||||
|
framework: any,
|
||||||
|
bufferModel: uchar,
|
||||||
|
bufferConfig?: uchar,
|
||||||
|
): uchar;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* @param prototxt path to the .prototxt file with text description of the network architecture.
|
||||||
|
*
|
||||||
|
* @param caffeModel path to the .caffemodel file with learned network.
|
||||||
|
*/
|
||||||
|
export declare function readNetFromCaffe(prototxt: any, caffeModel?: any): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* @param bufferProto buffer containing the content of the .prototxt file
|
||||||
|
*
|
||||||
|
* @param bufferModel buffer containing the content of the .caffemodel file
|
||||||
|
*/
|
||||||
|
export declare function readNetFromCaffe(
|
||||||
|
bufferProto: uchar,
|
||||||
|
bufferModel?: uchar,
|
||||||
|
): uchar;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* @param bufferProto buffer containing the content of the .prototxt file
|
||||||
|
*
|
||||||
|
* @param lenProto length of bufferProto
|
||||||
|
*
|
||||||
|
* @param bufferModel buffer containing the content of the .caffemodel file
|
||||||
|
*
|
||||||
|
* @param lenModel length of bufferModel
|
||||||
|
*/
|
||||||
|
export declare function readNetFromCaffe(
|
||||||
|
bufferProto: any,
|
||||||
|
lenProto: size_t,
|
||||||
|
bufferModel?: any,
|
||||||
|
lenModel?: size_t,
|
||||||
|
): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Network object that ready to do forward, throw an exception in failure cases.
|
||||||
|
*
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* @param cfgFile path to the .cfg file with text description of the network architecture.
|
||||||
|
*
|
||||||
|
* @param darknetModel path to the .weights file with learned network.
|
||||||
|
*/
|
||||||
|
export declare function readNetFromDarknet(
|
||||||
|
cfgFile: any,
|
||||||
|
darknetModel?: any,
|
||||||
|
): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* @param bufferCfg A buffer contains a content of .cfg file with text description of the network
|
||||||
|
* architecture.
|
||||||
|
*
|
||||||
|
* @param bufferModel A buffer contains a content of .weights file with learned network.
|
||||||
|
*/
|
||||||
|
export declare function readNetFromDarknet(
|
||||||
|
bufferCfg: uchar,
|
||||||
|
bufferModel?: uchar,
|
||||||
|
): uchar;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* @param bufferCfg A buffer contains a content of .cfg file with text description of the network
|
||||||
|
* architecture.
|
||||||
|
*
|
||||||
|
* @param lenCfg Number of bytes to read from bufferCfg
|
||||||
|
*
|
||||||
|
* @param bufferModel A buffer contains a content of .weights file with learned network.
|
||||||
|
*
|
||||||
|
* @param lenModel Number of bytes to read from bufferModel
|
||||||
|
*/
|
||||||
|
export declare function readNetFromDarknet(
|
||||||
|
bufferCfg: any,
|
||||||
|
lenCfg: size_t,
|
||||||
|
bufferModel?: any,
|
||||||
|
lenModel?: size_t,
|
||||||
|
): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net] object. Networks imported from Intel's [Model] Optimizer are launched in Intel's Inference
|
||||||
|
* Engine backend.
|
||||||
|
*
|
||||||
|
* @param xml XML configuration file with network's topology.
|
||||||
|
*
|
||||||
|
* @param bin Binary file with trained weights.
|
||||||
|
*/
|
||||||
|
export declare function readNetFromModelOptimizer(xml: any, bin: any): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Network object that ready to do forward, throw an exception in failure cases.
|
||||||
|
*
|
||||||
|
* @param onnxFile path to the .onnx file with text description of the network architecture.
|
||||||
|
*/
|
||||||
|
export declare function readNetFromONNX(onnxFile: any): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Network object that ready to do forward, throw an exception in failure cases.
|
||||||
|
*
|
||||||
|
* @param buffer memory address of the first byte of the buffer.
|
||||||
|
*
|
||||||
|
* @param sizeBuffer size of the buffer.
|
||||||
|
*/
|
||||||
|
export declare function readNetFromONNX(buffer: any, sizeBuffer: size_t): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Network object that ready to do forward, throw an exception in failure cases.
|
||||||
|
*
|
||||||
|
* @param buffer in-memory buffer that stores the ONNX model bytes.
|
||||||
|
*/
|
||||||
|
export declare function readNetFromONNX(buffer: uchar): uchar;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* @param model path to the .pb file with binary protobuf description of the network architecture
|
||||||
|
*
|
||||||
|
* @param config path to the .pbtxt file that contains text graph definition in protobuf format.
|
||||||
|
* Resulting Net object is built by text graph using weights from a binary one that let us make it more
|
||||||
|
* flexible.
|
||||||
|
*/
|
||||||
|
export declare function readNetFromTensorflow(model: any, config?: any): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* @param bufferModel buffer containing the content of the pb file
|
||||||
|
*
|
||||||
|
* @param bufferConfig buffer containing the content of the pbtxt file
|
||||||
|
*/
|
||||||
|
export declare function readNetFromTensorflow(
|
||||||
|
bufferModel: uchar,
|
||||||
|
bufferConfig?: uchar,
|
||||||
|
): uchar;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param bufferModel buffer containing the content of the pb file
|
||||||
|
*
|
||||||
|
* @param lenModel length of bufferModel
|
||||||
|
*
|
||||||
|
* @param bufferConfig buffer containing the content of the pbtxt file
|
||||||
|
*
|
||||||
|
* @param lenConfig length of bufferConfig
|
||||||
|
*/
|
||||||
|
export declare function readNetFromTensorflow(
|
||||||
|
bufferModel: any,
|
||||||
|
lenModel: size_t,
|
||||||
|
bufferConfig?: any,
|
||||||
|
lenConfig?: size_t,
|
||||||
|
): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net] object.
|
||||||
|
*
|
||||||
|
* Ascii mode of Torch serializer is more preferable, because binary mode extensively use `long` type
|
||||||
|
* of C language, which has various bit-length on different systems.
|
||||||
|
* The loading file must contain serialized object with importing network. Try to eliminate a custom
|
||||||
|
* objects from serialazing data to avoid importing errors.
|
||||||
|
*
|
||||||
|
* List of supported layers (i.e. object instances derived from Torch nn.Module class):
|
||||||
|
*
|
||||||
|
* nn.Sequential
|
||||||
|
* nn.Parallel
|
||||||
|
* nn.Concat
|
||||||
|
* nn.Linear
|
||||||
|
* nn.SpatialConvolution
|
||||||
|
* nn.SpatialMaxPooling, nn.SpatialAveragePooling
|
||||||
|
* nn.ReLU, nn.TanH, nn.Sigmoid
|
||||||
|
* nn.Reshape
|
||||||
|
* nn.SoftMax, nn.LogSoftMax
|
||||||
|
*
|
||||||
|
* Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
|
||||||
|
*
|
||||||
|
* @param model path to the file, dumped from Torch by using torch.save() function.
|
||||||
|
*
|
||||||
|
* @param isBinary specifies whether the network was serialized in ascii mode or binary.
|
||||||
|
*
|
||||||
|
* @param evaluate specifies testing phase of network. If true, it's similar to evaluate() method in
|
||||||
|
* Torch.
|
||||||
|
*/
|
||||||
|
export declare function readNetFromTorch(
|
||||||
|
model: any,
|
||||||
|
isBinary?: bool,
|
||||||
|
evaluate?: bool,
|
||||||
|
): Net;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Mat].
|
||||||
|
*
|
||||||
|
* @param path to the .pb file with input tensor.
|
||||||
|
*/
|
||||||
|
export declare function readTensorFromONNX(path: any): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This function has the same limitations as [readNetFromTorch()].
|
||||||
|
*/
|
||||||
|
export declare function readTorchBlob(filename: any, isBinary?: bool): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Shrinked model has no origin float32 weights so it can't be used in origin Caffe framework anymore.
|
||||||
|
* However the structure of data is taken from NVidia's Caffe fork: . So the resulting model may be
|
||||||
|
* used there.
|
||||||
|
*
|
||||||
|
* @param src Path to origin model from Caffe framework contains single precision floating point
|
||||||
|
* weights (usually has .caffemodel extension).
|
||||||
|
*
|
||||||
|
* @param dst Path to destination model with updated weights.
|
||||||
|
*
|
||||||
|
* @param layersTypes Set of layers types which parameters will be converted. By default, converts only
|
||||||
|
* Convolutional and Fully-Connected layers' weights.
|
||||||
|
*/
|
||||||
|
export declare function shrinkCaffeModel(
|
||||||
|
src: any,
|
||||||
|
dst: any,
|
||||||
|
layersTypes?: any,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* To reduce output file size, trained weights are not included.
|
||||||
|
*
|
||||||
|
* @param model A path to binary network.
|
||||||
|
*
|
||||||
|
* @param output A path to output text file to be created.
|
||||||
|
*/
|
||||||
|
export declare function writeTextGraph(model: any, output: any): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* DNN_BACKEND_DEFAULT equals to DNN_BACKEND_INFERENCE_ENGINE if OpenCV is built with Intel's Inference
|
||||||
|
* Engine library or DNN_BACKEND_OPENCV otherwise.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const DNN_BACKEND_DEFAULT: Backend; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_BACKEND_HALIDE: Backend; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_BACKEND_INFERENCE_ENGINE: Backend; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_BACKEND_OPENCV: Backend; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_BACKEND_VKCOM: Backend; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_TARGET_CPU: Target; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_TARGET_OPENCL: Target; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_TARGET_OPENCL_FP16: Target; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_TARGET_MYRIAD: Target; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_TARGET_VULKAN: Target; // initializer:
|
||||||
|
|
||||||
|
export declare const DNN_TARGET_FPGA: Target; // initializer:
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net::setPreferableBackend]
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export type Backend = any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [Net::setPreferableBackend]
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export type Target = any;
|
||||||
114
opencv-js-4.10.0/src/types/opencv/features2d_draw.ts
Normal file
114
opencv-js-4.10.0/src/types/opencv/features2d_draw.ts
Normal file
|
|
@ -0,0 +1,114 @@
|
||||||
|
import type { InputArray, InputOutputArray } from "./_types";
|
||||||
|
/*
|
||||||
|
* # Drawing Function of Keypoints and Matches
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* For Python API, flags are modified as cv.DRAW_MATCHES_FLAGS_DEFAULT,
|
||||||
|
* cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
|
||||||
|
* cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
|
||||||
|
*
|
||||||
|
* @param image Source image.
|
||||||
|
*
|
||||||
|
* @param keypoints Keypoints from the source image.
|
||||||
|
*
|
||||||
|
* @param outImage Output image. Its content depends on the flags value defining what is drawn in the
|
||||||
|
* output image. See possible flags bit values below.
|
||||||
|
*
|
||||||
|
* @param color Color of keypoints.
|
||||||
|
*
|
||||||
|
* @param flags Flags setting drawing features. Possible flags bit values are defined by
|
||||||
|
* DrawMatchesFlags. See details above in drawMatches .
|
||||||
|
*/
|
||||||
|
export declare function drawKeypoints(
|
||||||
|
image: InputArray,
|
||||||
|
keypoints: any,
|
||||||
|
outImage: InputOutputArray,
|
||||||
|
color?: any,
|
||||||
|
flags?: DrawMatchesFlags,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This function draws matches of keypoints from two images in the output image. Match is a line
|
||||||
|
* connecting two keypoints (circles). See [cv::DrawMatchesFlags].
|
||||||
|
*
|
||||||
|
* @param img1 First source image.
|
||||||
|
*
|
||||||
|
* @param keypoints1 Keypoints from the first source image.
|
||||||
|
*
|
||||||
|
* @param img2 Second source image.
|
||||||
|
*
|
||||||
|
* @param keypoints2 Keypoints from the second source image.
|
||||||
|
*
|
||||||
|
* @param matches1to2 Matches from the first image to the second one, which means that keypoints1[i]
|
||||||
|
* has a corresponding point in keypoints2[matches[i]] .
|
||||||
|
*
|
||||||
|
* @param outImg Output image. Its content depends on the flags value defining what is drawn in the
|
||||||
|
* output image. See possible flags bit values below.
|
||||||
|
*
|
||||||
|
* @param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1) ,
|
||||||
|
* the color is generated randomly.
|
||||||
|
*
|
||||||
|
* @param singlePointColor Color of single keypoints (circles), which means that keypoints do not have
|
||||||
|
* the matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly.
|
||||||
|
*
|
||||||
|
* @param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are
|
||||||
|
* drawn.
|
||||||
|
*
|
||||||
|
* @param flags Flags setting drawing features. Possible flags bit values are defined by
|
||||||
|
* DrawMatchesFlags.
|
||||||
|
*/
|
||||||
|
export declare function drawMatches(
|
||||||
|
img1: InputArray,
|
||||||
|
keypoints1: any,
|
||||||
|
img2: InputArray,
|
||||||
|
keypoints2: any,
|
||||||
|
matches1to2: any,
|
||||||
|
outImg: InputOutputArray,
|
||||||
|
matchColor?: any,
|
||||||
|
singlePointColor?: any,
|
||||||
|
matchesMask?: any,
|
||||||
|
flags?: DrawMatchesFlags,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function drawMatches(
|
||||||
|
img1: InputArray,
|
||||||
|
keypoints1: any,
|
||||||
|
img2: InputArray,
|
||||||
|
keypoints2: any,
|
||||||
|
matches1to2: any,
|
||||||
|
outImg: InputOutputArray,
|
||||||
|
matchColor?: any,
|
||||||
|
singlePointColor?: any,
|
||||||
|
matchesMask?: any,
|
||||||
|
flags?: DrawMatchesFlags,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Output image matrix will be created ([Mat::create]), i.e. existing memory of output image may be
|
||||||
|
* reused. Two source image, matches and single keypoints will be drawn. For each keypoint only the
|
||||||
|
* center point will be drawn (without the circle around keypoint with keypoint size and orientation).
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const DEFAULT: DrawMatchesFlags; // initializer: = 0
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Output image matrix will not be created ([Mat::create]). Matches will be drawn on existing content
|
||||||
|
* of output image.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const DRAW_OVER_OUTIMG: DrawMatchesFlags; // initializer: = 1
|
||||||
|
|
||||||
|
export declare const NOT_DRAW_SINGLE_POINTS: DrawMatchesFlags; // initializer: = 2
|
||||||
|
|
||||||
|
/**
|
||||||
|
* For each keypoint the circle around keypoint with keypoint size and orientation will be drawn.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const DRAW_RICH_KEYPOINTS: DrawMatchesFlags; // initializer: = 4
|
||||||
|
|
||||||
|
export type DrawMatchesFlags = any;
|
||||||
26
opencv-js-4.10.0/src/types/opencv/fisheye.ts
Normal file
26
opencv-js-4.10.0/src/types/opencv/fisheye.ts
Normal file
|
|
@ -0,0 +1,26 @@
|
||||||
|
import type { InputArray, OutputArray, int, Size } from "./_types";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Computes the undistortion and rectification maps for the image transform using remap.
|
||||||
|
* If D is empty, zero distortion is used. If R or P is empty, identity matrices are used.
|
||||||
|
*
|
||||||
|
* @param {InputArray} K - Camera intrinsic matrix.
|
||||||
|
* @param {InputArray} D - Input vector of distortion coefficients (k1, k2, k3, k4).
|
||||||
|
* @param {InputArray} R - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 1-channel or 1x1 3-channel.
|
||||||
|
* @param {InputArray} P - New camera intrinsic matrix (3x3) or new projection matrix (3x4).
|
||||||
|
* @param {Size} size - Undistorted image size.
|
||||||
|
* @param {int} m1type - Type of the first output map that can be CV_32FC1 or CV_16SC2. See convertMaps for details.
|
||||||
|
* @param {OutputArray} map1 - The first output map.
|
||||||
|
* @param {OutputArray} map2 - The second output map.
|
||||||
|
* @return {void}
|
||||||
|
*/
|
||||||
|
export declare function fisheye_initUndistortRectifyMap(
|
||||||
|
K: InputArray,
|
||||||
|
D: InputArray,
|
||||||
|
R: InputArray,
|
||||||
|
P: InputArray,
|
||||||
|
size: Size,
|
||||||
|
m1type: int,
|
||||||
|
map1: OutputArray,
|
||||||
|
map2: OutputArray,
|
||||||
|
): void;
|
||||||
527
opencv-js-4.10.0/src/types/opencv/imgproc_color_conversions.ts
Normal file
527
opencv-js-4.10.0/src/types/opencv/imgproc_color_conversions.ts
Normal file
|
|
@ -0,0 +1,527 @@
|
||||||
|
import type { InputArray, int, OutputArray } from "./_types";
|
||||||
|
/*
|
||||||
|
* # Color Space Conversions
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* The function converts an input image from one color space to another. In case of a transformation
|
||||||
|
* to-from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note
|
||||||
|
* that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the
|
||||||
|
* bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue
|
||||||
|
* component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and
|
||||||
|
* sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.
|
||||||
|
*
|
||||||
|
* The conventional ranges for R, G, and B channel values are:
|
||||||
|
*
|
||||||
|
* 0 to 255 for CV_8U images
|
||||||
|
* 0 to 65535 for CV_16U images
|
||||||
|
* 0 to 1 for CV_32F images
|
||||||
|
*
|
||||||
|
* In case of linear transformations, the range does not matter. But in case of a non-linear
|
||||||
|
* transformation, an input RGB image should be normalized to the proper value range to get the correct
|
||||||
|
* results, for example, for RGB `$\\rightarrow$` L*u*v* transformation. For example, if you have a
|
||||||
|
* 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will
|
||||||
|
* have the 0..255 value range instead of 0..1 assumed by the function. So, before calling [cvtColor] ,
|
||||||
|
* you need first to scale the image down:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* img *= 1./255;
|
||||||
|
* cvtColor(img, img, COLOR_BGR2Luv);
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* If you use [cvtColor] with 8-bit images, the conversion will have some information lost. For many
|
||||||
|
* applications, this will not be noticeable but it is recommended to use 32-bit images in applications
|
||||||
|
* that need the full range of colors or that convert an image before an operation and then convert
|
||||||
|
* back.
|
||||||
|
*
|
||||||
|
* If conversion adds the alpha channel, its value will set to the maximum of corresponding channel
|
||||||
|
* range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.
|
||||||
|
*
|
||||||
|
* [Color conversions]
|
||||||
|
*
|
||||||
|
* @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision
|
||||||
|
* floating-point.
|
||||||
|
*
|
||||||
|
* @param dst output image of the same size and depth as src.
|
||||||
|
*
|
||||||
|
* @param code color space conversion code (see ColorConversionCodes).
|
||||||
|
*
|
||||||
|
* @param dstCn number of channels in the destination image; if the parameter is 0, the number of the
|
||||||
|
* channels is derived automatically from src and code.
|
||||||
|
*/
|
||||||
|
export declare function cvtColor(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
code: int,
|
||||||
|
dstCn?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This function only supports YUV420 to RGB conversion as of now.
|
||||||
|
*
|
||||||
|
* @param src1 8-bit image (CV_8U) of the Y plane.
|
||||||
|
*
|
||||||
|
* @param src2 image containing interleaved U/V plane.
|
||||||
|
*
|
||||||
|
* @param dst output image.
|
||||||
|
*
|
||||||
|
* @param code Specifies the type of conversion. It can take any of the following values:
|
||||||
|
* COLOR_YUV2BGR_NV12COLOR_YUV2RGB_NV12COLOR_YUV2BGRA_NV12COLOR_YUV2RGBA_NV12COLOR_YUV2BGR_NV21COLOR_YUV2RGB_NV21COLOR_YUV2BGRA_NV21COLOR_YUV2RGBA_NV21
|
||||||
|
*/
|
||||||
|
export declare function cvtColorTwoPlane(
|
||||||
|
src1: InputArray,
|
||||||
|
src2: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
code: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function can do the following transformations:
|
||||||
|
*
|
||||||
|
* Demosaicing using bilinear interpolation[COLOR_BayerBG2BGR] , [COLOR_BayerGB2BGR] ,
|
||||||
|
* [COLOR_BayerRG2BGR] , [COLOR_BayerGR2BGR][COLOR_BayerBG2GRAY] , [COLOR_BayerGB2GRAY] ,
|
||||||
|
* [COLOR_BayerRG2GRAY] , [COLOR_BayerGR2GRAY]
|
||||||
|
* Demosaicing using Variable Number of Gradients.[COLOR_BayerBG2BGR_VNG] , [COLOR_BayerGB2BGR_VNG] ,
|
||||||
|
* [COLOR_BayerRG2BGR_VNG] , [COLOR_BayerGR2BGR_VNG]
|
||||||
|
* Edge-Aware Demosaicing.[COLOR_BayerBG2BGR_EA] , [COLOR_BayerGB2BGR_EA] , [COLOR_BayerRG2BGR_EA] ,
|
||||||
|
* [COLOR_BayerGR2BGR_EA]
|
||||||
|
* Demosaicing with alpha channel[COLOR_BayerBG2BGRA] , [COLOR_BayerGB2BGRA] , [COLOR_BayerRG2BGRA] ,
|
||||||
|
* [COLOR_BayerGR2BGRA]
|
||||||
|
*
|
||||||
|
* [cvtColor]
|
||||||
|
*
|
||||||
|
* @param src input image: 8-bit unsigned or 16-bit unsigned.
|
||||||
|
*
|
||||||
|
* @param dst output image of the same size and depth as src.
|
||||||
|
*
|
||||||
|
* @param code Color space conversion code (see the description below).
|
||||||
|
*
|
||||||
|
* @param dstCn number of channels in the destination image; if the parameter is 0, the number of the
|
||||||
|
* channels is derived automatically from src and code.
|
||||||
|
*/
|
||||||
|
export declare function demosaicing(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
code: int,
|
||||||
|
dstCn?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2BGRA: ColorConversionCodes; // initializer: = 0
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2RGBA: ColorConversionCodes; // initializer: = COLOR_BGR2BGRA
|
||||||
|
|
||||||
|
export declare const COLOR_BGRA2BGR: ColorConversionCodes; // initializer: = 1
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2RGB: ColorConversionCodes; // initializer: = COLOR_BGRA2BGR
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2RGBA: ColorConversionCodes; // initializer: = 2
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2BGRA: ColorConversionCodes; // initializer: = COLOR_BGR2RGBA
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2BGR: ColorConversionCodes; // initializer: = 3
|
||||||
|
|
||||||
|
export declare const COLOR_BGRA2RGB: ColorConversionCodes; // initializer: = COLOR_RGBA2BGR
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2RGB: ColorConversionCodes; // initializer: = 4
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2BGR: ColorConversionCodes; // initializer: = COLOR_BGR2RGB
|
||||||
|
|
||||||
|
export declare const COLOR_BGRA2RGBA: ColorConversionCodes; // initializer: = 5
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2BGRA: ColorConversionCodes; // initializer: = COLOR_BGRA2RGBA
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2GRAY: ColorConversionCodes; // initializer: = 6
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2GRAY: ColorConversionCodes; // initializer: = 7
|
||||||
|
|
||||||
|
export declare const COLOR_GRAY2BGR: ColorConversionCodes; // initializer: = 8
|
||||||
|
|
||||||
|
export declare const COLOR_GRAY2RGB: ColorConversionCodes; // initializer: = COLOR_GRAY2BGR
|
||||||
|
|
||||||
|
export declare const COLOR_GRAY2BGRA: ColorConversionCodes; // initializer: = 9
|
||||||
|
|
||||||
|
export declare const COLOR_GRAY2RGBA: ColorConversionCodes; // initializer: = COLOR_GRAY2BGRA
|
||||||
|
|
||||||
|
export declare const COLOR_BGRA2GRAY: ColorConversionCodes; // initializer: = 10
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2GRAY: ColorConversionCodes; // initializer: = 11
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2BGR565: ColorConversionCodes; // initializer: = 12
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2BGR565: ColorConversionCodes; // initializer: = 13
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5652BGR: ColorConversionCodes; // initializer: = 14
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5652RGB: ColorConversionCodes; // initializer: = 15
|
||||||
|
|
||||||
|
export declare const COLOR_BGRA2BGR565: ColorConversionCodes; // initializer: = 16
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2BGR565: ColorConversionCodes; // initializer: = 17
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5652BGRA: ColorConversionCodes; // initializer: = 18
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5652RGBA: ColorConversionCodes; // initializer: = 19
|
||||||
|
|
||||||
|
export declare const COLOR_GRAY2BGR565: ColorConversionCodes; // initializer: = 20
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5652GRAY: ColorConversionCodes; // initializer: = 21
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2BGR555: ColorConversionCodes; // initializer: = 22
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2BGR555: ColorConversionCodes; // initializer: = 23
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5552BGR: ColorConversionCodes; // initializer: = 24
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5552RGB: ColorConversionCodes; // initializer: = 25
|
||||||
|
|
||||||
|
export declare const COLOR_BGRA2BGR555: ColorConversionCodes; // initializer: = 26
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2BGR555: ColorConversionCodes; // initializer: = 27
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5552BGRA: ColorConversionCodes; // initializer: = 28
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5552RGBA: ColorConversionCodes; // initializer: = 29
|
||||||
|
|
||||||
|
export declare const COLOR_GRAY2BGR555: ColorConversionCodes; // initializer: = 30
|
||||||
|
|
||||||
|
export declare const COLOR_BGR5552GRAY: ColorConversionCodes; // initializer: = 31
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2XYZ: ColorConversionCodes; // initializer: = 32
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2XYZ: ColorConversionCodes; // initializer: = 33
|
||||||
|
|
||||||
|
export declare const COLOR_XYZ2BGR: ColorConversionCodes; // initializer: = 34
|
||||||
|
|
||||||
|
export declare const COLOR_XYZ2RGB: ColorConversionCodes; // initializer: = 35
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2YCrCb: ColorConversionCodes; // initializer: = 36
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2YCrCb: ColorConversionCodes; // initializer: = 37
|
||||||
|
|
||||||
|
export declare const COLOR_YCrCb2BGR: ColorConversionCodes; // initializer: = 38
|
||||||
|
|
||||||
|
export declare const COLOR_YCrCb2RGB: ColorConversionCodes; // initializer: = 39
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2HSV: ColorConversionCodes; // initializer: = 40
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2HSV: ColorConversionCodes; // initializer: = 41
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2Lab: ColorConversionCodes; // initializer: = 44
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2Lab: ColorConversionCodes; // initializer: = 45
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2Luv: ColorConversionCodes; // initializer: = 50
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2Luv: ColorConversionCodes; // initializer: = 51
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2HLS: ColorConversionCodes; // initializer: = 52
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2HLS: ColorConversionCodes; // initializer: = 53
|
||||||
|
|
||||||
|
export declare const COLOR_HSV2BGR: ColorConversionCodes; // initializer: = 54
|
||||||
|
|
||||||
|
export declare const COLOR_HSV2RGB: ColorConversionCodes; // initializer: = 55
|
||||||
|
|
||||||
|
export declare const COLOR_Lab2BGR: ColorConversionCodes; // initializer: = 56
|
||||||
|
|
||||||
|
export declare const COLOR_Lab2RGB: ColorConversionCodes; // initializer: = 57
|
||||||
|
|
||||||
|
export declare const COLOR_Luv2BGR: ColorConversionCodes; // initializer: = 58
|
||||||
|
|
||||||
|
export declare const COLOR_Luv2RGB: ColorConversionCodes; // initializer: = 59
|
||||||
|
|
||||||
|
export declare const COLOR_HLS2BGR: ColorConversionCodes; // initializer: = 60
|
||||||
|
|
||||||
|
export declare const COLOR_HLS2RGB: ColorConversionCodes; // initializer: = 61
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2HSV_FULL: ColorConversionCodes; // initializer: = 66
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2HSV_FULL: ColorConversionCodes; // initializer: = 67
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2HLS_FULL: ColorConversionCodes; // initializer: = 68
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2HLS_FULL: ColorConversionCodes; // initializer: = 69
|
||||||
|
|
||||||
|
export declare const COLOR_HSV2BGR_FULL: ColorConversionCodes; // initializer: = 70
|
||||||
|
|
||||||
|
export declare const COLOR_HSV2RGB_FULL: ColorConversionCodes; // initializer: = 71
|
||||||
|
|
||||||
|
export declare const COLOR_HLS2BGR_FULL: ColorConversionCodes; // initializer: = 72
|
||||||
|
|
||||||
|
export declare const COLOR_HLS2RGB_FULL: ColorConversionCodes; // initializer: = 73
|
||||||
|
|
||||||
|
export declare const COLOR_LBGR2Lab: ColorConversionCodes; // initializer: = 74
|
||||||
|
|
||||||
|
export declare const COLOR_LRGB2Lab: ColorConversionCodes; // initializer: = 75
|
||||||
|
|
||||||
|
export declare const COLOR_LBGR2Luv: ColorConversionCodes; // initializer: = 76
|
||||||
|
|
||||||
|
export declare const COLOR_LRGB2Luv: ColorConversionCodes; // initializer: = 77
|
||||||
|
|
||||||
|
export declare const COLOR_Lab2LBGR: ColorConversionCodes; // initializer: = 78
|
||||||
|
|
||||||
|
export declare const COLOR_Lab2LRGB: ColorConversionCodes; // initializer: = 79
|
||||||
|
|
||||||
|
export declare const COLOR_Luv2LBGR: ColorConversionCodes; // initializer: = 80
|
||||||
|
|
||||||
|
export declare const COLOR_Luv2LRGB: ColorConversionCodes; // initializer: = 81
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2YUV: ColorConversionCodes; // initializer: = 82
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2YUV: ColorConversionCodes; // initializer: = 83
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR: ColorConversionCodes; // initializer: = 84
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB: ColorConversionCodes; // initializer: = 85
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_NV12: ColorConversionCodes; // initializer: = 90
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_NV12: ColorConversionCodes; // initializer: = 91
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_NV21: ColorConversionCodes; // initializer: = 92
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_NV21: ColorConversionCodes; // initializer: = 93
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420sp2RGB: ColorConversionCodes; // initializer: = COLOR_YUV2RGB_NV21
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420sp2BGR: ColorConversionCodes; // initializer: = COLOR_YUV2BGR_NV21
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_NV12: ColorConversionCodes; // initializer: = 94
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_NV12: ColorConversionCodes; // initializer: = 95
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_NV21: ColorConversionCodes; // initializer: = 96
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_NV21: ColorConversionCodes; // initializer: = 97
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420sp2RGBA: ColorConversionCodes; // initializer: = COLOR_YUV2RGBA_NV21
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420sp2BGRA: ColorConversionCodes; // initializer: = COLOR_YUV2BGRA_NV21
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_YV12: ColorConversionCodes; // initializer: = 98
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_YV12: ColorConversionCodes; // initializer: = 99
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_IYUV: ColorConversionCodes; // initializer: = 100
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_IYUV: ColorConversionCodes; // initializer: = 101
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_I420: ColorConversionCodes; // initializer: = COLOR_YUV2RGB_IYUV
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_I420: ColorConversionCodes; // initializer: = COLOR_YUV2BGR_IYUV
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420p2RGB: ColorConversionCodes; // initializer: = COLOR_YUV2RGB_YV12
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420p2BGR: ColorConversionCodes; // initializer: = COLOR_YUV2BGR_YV12
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_YV12: ColorConversionCodes; // initializer: = 102
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_YV12: ColorConversionCodes; // initializer: = 103
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_IYUV: ColorConversionCodes; // initializer: = 104
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_IYUV: ColorConversionCodes; // initializer: = 105
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_I420: ColorConversionCodes; // initializer: = COLOR_YUV2RGBA_IYUV
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_I420: ColorConversionCodes; // initializer: = COLOR_YUV2BGRA_IYUV
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420p2RGBA: ColorConversionCodes; // initializer: = COLOR_YUV2RGBA_YV12
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420p2BGRA: ColorConversionCodes; // initializer: = COLOR_YUV2BGRA_YV12
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_420: ColorConversionCodes; // initializer: = 106
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_NV21: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_420
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_NV12: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_420
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_YV12: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_420
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_IYUV: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_420
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_I420: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_420
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420sp2GRAY: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_420
|
||||||
|
|
||||||
|
export declare const COLOR_YUV420p2GRAY: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_420
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_UYVY: ColorConversionCodes; // initializer: = 107
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_UYVY: ColorConversionCodes; // initializer: = 108
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_Y422: ColorConversionCodes; // initializer: = COLOR_YUV2RGB_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_Y422: ColorConversionCodes; // initializer: = COLOR_YUV2BGR_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_UYNV: ColorConversionCodes; // initializer: = COLOR_YUV2RGB_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_UYNV: ColorConversionCodes; // initializer: = COLOR_YUV2BGR_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_UYVY: ColorConversionCodes; // initializer: = 111
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_UYVY: ColorConversionCodes; // initializer: = 112
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_Y422: ColorConversionCodes; // initializer: = COLOR_YUV2RGBA_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_Y422: ColorConversionCodes; // initializer: = COLOR_YUV2BGRA_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_UYNV: ColorConversionCodes; // initializer: = COLOR_YUV2RGBA_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_UYNV: ColorConversionCodes; // initializer: = COLOR_YUV2BGRA_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_YUY2: ColorConversionCodes; // initializer: = 115
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_YUY2: ColorConversionCodes; // initializer: = 116
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_YVYU: ColorConversionCodes; // initializer: = 117
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_YVYU: ColorConversionCodes; // initializer: = 118
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_YUYV: ColorConversionCodes; // initializer: = COLOR_YUV2RGB_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_YUYV: ColorConversionCodes; // initializer: = COLOR_YUV2BGR_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGB_YUNV: ColorConversionCodes; // initializer: = COLOR_YUV2RGB_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGR_YUNV: ColorConversionCodes; // initializer: = COLOR_YUV2BGR_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_YUY2: ColorConversionCodes; // initializer: = 119
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_YUY2: ColorConversionCodes; // initializer: = 120
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_YVYU: ColorConversionCodes; // initializer: = 121
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_YVYU: ColorConversionCodes; // initializer: = 122
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_YUYV: ColorConversionCodes; // initializer: = COLOR_YUV2RGBA_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_YUYV: ColorConversionCodes; // initializer: = COLOR_YUV2BGRA_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2RGBA_YUNV: ColorConversionCodes; // initializer: = COLOR_YUV2RGBA_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2BGRA_YUNV: ColorConversionCodes; // initializer: = COLOR_YUV2BGRA_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_UYVY: ColorConversionCodes; // initializer: = 123
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_YUY2: ColorConversionCodes; // initializer: = 124
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_Y422: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_UYNV: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_UYVY
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_YVYU: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_YUYV: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_YUV2GRAY_YUNV: ColorConversionCodes; // initializer: = COLOR_YUV2GRAY_YUY2
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2mRGBA: ColorConversionCodes; // initializer: = 125
|
||||||
|
|
||||||
|
export declare const COLOR_mRGBA2RGBA: ColorConversionCodes; // initializer: = 126
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2YUV_I420: ColorConversionCodes; // initializer: = 127
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2YUV_I420: ColorConversionCodes; // initializer: = 128
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2YUV_IYUV: ColorConversionCodes; // initializer: = COLOR_RGB2YUV_I420
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2YUV_IYUV: ColorConversionCodes; // initializer: = COLOR_BGR2YUV_I420
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2YUV_I420: ColorConversionCodes; // initializer: = 129
|
||||||
|
|
||||||
|
export declare const COLOR_BGRA2YUV_I420: ColorConversionCodes; // initializer: = 130
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2YUV_IYUV: ColorConversionCodes; // initializer: = COLOR_RGBA2YUV_I420
|
||||||
|
|
||||||
|
export declare const COLOR_BGRA2YUV_IYUV: ColorConversionCodes; // initializer: = COLOR_BGRA2YUV_I420
|
||||||
|
|
||||||
|
export declare const COLOR_RGB2YUV_YV12: ColorConversionCodes; // initializer: = 131
|
||||||
|
|
||||||
|
export declare const COLOR_BGR2YUV_YV12: ColorConversionCodes; // initializer: = 132
|
||||||
|
|
||||||
|
export declare const COLOR_RGBA2YUV_YV12: ColorConversionCodes; // initializer: = 133
|
||||||
|
|
||||||
|
export declare const COLOR_BGRA2YUV_YV12: ColorConversionCodes; // initializer: = 134
|
||||||
|
|
||||||
|
export declare const COLOR_BayerBG2BGR: ColorConversionCodes; // initializer: = 46
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGB2BGR: ColorConversionCodes; // initializer: = 47
|
||||||
|
|
||||||
|
export declare const COLOR_BayerRG2BGR: ColorConversionCodes; // initializer: = 48
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGR2BGR: ColorConversionCodes; // initializer: = 49
|
||||||
|
|
||||||
|
export declare const COLOR_BayerBG2RGB: ColorConversionCodes; // initializer: = COLOR_BayerRG2BGR
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGB2RGB: ColorConversionCodes; // initializer: = COLOR_BayerGR2BGR
|
||||||
|
|
||||||
|
export declare const COLOR_BayerRG2RGB: ColorConversionCodes; // initializer: = COLOR_BayerBG2BGR
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGR2RGB: ColorConversionCodes; // initializer: = COLOR_BayerGB2BGR
|
||||||
|
|
||||||
|
export declare const COLOR_BayerBG2GRAY: ColorConversionCodes; // initializer: = 86
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGB2GRAY: ColorConversionCodes; // initializer: = 87
|
||||||
|
|
||||||
|
export declare const COLOR_BayerRG2GRAY: ColorConversionCodes; // initializer: = 88
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGR2GRAY: ColorConversionCodes; // initializer: = 89
|
||||||
|
|
||||||
|
export declare const COLOR_BayerBG2BGR_VNG: ColorConversionCodes; // initializer: = 62
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGB2BGR_VNG: ColorConversionCodes; // initializer: = 63
|
||||||
|
|
||||||
|
export declare const COLOR_BayerRG2BGR_VNG: ColorConversionCodes; // initializer: = 64
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGR2BGR_VNG: ColorConversionCodes; // initializer: = 65
|
||||||
|
|
||||||
|
export declare const COLOR_BayerBG2RGB_VNG: ColorConversionCodes; // initializer: = COLOR_BayerRG2BGR_VNG
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGB2RGB_VNG: ColorConversionCodes; // initializer: = COLOR_BayerGR2BGR_VNG
|
||||||
|
|
||||||
|
export declare const COLOR_BayerRG2RGB_VNG: ColorConversionCodes; // initializer: = COLOR_BayerBG2BGR_VNG
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGR2RGB_VNG: ColorConversionCodes; // initializer: = COLOR_BayerGB2BGR_VNG
|
||||||
|
|
||||||
|
export declare const COLOR_BayerBG2BGR_EA: ColorConversionCodes; // initializer: = 135
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGB2BGR_EA: ColorConversionCodes; // initializer: = 136
|
||||||
|
|
||||||
|
export declare const COLOR_BayerRG2BGR_EA: ColorConversionCodes; // initializer: = 137
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGR2BGR_EA: ColorConversionCodes; // initializer: = 138
|
||||||
|
|
||||||
|
export declare const COLOR_BayerBG2RGB_EA: ColorConversionCodes; // initializer: = COLOR_BayerRG2BGR_EA
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGB2RGB_EA: ColorConversionCodes; // initializer: = COLOR_BayerGR2BGR_EA
|
||||||
|
|
||||||
|
export declare const COLOR_BayerRG2RGB_EA: ColorConversionCodes; // initializer: = COLOR_BayerBG2BGR_EA
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGR2RGB_EA: ColorConversionCodes; // initializer: = COLOR_BayerGB2BGR_EA
|
||||||
|
|
||||||
|
export declare const COLOR_BayerBG2BGRA: ColorConversionCodes; // initializer: = 139
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGB2BGRA: ColorConversionCodes; // initializer: = 140
|
||||||
|
|
||||||
|
export declare const COLOR_BayerRG2BGRA: ColorConversionCodes; // initializer: = 141
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGR2BGRA: ColorConversionCodes; // initializer: = 142
|
||||||
|
|
||||||
|
export declare const COLOR_BayerBG2RGBA: ColorConversionCodes; // initializer: = COLOR_BayerRG2BGRA
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGB2RGBA: ColorConversionCodes; // initializer: = COLOR_BayerGR2BGRA
|
||||||
|
|
||||||
|
export declare const COLOR_BayerRG2RGBA: ColorConversionCodes; // initializer: = COLOR_BayerBG2BGRA
|
||||||
|
|
||||||
|
export declare const COLOR_BayerGR2RGBA: ColorConversionCodes; // initializer: = COLOR_BayerGB2BGRA
|
||||||
|
|
||||||
|
export declare const COLOR_COLORCVT_MAX: ColorConversionCodes; // initializer: = 143
|
||||||
|
|
||||||
|
/**
|
||||||
|
* the color conversion codes
|
||||||
|
*
|
||||||
|
* [Color conversions]
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export type ColorConversionCodes = any;
|
||||||
733
opencv-js-4.10.0/src/types/opencv/imgproc_draw.ts
Normal file
733
opencv-js-4.10.0/src/types/opencv/imgproc_draw.ts
Normal file
|
|
@ -0,0 +1,733 @@
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
InputArray,
|
||||||
|
InputArrayOfArrays,
|
||||||
|
InputOutputArray,
|
||||||
|
int,
|
||||||
|
Point,
|
||||||
|
Point2d,
|
||||||
|
Rect,
|
||||||
|
Scalar,
|
||||||
|
Size,
|
||||||
|
Size2d,
|
||||||
|
Size2l,
|
||||||
|
} from "./_types";
|
||||||
|
/*
 * # Drawing Functions
 * Drawing functions work with matrices/images of arbitrary depth. The boundaries of the shapes can be rendered with antialiasing (implemented only for 8-bit images for now). All the functions include the parameter color that uses an RGB value (that may be constructed with the Scalar constructor ) for color images and brightness for grayscale images. For color images, the channel ordering is normally *Blue, Green, Red*. This is what imshow, imread, and imwrite expect. So, if you form a color using the Scalar constructor, it should look like:
 *
 * `\[\texttt{Scalar} (blue \_ component, green \_ component, red \_ component[, alpha \_ component])\]`
 *
 * If you are using your own image rendering and I/O functions, you can use any channel ordering. The drawing functions process each channel independently and do not depend on the channel order or even on the used color space. The whole image can be converted from BGR to RGB or to a different color space using cvtColor .
 *
 * If a drawn figure is partially or completely outside the image, the drawing functions clip it. Also, many drawing functions can handle pixel coordinates specified with sub-pixel accuracy. This means that the coordinates can be passed as fixed-point numbers encoded as integers. The number of fractional bits is specified by the shift parameter and the real point coordinates are calculated as `$\texttt{Point}(x,y)\rightarrow\texttt{Point2f}(x*2^{-shift},y*2^{-shift})$` . This feature is especially effective when rendering antialiased shapes.
 *
 * The functions do not support alpha-transparency when the target image is 4-channel. In this case, the color[3] is simply copied to the repainted pixels. Thus, if you want to paint semi-transparent shapes, you can paint them in a separate buffer and then blend it with the main image.
 */
|
||||||
|
/**
|
||||||
|
* The function [cv::arrowedLine] draws an arrow between pt1 and pt2 points in the image. See also
|
||||||
|
* [line].
|
||||||
|
*
|
||||||
|
* @param img Image.
|
||||||
|
*
|
||||||
|
* @param pt1 The point the arrow starts from.
|
||||||
|
*
|
||||||
|
* @param pt2 The point the arrow points to.
|
||||||
|
*
|
||||||
|
* @param color Line color.
|
||||||
|
*
|
||||||
|
* @param thickness Line thickness.
|
||||||
|
*
|
||||||
|
* @param line_type Type of the line. See LineTypes
|
||||||
|
*
|
||||||
|
* @param shift Number of fractional bits in the point coordinates.
|
||||||
|
*
|
||||||
|
* @param tipLength The length of the arrow tip in relation to the arrow length
|
||||||
|
*/
|
||||||
|
export declare function arrowedLine(
|
||||||
|
img: InputOutputArray,
|
||||||
|
pt1: Point,
|
||||||
|
pt2: Point,
|
||||||
|
color: any,
|
||||||
|
thickness?: int,
|
||||||
|
line_type?: int,
|
||||||
|
shift?: int,
|
||||||
|
tipLength?: double,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function [cv::circle] draws a simple or filled circle with a given center and radius.
|
||||||
|
*
|
||||||
|
* @param img Image where the circle is drawn.
|
||||||
|
*
|
||||||
|
* @param center Center of the circle.
|
||||||
|
*
|
||||||
|
* @param radius Radius of the circle.
|
||||||
|
*
|
||||||
|
* @param color Circle color.
|
||||||
|
*
|
||||||
|
* @param thickness Thickness of the circle outline, if positive. Negative values, like FILLED, mean
|
||||||
|
* that a filled circle is to be drawn.
|
||||||
|
*
|
||||||
|
* @param lineType Type of the circle boundary. See LineTypes
|
||||||
|
*
|
||||||
|
* @param shift Number of fractional bits in the coordinates of the center and in the radius value.
|
||||||
|
*/
|
||||||
|
export declare function circle(
|
||||||
|
img: InputOutputArray,
|
||||||
|
center: Point,
|
||||||
|
radius: int,
|
||||||
|
color: any,
|
||||||
|
thickness?: int,
|
||||||
|
lineType?: int,
|
||||||
|
shift?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function [cv::clipLine] calculates a part of the line segment that is entirely within the
|
||||||
|
* specified rectangle. it returns false if the line segment is completely outside the rectangle.
|
||||||
|
* Otherwise, it returns true .
|
||||||
|
*
|
||||||
|
* @param imgSize Image size. The image rectangle is Rect(0, 0, imgSize.width, imgSize.height) .
|
||||||
|
*
|
||||||
|
* @param pt1 First line point.
|
||||||
|
*
|
||||||
|
* @param pt2 Second line point.
|
||||||
|
*/
|
||||||
|
export declare function clipLine(imgSize: Size, pt1: any, pt2: any): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param imgSize Image size. The image rectangle is Rect(0, 0, imgSize.width, imgSize.height) .
|
||||||
|
*
|
||||||
|
* @param pt1 First line point.
|
||||||
|
*
|
||||||
|
* @param pt2 Second line point.
|
||||||
|
*/
|
||||||
|
export declare function clipLine(imgSize: Size2l, pt1: any, pt2: any): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param imgRect Image rectangle.
|
||||||
|
*
|
||||||
|
* @param pt1 First line point.
|
||||||
|
*
|
||||||
|
* @param pt2 Second line point.
|
||||||
|
*/
|
||||||
|
export declare function clipLine(imgRect: Rect, pt1: any, pt2: any): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function draws contour outlines in the image if `$\\texttt{thickness} \\ge 0$` or fills the area
|
||||||
|
* bounded by the contours if `$\\texttt{thickness}<0$` . The example below shows how to retrieve
|
||||||
|
* connected components from the binary image and label them: :
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* #include "opencv2/imgproc.hpp"
|
||||||
|
* #include "opencv2/highgui.hpp"
|
||||||
|
*
|
||||||
|
* using namespace cv;
|
||||||
|
* using namespace std;
|
||||||
|
*
|
||||||
|
* int main( int argc, char** argv )
|
||||||
|
* {
|
||||||
|
* Mat src;
|
||||||
|
* // the first command-line parameter must be a filename of the binary
|
||||||
|
* // (black-n-white) image
|
||||||
|
* if( argc != 2 || !(src=imread(argv[1], 0)).data)
|
||||||
|
* return -1;
|
||||||
|
*
|
||||||
|
* Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);
|
||||||
|
*
|
||||||
|
* src = src > 1;
|
||||||
|
* namedWindow( "Source", 1 );
|
||||||
|
* imshow( "Source", src );
|
||||||
|
*
|
||||||
|
* vector<vector<Point> > contours;
|
||||||
|
* vector<Vec4i> hierarchy;
|
||||||
|
*
|
||||||
|
* findContours( src, contours, hierarchy,
|
||||||
|
* RETR_CCOMP, CHAIN_APPROX_SIMPLE );
|
||||||
|
*
|
||||||
|
* // iterate through all the top-level contours,
|
||||||
|
* // draw each connected component with its own random color
|
||||||
|
* int idx = 0;
|
||||||
|
* for( ; idx >= 0; idx = hierarchy[idx][0] )
|
||||||
|
* {
|
||||||
|
* Scalar color( rand()&255, rand()&255, rand()&255 );
|
||||||
|
* drawContours( dst, contours, idx, color, FILLED, 8, hierarchy );
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* namedWindow( "Components", 1 );
|
||||||
|
* imshow( "Components", dst );
|
||||||
|
* waitKey(0);
|
||||||
|
* }
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* When thickness=[FILLED], the function is designed to handle connected components with holes
|
||||||
|
* correctly even when no hierarchy date is provided. This is done by analyzing all the outlines
|
||||||
|
* together using even-odd rule. This may give incorrect results if you have a joint collection of
|
||||||
|
* separately retrieved contours. In order to solve this problem, you need to call [drawContours]
|
||||||
|
* separately for each sub-group of contours, or iterate over the collection using contourIdx
|
||||||
|
* parameter.
|
||||||
|
*
|
||||||
|
* @param image Destination image.
|
||||||
|
*
|
||||||
|
* @param contours All the input contours. Each contour is stored as a point vector.
|
||||||
|
*
|
||||||
|
* @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are
|
||||||
|
* drawn.
|
||||||
|
*
|
||||||
|
* @param color Color of the contours.
|
||||||
|
*
|
||||||
|
* @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
|
||||||
|
* thickness=FILLED ), the contour interiors are drawn.
|
||||||
|
*
|
||||||
|
* @param lineType Line connectivity. See LineTypes
|
||||||
|
*
|
||||||
|
* @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
|
||||||
|
* some of the contours (see maxLevel ).
|
||||||
|
*
|
||||||
|
* @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn.
|
||||||
|
* If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
|
||||||
|
* draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
|
||||||
|
* parameter is only taken into account when there is hierarchy available.
|
||||||
|
*
|
||||||
|
* @param offset Optional contour shift parameter. Shift all the drawn contours by the specified
|
||||||
|
* $\texttt{offset}=(dx,dy)$ .
|
||||||
|
*/
|
||||||
|
export declare function drawContours(
|
||||||
|
image: InputOutputArray,
|
||||||
|
contours: InputArrayOfArrays,
|
||||||
|
contourIdx: int,
|
||||||
|
color: any,
|
||||||
|
thickness?: int,
|
||||||
|
lineType?: int,
|
||||||
|
hierarchy?: InputArray,
|
||||||
|
maxLevel?: int,
|
||||||
|
offset?: Point,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function [cv::drawMarker] draws a marker on a given position in the image. For the moment
|
||||||
|
* several marker types are supported, see [MarkerTypes] for more information.
|
||||||
|
*
|
||||||
|
* @param img Image.
|
||||||
|
*
|
||||||
|
* @param position The point where the crosshair is positioned.
|
||||||
|
*
|
||||||
|
* @param color Line color.
|
||||||
|
*
|
||||||
|
* @param markerType The specific type of marker you want to use, see MarkerTypes
|
||||||
|
*
|
||||||
|
* @param markerSize The length of the marker axis [default = 20 pixels]
|
||||||
|
*
|
||||||
|
* @param thickness Line thickness.
|
||||||
|
*
|
||||||
|
* @param line_type Type of the line, See LineTypes
|
||||||
|
*/
|
||||||
|
export declare function drawMarker(
|
||||||
|
img: InputOutputArray,
|
||||||
|
position: Point,
|
||||||
|
color: any,
|
||||||
|
markerType?: int,
|
||||||
|
markerSize?: int,
|
||||||
|
thickness?: int,
|
||||||
|
line_type?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
 * The function [cv::ellipse] with more parameters draws an ellipse outline, a filled ellipse, an
 * elliptic arc, or a filled ellipse sector. The drawing code uses general parametric form. A
 * piecewise-linear curve is used to approximate the elliptic arc boundary. If you need more control of
 * the ellipse rendering, you can retrieve the curve using [ellipse2Poly] and then render it with
 * [polylines] or fill it with [fillPoly]. If you use the first variant of the function and want to
 * draw the whole ellipse, not an arc, pass `startAngle=0` and `endAngle=360`. If `startAngle` is
 * greater than `endAngle`, they are swapped. The figure below explains the meaning of the parameters
 * to draw the blue arc.
 *
 * @param img Image.
 * @param center Center of the ellipse.
 * @param axes Half of the size of the ellipse main axes.
 * @param angle Ellipse rotation angle in degrees.
 * @param startAngle Starting angle of the elliptic arc in degrees.
 * @param endAngle Ending angle of the elliptic arc in degrees.
 * @param color Ellipse color.
 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a
 * filled ellipse sector is to be drawn.
 * @param lineType Type of the ellipse boundary. See LineTypes
 * @param shift Number of fractional bits in the coordinates of the center and values of axes.
 */
export declare function ellipse(
  img: InputOutputArray,
  center: Point,
  axes: Size,
  angle: double,
  startAngle: double,
  endAngle: double,
  color: any,
  thickness?: int,
  lineType?: int,
  shift?: int,
): void;

/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 *
 * @param img Image.
 * @param box Alternative ellipse representation via RotatedRect. This means that the function draws an
 * ellipse inscribed in the rotated rectangle.
 * @param color Ellipse color.
 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a
 * filled ellipse sector is to be drawn.
 * @param lineType Type of the ellipse boundary. See LineTypes
 */
export declare function ellipse(
  img: InputOutputArray,
  box: any,
  color: any,
  thickness?: int,
  lineType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function ellipse2Poly computes the vertices of a polyline that approximates the specified
 * elliptic arc. It is used by [ellipse]. If `arcStart` is greater than `arcEnd`, they are swapped.
 *
 * @param center Center of the arc.
 * @param axes Half of the size of the ellipse main axes. See ellipse for details.
 * @param angle Rotation angle of the ellipse in degrees. See ellipse for details.
 * @param arcStart Starting angle of the elliptic arc in degrees.
 * @param arcEnd Ending angle of the elliptic arc in degrees.
 * @param delta Angle between the subsequent polyline vertices. It defines the approximation accuracy.
 * @param pts Output vector of polyline vertices.
 */
export declare function ellipse2Poly(
  center: Point,
  axes: Size,
  angle: int,
  arcStart: int,
  arcEnd: int,
  delta: int,
  pts: any,
): void;

/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts (double-precision center and axes).
 *
 * @param center Center of the arc.
 * @param axes Half of the size of the ellipse main axes. See ellipse for details.
 * @param angle Rotation angle of the ellipse in degrees. See ellipse for details.
 * @param arcStart Starting angle of the elliptic arc in degrees.
 * @param arcEnd Ending angle of the elliptic arc in degrees.
 * @param delta Angle between the subsequent polyline vertices. It defines the approximation accuracy.
 * @param pts Output vector of polyline vertices.
 */
export declare function ellipse2Poly(
  center: Point2d,
  axes: Size2d,
  angle: int,
  arcStart: int,
  arcEnd: int,
  delta: int,
  pts: any,
): void;
|
||||||
|
|
||||||
|
/**
 * This is an overloaded member function, provided for convenience. It differs from the function below
 * only in what argument(s) it accepts (raw point buffer plus an explicit point count).
 */
export declare function fillConvexPoly(
  img: InputOutputArray,
  pts: any,
  npts: int,
  color: any,
  lineType?: int,
  shift?: int,
): void;

/**
 * The function [cv::fillConvexPoly] draws a filled convex polygon. This function is much faster than
 * the function [fillPoly] . It can fill not only convex polygons but any monotonic polygon without
 * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
 * twice at the most (though, its top-most and/or the bottom edge could be horizontal).
 *
 * @param img Image.
 * @param points Polygon vertices.
 * @param color Polygon color.
 * @param lineType Type of the polygon boundaries. See LineTypes
 * @param shift Number of fractional bits in the vertex coordinates.
 */
export declare function fillConvexPoly(
  img: InputOutputArray,
  points: InputArray,
  color: any,
  lineType?: int,
  shift?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * This is an overloaded member function, provided for convenience. It differs from the function below
 * only in what argument(s) it accepts (raw buffers with explicit per-contour point counts).
 */
export declare function fillPoly(
  img: InputOutputArray,
  pts: any,
  npts: any,
  ncontours: int,
  color: any,
  lineType?: int,
  shift?: int,
  offset?: Point,
): void;

/**
 * The function [cv::fillPoly] fills an area bounded by several polygonal contours. The function can
 * fill complex areas, for example, areas with holes, contours with self-intersections (some of their
 * parts), and so forth.
 *
 * @param img Image.
 * @param pts Array of polygons where each polygon is represented as an array of points.
 * @param color Polygon color.
 * @param lineType Type of the polygon boundaries. See LineTypes
 * @param shift Number of fractional bits in the vertex coordinates.
 * @param offset Optional offset of all points of the contours.
 */
export declare function fillPoly(
  img: InputOutputArray,
  pts: InputArrayOfArrays,
  color: any,
  lineType?: int,
  shift?: int,
  offset?: Point,
): void;
|
||||||
|
|
||||||
|
/**
 * Calculates the font scale (fontScale) to use for [cv::putText] so that rendered text has the
 * requested pixel height.
 *
 * @param fontFace Font to use, see cv::HersheyFonts.
 * @param pixelHeight Pixel height to compute the fontScale for
 * @param thickness Thickness of lines used to render the text. See putText for details.
 * @returns The fontScale to pass to [cv::putText].
 */
export declare function getFontScaleFromHeight(
  fontFace: any,
  pixelHeight: any,
  thickness?: any,
): double;
|
||||||
|
|
||||||
|
/**
 * The function [cv::getTextSize] calculates and returns the size of a box that contains the specified
 * text. That is, the following code renders some text, the tight box surrounding it, and the baseline:
 *
 * ```cpp
 * String text = "Funny text inside the box";
 * int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;
 * double fontScale = 2;
 * int thickness = 3;
 *
 * Mat img(600, 800, CV_8UC3, Scalar::all(0));
 *
 * int baseline=0;
 * Size textSize = getTextSize(text, fontFace,
 *                             fontScale, thickness, &baseline);
 * baseline += thickness;
 *
 * // center the text
 * Point textOrg((img.cols - textSize.width)/2,
 *               (img.rows + textSize.height)/2);
 *
 * // draw the box
 * rectangle(img, textOrg + Point(0, baseline),
 *           textOrg + Point(textSize.width, -textSize.height),
 *           Scalar(0,0,255));
 * // ... and the baseline first
 * line(img, textOrg + Point(0, thickness),
 *      textOrg + Point(textSize.width, thickness),
 *      Scalar(0, 0, 255));
 *
 * // then put the text itself
 * putText(img, text, textOrg, fontFace, fontScale,
 *         Scalar::all(255), thickness, 8);
 * ```
 *
 * [putText]
 *
 * @param text Input text string.
 * @param fontFace Font to use, see HersheyFonts.
 * @param fontScale Font scale factor that is multiplied by the font-specific base size.
 * @param thickness Thickness of lines used to render the text. See putText for details.
 * @param baseLine y-coordinate of the baseline relative to the bottom-most text point.
 * @returns The size of a box that contains the specified text.
 */
export declare function getTextSize(
  text: any,
  fontFace: int,
  fontScale: double,
  thickness: int,
  baseLine: any,
): Size;
|
||||||
|
|
||||||
|
/**
 * The function line draws the line segment between pt1 and pt2 points in the image. The line is
 * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
 * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
 * lines are drawn using Gaussian filtering.
 *
 * @param img Image.
 * @param pt1 First point of the line segment.
 * @param pt2 Second point of the line segment.
 * @param color Line color.
 * @param thickness Line thickness.
 * @param lineType Type of the line. See LineTypes.
 * @param shift Number of fractional bits in the point coordinates.
 */
export declare function line(
  img: InputOutputArray,
  pt1: Point,
  pt2: Point,
  color: any,
  thickness?: int,
  lineType?: int,
  shift?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * This is an overloaded member function, provided for convenience. It differs from the function below
 * only in what argument(s) it accepts (raw buffers with explicit per-contour point counts).
 */
export declare function polylines(
  img: InputOutputArray,
  pts: any,
  npts: any,
  ncontours: int,
  isClosed: bool,
  color: any,
  thickness?: int,
  lineType?: int,
  shift?: int,
): void;

/**
 * The function [cv::polylines] draws one or more polygonal curves.
 *
 * @param img Image.
 * @param pts Array of polygonal curves.
 * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
 * the function draws a line from the last vertex of each curve to its first vertex.
 * @param color Polyline color.
 * @param thickness Thickness of the polyline edges.
 * @param lineType Type of the line segments. See LineTypes
 * @param shift Number of fractional bits in the vertex coordinates.
 */
export declare function polylines(
  img: InputOutputArray,
  pts: InputArrayOfArrays,
  isClosed: bool,
  color: any,
  thickness?: int,
  lineType?: int,
  shift?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function [cv::putText] renders the specified text string in the image. Symbols that cannot be
 * rendered using the specified font are replaced by question marks. See [getTextSize] for a text
 * rendering code example.
 *
 * @param img Image.
 * @param text Text string to be drawn.
 * @param org Bottom-left corner of the text string in the image.
 * @param fontFace Font type, see HersheyFonts.
 * @param fontScale Font scale factor that is multiplied by the font-specific base size.
 * @param color Text color.
 * @param thickness Thickness of the lines used to draw a text.
 * @param lineType Line type. See LineTypes
 * @param bottomLeftOrigin When true, the image data origin is at the bottom-left corner. Otherwise, it
 * is at the top-left corner.
 */
export declare function putText(
  img: InputOutputArray,
  text: any,
  org: Point,
  fontFace: int,
  fontScale: double,
  color: Scalar,
  thickness?: int,
  lineType?: int,
  bottomLeftOrigin?: bool,
): void;
|
||||||
|
|
||||||
|
/**
 * The function [cv::rectangle] draws a rectangle outline or a filled rectangle whose two opposite
 * corners are pt1 and pt2.
 *
 * @param img Image.
 * @param pt1 Vertex of the rectangle.
 * @param pt2 Vertex of the rectangle opposite to pt1 .
 * @param color Rectangle color or brightness (grayscale image).
 * @param thickness Thickness of lines that make up the rectangle. Negative values, like FILLED, mean
 * that the function has to draw a filled rectangle.
 * @param lineType Type of the line. See LineTypes
 * @param shift Number of fractional bits in the point coordinates.
 */
export declare function rectangle(
  img: InputOutputArray,
  pt1: Point,
  pt2: Point,
  color: any,
  thickness?: int,
  lineType?: int,
  shift?: int,
): void;

/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 *
 * use `rec` parameter as alternative specification of the drawn rectangle: `r.tl() and
 * r.br()-Point(1,1)` are opposite corners
 */
export declare function rectangle(
  img: InputOutputArray,
  rec: Rect,
  color: any,
  thickness?: int,
  lineType?: int,
  shift?: int,
): void;
|
||||||
|
|
||||||
|
// Hershey font faces, selectable via the fontFace parameter of putText/getTextSize.
export declare const FONT_HERSHEY_SIMPLEX: HersheyFonts; // initializer: = 0
export declare const FONT_HERSHEY_PLAIN: HersheyFonts; // initializer: = 1
export declare const FONT_HERSHEY_DUPLEX: HersheyFonts; // initializer: = 2
export declare const FONT_HERSHEY_COMPLEX: HersheyFonts; // initializer: = 3
export declare const FONT_HERSHEY_TRIPLEX: HersheyFonts; // initializer: = 4
export declare const FONT_HERSHEY_COMPLEX_SMALL: HersheyFonts; // initializer: = 5
export declare const FONT_HERSHEY_SCRIPT_SIMPLEX: HersheyFonts; // initializer: = 6
export declare const FONT_HERSHEY_SCRIPT_COMPLEX: HersheyFonts; // initializer: = 7
export declare const FONT_ITALIC: HersheyFonts; // initializer: = 16

// Line-type values, accepted by the lineType/line_type parameters of the drawing functions above.
export declare const FILLED: LineTypes; // initializer: = -1
export declare const LINE_4: LineTypes; // initializer: = 4
export declare const LINE_8: LineTypes; // initializer: = 8
export declare const LINE_AA: LineTypes; // initializer: = 16

// Marker shapes, accepted by the markerType parameter of drawMarker.
export declare const MARKER_CROSS: MarkerTypes; // initializer: = 0
export declare const MARKER_TILTED_CROSS: MarkerTypes; // initializer: = 1
export declare const MARKER_STAR: MarkerTypes; // initializer: = 2
export declare const MARKER_DIAMOND: MarkerTypes; // initializer: = 3
export declare const MARKER_SQUARE: MarkerTypes; // initializer: = 4
export declare const MARKER_TRIANGLE_UP: MarkerTypes; // initializer: = 5
export declare const MARKER_TRIANGLE_DOWN: MarkerTypes; // initializer: = 6
|
||||||
|
|
||||||
|
/**
 * Only a subset of Hershey fonts are supported
 */
export type HersheyFonts = any;

/**
 * Line-type (connectivity/antialiasing) values used by the drawing functions; see the FILLED,
 * LINE_4, LINE_8 and LINE_AA constants.
 * NOTE(review): the original comment here was a copy-paste of the HersheyFonts note.
 */
export type LineTypes = any;

/**
 * Marker shapes understood by drawMarker; see the MARKER_* constants.
 * NOTE(review): the original comment here was a copy-paste of the HersheyFonts note.
 */
export type MarkerTypes = any;
|
||||||
681
opencv-js-4.10.0/src/types/opencv/imgproc_feature.ts
Normal file
681
opencv-js-4.10.0/src/types/opencv/imgproc_feature.ts
Normal file
|
|
@ -0,0 +1,681 @@
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
InputArray,
|
||||||
|
InputOutputArray,
|
||||||
|
int,
|
||||||
|
OutputArray,
|
||||||
|
Size,
|
||||||
|
TermCriteria,
|
||||||
|
} from "./_types";
|
||||||
|
/*
|
||||||
|
* # Feature Detection
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
 * The function finds edges in the input image and marks them in the output map edges using the Canny
 * algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
 * largest value is used to find initial segments of strong edges. See
 *
 * @param image 8-bit input image.
 * @param edges output edge map; single channels 8-bit image, which has the same size as image .
 * @param threshold1 first threshold for the hysteresis procedure.
 * @param threshold2 second threshold for the hysteresis procedure.
 * @param apertureSize aperture size for the Sobel operator.
 * @param L2gradient a flag, indicating whether a more accurate $L_2$ norm $=\sqrt{(dI/dx)^2 +
 * (dI/dy)^2}$ should be used to calculate the image gradient magnitude ( L2gradient=true ), or whether
 * the default $L_1$ norm $=|dI/dx|+|dI/dy|$ is enough ( L2gradient=false ).
 */
export declare function Canny(
  image: InputArray,
  edges: OutputArray,
  threshold1: double,
  threshold2: double,
  apertureSize?: int,
  L2gradient?: bool,
): void;

/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 *
 * Finds edges in an image using the Canny algorithm with custom image gradient.
 *
 * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3).
 * @param dy 16-bit y derivative of input image (same type as dx).
 * @param edges output edge map; single channels 8-bit image, which has the same size as image .
 * @param threshold1 first threshold for the hysteresis procedure.
 * @param threshold2 second threshold for the hysteresis procedure.
 * @param L2gradient a flag, indicating whether a more accurate $L_2$ norm $=\sqrt{(dI/dx)^2 +
 * (dI/dy)^2}$ should be used to calculate the image gradient magnitude ( L2gradient=true ), or whether
 * the default $L_1$ norm $=|dI/dx|+|dI/dy|$ is enough ( L2gradient=false ).
 */
export declare function Canny(
  dx: InputArray,
  dy: InputArray,
  edges: OutputArray,
  threshold1: double,
  threshold2: double,
  L2gradient?: bool,
): void;
|
||||||
|
|
||||||
|
/**
 * For every pixel `$p$` , the function cornerEigenValsAndVecs considers a blockSize `$\\times$`
 * blockSize neighborhood `$S(p)$` . It calculates the covariation matrix of derivatives over the
 * neighborhood as:
 *
 * `\\[M = \\begin{bmatrix} \\sum _{S(p)}(dI/dx)^2 & \\sum _{S(p)}dI/dx dI/dy \\\\ \\sum _{S(p)}dI/dx
 * dI/dy & \\sum _{S(p)}(dI/dy)^2 \\end{bmatrix}\\]`
 *
 * where the derivatives are computed using the Sobel operator.
 *
 * After that, it finds eigenvectors and eigenvalues of `$M$` and stores them in the destination image
 * as `$(\\lambda_1, \\lambda_2, x_1, y_1, x_2, y_2)$` where
 *
 * `$\\lambda_1, \\lambda_2$` are the non-sorted eigenvalues of `$M$`
 * `$x_1, y_1$` are the eigenvectors corresponding to `$\\lambda_1$`
 * `$x_2, y_2$` are the eigenvectors corresponding to `$\\lambda_2$`
 *
 * The output of the function can be used for robust edge or corner detection.
 *
 * [cornerMinEigenVal], [cornerHarris], [preCornerDetect]
 *
 * @param src Input single-channel 8-bit or floating-point image.
 * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .
 * @param blockSize Neighborhood size (see details below).
 * @param ksize Aperture parameter for the Sobel operator.
 * @param borderType Pixel extrapolation method. See BorderTypes.
 */
export declare function cornerEigenValsAndVecs(
  src: InputArray,
  dst: OutputArray,
  blockSize: int,
  ksize: int,
  borderType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and
 * cornerEigenValsAndVecs , for each pixel `$(x, y)$` it calculates a `$2\\times2$` gradient covariance
 * matrix `$M^{(x,y)}$` over a `$\\texttt{blockSize} \\times \\texttt{blockSize}$` neighborhood. Then,
 * it computes the following characteristic:
 *
 * `\\[\\texttt{dst} (x,y) = \\mathrm{det} M^{(x,y)} - k \\cdot \\left ( \\mathrm{tr} M^{(x,y)} \\right
 * )^2\\]`
 *
 * Corners in the image can be found as the local maxima of this response map.
 *
 * @param src Input single-channel 8-bit or floating-point image.
 * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same size
 * as src .
 * @param blockSize Neighborhood size (see the details on cornerEigenValsAndVecs ).
 * @param ksize Aperture parameter for the Sobel operator.
 * @param k Harris detector free parameter. See the formula above.
 * @param borderType Pixel extrapolation method. See BorderTypes.
 */
export declare function cornerHarris(
  src: InputArray,
  dst: OutputArray,
  blockSize: int,
  ksize: int,
  k: double,
  borderType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
 * eigenvalue of the covariance matrix of derivatives, that is, `$\\min(\\lambda_1, \\lambda_2)$` in
 * terms of the formulae in the cornerEigenValsAndVecs description.
 *
 * @param src Input single-channel 8-bit or floating-point image.
 * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as src
 * .
 * @param blockSize Neighborhood size (see the details on cornerEigenValsAndVecs ).
 * @param ksize Aperture parameter for the Sobel operator.
 * @param borderType Pixel extrapolation method. See BorderTypes.
 */
export declare function cornerMinEigenVal(
  src: InputArray,
  dst: OutputArray,
  blockSize: int,
  ksize?: int,
  borderType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function iterates to find the sub-pixel accurate location of corners or radial saddle points, as
 * shown on the figure below.
 *
 * Sub-pixel accurate corner locator is based on the observation that every vector from the center
 * `$q$` to a point `$p$` located within a neighborhood of `$q$` is orthogonal to the image gradient at
 * `$p$` subject to image and measurement noise. Consider the expression:
 *
 * `\\[\\epsilon _i = {DI_{p_i}}^T \\cdot (q - p_i)\\]`
 *
 * where `${DI_{p_i}}$` is an image gradient at one of the points `$p_i$` in a neighborhood of `$q$` .
 * The value of `$q$` is to be found so that `$\\epsilon_i$` is minimized. A system of equations may be
 * set up with `$\\epsilon_i$` set to zero:
 *
 * `\\[\\sum _i(DI_{p_i} \\cdot {DI_{p_i}}^T) \\cdot q - \\sum _i(DI_{p_i} \\cdot {DI_{p_i}}^T \\cdot
 * p_i)\\]`
 *
 * where the gradients are summed within a neighborhood ("search window") of `$q$` . Calling the first
 * gradient term `$G$` and the second gradient term `$b$` gives:
 *
 * `\\[q = G^{-1} \\cdot b\\]`
 *
 * The algorithm sets the center of the neighborhood window at this new center `$q$` and then iterates
 * until the center stays within a set threshold.
 *
 * @param image Input single-channel, 8-bit or float image.
 * @param corners Initial coordinates of the input corners and refined coordinates provided for output.
 * @param winSize Half of the side length of the search window. For example, if winSize=Size(5,5) ,
 * then a $(5*2+1) \times (5*2+1) = 11 \times 11$ search window is used.
 * @param zeroZone Half of the size of the dead region in the middle of the search zone over which the
 * summation in the formula below is not done. It is used sometimes to avoid possible singularities of
 * the autocorrelation matrix. The value of (-1,-1) indicates that there is no such a size.
 * @param criteria Criteria for termination of the iterative process of corner refinement. That is, the
 * process of corner position refinement stops either after criteria.maxCount iterations or when the
 * corner position moves by less than criteria.epsilon on some iteration.
 */
export declare function cornerSubPix(
  image: InputArray,
  corners: InputOutputArray,
  winSize: Size,
  zeroZone: Size,
  criteria: TermCriteria,
): void;
|
||||||
|
|
||||||
|
/**
 * The [LineSegmentDetector] algorithm is defined using the standard values. Only advanced users may
 * want to edit those, as to tailor it for their own application.
 *
 * Implementation has been removed due original code license conflict
 *
 * @param _refine The way found lines will be refined, see LineSegmentDetectorModes
 * @param _scale The scale of the image that will be used to find the lines. Range (0..1].
 * @param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
 * @param _quant Bound to the quantization error on the gradient norm.
 * @param _ang_th Gradient angle tolerance in degrees.
 * @param _log_eps Detection threshold: -log10(NFA) > log_eps. Used only when advance refinement is
 * chosen.
 * @param _density_th Minimal density of aligned region points in the enclosing rectangle.
 * @param _n_bins Number of bins in pseudo-ordering of gradient modulus.
 */
export declare function createLineSegmentDetector(
  _refine?: int,
  _scale?: double,
  _sigma_scale?: double,
  _quant?: double,
  _ang_th?: double,
  _log_eps?: double,
  _density_th?: double,
  _n_bins?: int,
): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function finds the most prominent corners in the image or in the specified image region, as
|
||||||
|
* described in Shi94
|
||||||
|
*
|
||||||
|
* Function calculates the corner quality measure at every source image pixel using the
|
||||||
|
* [cornerMinEigenVal] or [cornerHarris] .
|
||||||
|
* Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
|
||||||
|
* retained).
|
||||||
|
* The corners with the minimal eigenvalue less than `$\\texttt{qualityLevel} \\cdot \\max_{x,y}
|
||||||
|
* qualityMeasureMap(x,y)$` are rejected.
|
||||||
|
* The remaining corners are sorted by the quality measure in the descending order.
|
||||||
|
* Function throws away each corner for which there is a stronger corner at a distance less than
|
||||||
|
* maxDistance.
|
||||||
|
*
|
||||||
|
* The function can be used to initialize a point-based tracker of an object.
|
||||||
|
*
|
||||||
|
* If the function is called with different values A and B of the parameter qualityLevel , and A > B,
|
||||||
|
 * the vector of returned corners with qualityLevel=A will be the prefix of the output vector with
 * qualityLevel=B .
 *
 * See also: [cornerMinEigenVal], [cornerHarris], [calcOpticalFlowPyrLK], [estimateRigidTransform]
 *
 * @param image Input 8-bit or floating-point 32-bit, single-channel image.
 * @param corners Output vector of detected corners.
 * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
 * the strongest of them are returned. maxCorners <= 0 implies that no limit on the maximum is set
 * and all detected corners are returned.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
 * (see cornerMinEigenVal) or the Harris function response (see cornerHarris). The corners with the
 * quality measure less than the product are rejected. For example, if the best corner has the
 * quality measure = 1500, and the qualityLevel=0.01, then all the corners with the quality measure
 * less than 15 are rejected.
 * @param minDistance Minimum possible Euclidean distance between the returned corners.
 * @param mask Optional region of interest. If the image is not empty (it needs to have the type
 * CV_8UC1 and the same size as image), it specifies the region in which the corners are detected.
 * @param blockSize Size of an average block for computing a derivative covariation matrix over each
 * pixel neighborhood. See cornerEigenValsAndVecs.
 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see cornerHarris)
 * or cornerMinEigenVal.
 * @param k Free parameter of the Harris detector.
 */
export declare function goodFeaturesToTrack(
  image: InputArray,
  corners: OutputArray,
  maxCorners: int,
  qualityLevel: double,
  minDistance: double,
  mask?: InputArray,
  blockSize?: int,
  useHarrisDetector?: bool,
  k?: double,
): void;
|
||||||
|
|
||||||
|
/**
 * Overload of goodFeaturesToTrack that additionally accepts gradientSize, the aperture size used to
 * compute image derivatives. In this overload mask and blockSize are required parameters, since
 * they precede gradientSize; the remaining parameters have the same meaning as in the overload
 * above.
 */
export declare function goodFeaturesToTrack(
  image: InputArray,
  corners: OutputArray,
  maxCorners: int,
  qualityLevel: double,
  minDistance: double,
  mask: InputArray,
  blockSize: int,
  gradientSize: int,
  useHarrisDetector?: bool,
  k?: double,
): void;
|
||||||
|
|
||||||
|
/**
 * The function finds circles in a grayscale image using a modification of the Hough transform.
 *
 * Example:
 *
 * ```cpp
 * #include <opencv2/imgproc.hpp>
 * #include <opencv2/highgui.hpp>
 * #include <math.h>
 *
 * using namespace cv;
 * using namespace std;
 *
 * int main(int argc, char** argv)
 * {
 *     Mat img, gray;
 *     if( argc != 2 || !(img=imread(argv[1], 1)).data)
 *         return -1;
 *     cvtColor(img, gray, COLOR_BGR2GRAY);
 *     // smooth it, otherwise a lot of false circles may be detected
 *     GaussianBlur( gray, gray, Size(9, 9), 2, 2 );
 *     vector<Vec3f> circles;
 *     HoughCircles(gray, circles, HOUGH_GRADIENT,
 *                  2, gray.rows/4, 200, 100 );
 *     for( size_t i = 0; i < circles.size(); i++ )
 *     {
 *         Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
 *         int radius = cvRound(circles[i][2]);
 *         // draw the circle center
 *         circle( img, center, 3, Scalar(0,255,0), -1, 8, 0 );
 *         // draw the circle outline
 *         circle( img, center, radius, Scalar(0,0,255), 3, 8, 0 );
 *     }
 *     namedWindow( "circles", 1 );
 *     imshow( "circles", img );
 *
 *     waitKey(0);
 *     return 0;
 * }
 * ```
 *
 * Usually the function detects the centers of circles well. However, it may fail to find correct
 * radii. You can assist the function by specifying the radius range (minRadius and maxRadius) if
 * you know it. Or, you may set maxRadius to a negative number to return centers only without radius
 * search, and find the correct radius using an additional procedure.
 *
 * See also: [fitEllipse], [minEnclosingCircle]
 *
 * @param image 8-bit, single-channel, grayscale input image.
 * @param circles Output vector of found circles. Each vector is encoded as a 3 or 4 element
 * floating-point vector $(x, y, radius)$ or $(x, y, radius, votes)$.
 * @param method Detection method, see HoughModes. Currently, the only implemented method is
 * HOUGH_GRADIENT.
 * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
 * dp=1, the accumulator has the same resolution as the input image. If dp=2, the accumulator has
 * half as big width and height.
 * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
 * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
 * too large, some circles may be missed.
 * @param param1 First method-specific parameter. In case of HOUGH_GRADIENT, it is the higher
 * threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
 * @param param2 Second method-specific parameter. In case of HOUGH_GRADIENT, it is the accumulator
 * threshold for the circle centers at the detection stage. The smaller it is, the more false
 * circles may be detected. Circles, corresponding to the larger accumulator values, will be
 * returned first.
 * @param minRadius Minimum circle radius.
 * @param maxRadius Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0,
 * returns centers without finding the radius.
 */
export declare function HoughCircles(
  image: InputArray,
  circles: OutputArray,
  method: int,
  dp: double,
  minDist: double,
  param1?: double,
  param2?: double,
  minRadius?: int,
  maxRadius?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function implements the standard or standard multi-scale Hough transform algorithm for line
 * detection. See the OpenCV reference for a good explanation of the Hough transform.
 * (NOTE(review): the original external link was lost during type generation.)
 *
 * @param image 8-bit, single-channel binary source image. The image may be modified by the
 * function.
 * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector $(\rho,
 * \theta)$ or $(\rho, \theta, \textrm{votes})$. $\rho$ is the distance from the coordinate origin
 * $(0,0)$ (top-left corner of the image). $\theta$ is the line rotation angle in radians ( $0 \sim
 * \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}$ ). $\textrm{votes}$ is the value of
 * accumulator.
 * @param rho Distance resolution of the accumulator in pixels.
 * @param theta Angle resolution of the accumulator in radians.
 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
 * votes ( $>\texttt{threshold}$ ).
 * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho.
 * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
 * rho/srn. If both srn=0 and stn=0, the classical Hough transform is used. Otherwise, both these
 * parameters should be positive.
 * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution
 * theta.
 * @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.
 * Must fall between 0 and max_theta.
 * @param max_theta For standard and multi-scale Hough transform, maximum angle to check for lines.
 * Must fall between min_theta and CV_PI.
 */
export declare function HoughLines(
  image: InputArray,
  lines: OutputArray,
  rho: double,
  theta: double,
  threshold: int,
  srn?: double,
  stn?: double,
  min_theta?: double,
  max_theta?: double,
): void;
|
||||||
|
|
||||||
|
/**
 * The function implements the probabilistic Hough transform algorithm for line detection, described
 * in Matas00.
 *
 * See the line detection example below:
 *
 * ```cpp
 * #include <opencv2/imgproc.hpp>
 * #include <opencv2/highgui.hpp>
 *
 * using namespace cv;
 * using namespace std;
 *
 * int main(int argc, char** argv)
 * {
 *     Mat src, dst, color_dst;
 *     if( argc != 2 || !(src=imread(argv[1], 0)).data)
 *         return -1;
 *
 *     Canny( src, dst, 50, 200, 3 );
 *     cvtColor( dst, color_dst, COLOR_GRAY2BGR );
 *
 *     vector<Vec4i> lines;
 *     HoughLinesP( dst, lines, 1, CV_PI/180, 80, 30, 10 );
 *     for( size_t i = 0; i < lines.size(); i++ )
 *     {
 *         line( color_dst, Point(lines[i][0], lines[i][1]),
 *               Point( lines[i][2], lines[i][3]), Scalar(0,0,255), 3, 8 );
 *     }
 *     namedWindow( "Source", 1 );
 *     imshow( "Source", src );
 *
 *     namedWindow( "Detected Lines", 1 );
 *     imshow( "Detected Lines", color_dst );
 *
 *     waitKey(0);
 *     return 0;
 * }
 * ```
 *
 * This is a sample picture the function parameters have been tuned for, and the output of the above
 * program in case of the probabilistic Hough transform. (NOTE(review): the illustrative images were
 * lost during type generation.)
 *
 * See also: [LineSegmentDetector]
 *
 * @param image 8-bit, single-channel binary source image. The image may be modified by the
 * function.
 * @param lines Output vector of lines. Each line is represented by a 4-element vector $(x_1, y_1,
 * x_2, y_2)$, where $(x_1,y_1)$ and $(x_2, y_2)$ are the ending points of each detected line
 * segment.
 * @param rho Distance resolution of the accumulator in pixels.
 * @param theta Angle resolution of the accumulator in radians.
 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
 * votes ( $>\texttt{threshold}$ ).
 * @param minLineLength Minimum line length. Line segments shorter than that are rejected.
 * @param maxLineGap Maximum allowed gap between points on the same line to link them.
 */
export declare function HoughLinesP(
  image: InputArray,
  lines: OutputArray,
  rho: double,
  theta: double,
  threshold: int,
  minLineLength?: double,
  maxLineGap?: double,
): void;
|
||||||
|
|
||||||
|
/**
 * The function finds lines in a set of points using a modification of the Hough transform.
 *
 * ```cpp
 * #include <opencv2/core.hpp>
 * #include <opencv2/imgproc.hpp>
 *
 * using namespace cv;
 * using namespace std;
 *
 * int main()
 * {
 *     Mat lines;
 *     vector<Vec3d> line3d;
 *     vector<Point2f> point;
 *     const static float Points[20][2] = {
 *         { 0.0f,   369.0f }, { 10.0f,  364.0f }, { 20.0f,  358.0f }, { 30.0f,  352.0f },
 *         { 40.0f,  346.0f }, { 50.0f,  341.0f }, { 60.0f,  335.0f }, { 70.0f,  329.0f },
 *         { 80.0f,  323.0f }, { 90.0f,  318.0f }, { 100.0f, 312.0f }, { 110.0f, 306.0f },
 *         { 120.0f, 300.0f }, { 130.0f, 295.0f }, { 140.0f, 289.0f }, { 150.0f, 284.0f },
 *         { 160.0f, 277.0f }, { 170.0f, 271.0f }, { 180.0f, 266.0f }, { 190.0f, 260.0f }
 *     };
 *
 *     for (int i = 0; i < 20; i++)
 *     {
 *         point.push_back(Point2f(Points[i][0],Points[i][1]));
 *     }
 *
 *     double rhoMin = 0.0f, rhoMax = 360.0f, rhoStep = 1;
 *     double thetaMin = 0.0f, thetaMax = CV_PI / 2.0f, thetaStep = CV_PI / 180.0f;
 *
 *     HoughLinesPointSet(point, lines, 20, 1,
 *                        rhoMin, rhoMax, rhoStep,
 *                        thetaMin, thetaMax, thetaStep);
 *
 *     lines.copyTo(line3d);
 *     printf("votes:%d, rho:%.7f, theta:%.7f\\n",(int)line3d.at(0).val[0], line3d.at(0).val[1],
 *            line3d.at(0).val[2]);
 * }
 * ```
 *
 * @param _point Input vector of points. Each vector must be encoded as a Point vector $(x,y)$. Type
 * must be CV_32FC2 or CV_32SC2.
 * @param _lines Output vector of found lines. Each vector is encoded as a vector<Vec3d> $(votes,
 * rho, theta)$. The larger the value of 'votes', the higher the reliability of the Hough line.
 * @param lines_max Max count of hough lines.
 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
 * votes ( $>\texttt{threshold}$ )
 * @param min_rho Minimum Distance value of the accumulator in pixels.
 * @param max_rho Maximum Distance value of the accumulator in pixels.
 * @param rho_step Distance resolution of the accumulator in pixels.
 * @param min_theta Minimum angle value of the accumulator in radians.
 * @param max_theta Maximum angle value of the accumulator in radians.
 * @param theta_step Angle resolution of the accumulator in radians.
 */
export declare function HoughLinesPointSet(
  _point: InputArray,
  _lines: OutputArray,
  lines_max: int,
  threshold: int,
  min_rho: double,
  max_rho: double,
  rho_step: double,
  min_theta: double,
  max_theta: double,
  theta_step: double,
): void;
|
||||||
|
|
||||||
|
/**
 * The function calculates the complex spatial derivative-based function of the source image
 *
 * `\\[\\texttt{dst} = (D_x \\texttt{src} )^2 \\cdot D_{yy} \\texttt{src} + (D_y \\texttt{src} )^2
 * \\cdot D_{xx} \\texttt{src} - 2 D_x \\texttt{src} \\cdot D_y \\texttt{src} \\cdot D_{xy}
 * \\texttt{src}\\]`
 *
 * where `$D_x$`, `$D_y$` are the first image derivatives, `$D_{xx}$`, `$D_{yy}$` are the second
 * image derivatives, and `$D_{xy}$` is the mixed derivative.
 *
 * The corners can be found as local maximums of the functions, as shown below:
 *
 * ```cpp
 * Mat corners, dilated_corners;
 * preCornerDetect(image, corners, 3);
 * // dilation with 3x3 rectangular structuring element
 * dilate(corners, dilated_corners, Mat(), 1);
 * Mat corner_mask = corners == dilated_corners;
 * ```
 *
 * @param src Source single-channel 8-bit or floating-point image.
 * @param dst Output image that has the type CV_32F and the same size as src.
 * @param ksize Aperture size of the Sobel operator.
 * @param borderType Pixel extrapolation method. See BorderTypes.
 */
export declare function preCornerDetect(
  src: InputArray,
  dst: OutputArray,
  ksize: int,
  borderType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * classical or standard Hough transform. Every line is represented by two floating-point numbers
 * `$(\\rho, \\theta)$`, where `$\\rho$` is a distance between (0,0) point and the line, and
 * `$\\theta$` is the angle between x-axis and the normal to the line. Thus, the matrix must be (the
 * created sequence will be) of CV_32FC2 type
 */
export declare const HOUGH_STANDARD: HoughModes; // initializer: = 0

/**
 * probabilistic Hough transform (more efficient in case if the picture contains a few long linear
 * segments). It returns line segments rather than the whole line. Each segment is represented by
 * starting and ending points, and the matrix must be (the created sequence will be) of the CV_32SC4
 * type.
 */
export declare const HOUGH_PROBABILISTIC: HoughModes; // initializer: = 1

/**
 * multi-scale variant of the classical Hough transform. The lines are encoded the same way as
 * HOUGH_STANDARD.
 */
export declare const HOUGH_MULTI_SCALE: HoughModes; // initializer: = 2

/** Gradient-based detection method; the method accepted by HoughCircles (see above). */
export declare const HOUGH_GRADIENT: HoughModes; // initializer: = 3

/** No refinement applied to the detected line segments. */
export declare const LSD_REFINE_NONE: LineSegmentDetectorModes; // initializer: = 0

// NOTE(review): standard refinement mode — exact semantics per the OpenCV
// LineSegmentDetector documentation; not described by the generated docs here.
export declare const LSD_REFINE_STD: LineSegmentDetectorModes; // initializer: = 1

/**
 * Advanced refinement. Number of false alarms is calculated, lines are refined through increase of
 * precision, decrement in size, etc.
 */
export declare const LSD_REFINE_ADV: LineSegmentDetectorModes; // initializer: = 2

/**
 * Variants of the Hough transform; see the HOUGH_* constants above.
 * NOTE(review): emitted as `any` by the binding generator — effectively an integer enum (0..3).
 */
export type HoughModes = any;

/**
 * Refinement modes for LineSegmentDetector; see the LSD_REFINE_* constants above.
 * NOTE(review): emitted as `any` by the binding generator — effectively an integer enum (0..2).
 */
export type LineSegmentDetectorModes = any;
|
||||||
918
opencv-js-4.10.0/src/types/opencv/imgproc_filter.ts
Normal file
918
opencv-js-4.10.0/src/types/opencv/imgproc_filter.ts
Normal file
|
|
@ -0,0 +1,918 @@
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
InputArray,
|
||||||
|
int,
|
||||||
|
Mat,
|
||||||
|
OutputArray,
|
||||||
|
OutputArrayOfArrays,
|
||||||
|
Point,
|
||||||
|
Scalar,
|
||||||
|
Size,
|
||||||
|
TermCriteria,
|
||||||
|
} from "./_types";
|
||||||
|
/*
 * # Image Filtering
 *
 * Functions and classes described in this section are used to perform various linear or non-linear filtering operations on 2D images (represented as [Mat]'s). It means that for each pixel location `$(x,y)$` in the source image (normally, rectangular), its neighborhood is considered and used to compute the response. In case of a linear filter, it is a weighted sum of pixel values. In case of morphological operations, it is the minimum or maximum values, and so on. The computed response is stored in the destination image at the same location `$(x,y)$`. It means that the output image will be of the same size as the input image. Normally, the functions support multi-channel arrays, in which case every channel is processed independently. Therefore, the output image will also have the same number of channels as the input one.
 *
 * Another common feature of the functions and classes described in this section is that, unlike simple arithmetic functions, they need to extrapolate values of some non-existing pixels. For example, if you want to smooth an image using a Gaussian `$3 \times 3$` filter, then, when processing the left-most pixels in each row, you need pixels to the left of them, that is, outside of the image. You can let these pixels be the same as the left-most image pixels ("replicated border" extrapolation method), or assume that all the non-existing pixels are zeros ("constant border" extrapolation method), and so on. OpenCV enables you to specify the extrapolation method. For details, see [BorderTypes]
 *
 * <a name="d4/d86/group__imgproc__filter_1filter_depths"></a>
 *
 * ## Depth combinations
 *
 * (NOTE(review): the input-depth / output-depth combination table from the OpenCV reference was
 * lost during type generation; consult the OpenCV imgproc filtering documentation for it.)
 *
 * when ddepth=-1, the output image will have the same depth as the source.
 */
|
||||||
|
/**
 * The function applies bilateral filtering to the input image. bilateralFilter can reduce unwanted
 * noise very well while keeping edges fairly sharp. However, it is very slow compared to most
 * filters. (NOTE(review): the citation of the algorithm's source paper was lost during type
 * generation.)
 *
 * *Sigma values*: For simplicity, you can set the 2 sigma values to be the same. If they are small
 * (< 10), the filter will not have much effect, whereas if they are large (> 150), they will have a
 * very strong effect, making the image look "cartoonish".
 *
 * *Filter size*: Large filters (d > 5) are very slow, so it is recommended to use d=5 for real-time
 * applications, and perhaps d=9 for offline applications that need heavy noise filtering.
 *
 * This filter does not work inplace.
 *
 * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
 * @param dst Destination image of the same size and type as src.
 * @param d Diameter of each pixel neighborhood that is used during filtering. If it is
 * non-positive, it is computed from sigmaSpace.
 * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
 * farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
 * in larger areas of semi-equal color.
 * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means
 * that farther pixels will influence each other as long as their colors are close enough (see
 * sigmaColor). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d
 * is proportional to sigmaSpace.
 * @param borderType border mode used to extrapolate pixels outside of the image, see BorderTypes
 */
export declare function bilateralFilter(
  src: InputArray,
  dst: OutputArray,
  d: int,
  sigmaColor: double,
  sigmaSpace: double,
  borderType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function smooths an image using the kernel:
 *
 * `\\[\\texttt{K} = \\frac{1}{\\texttt{ksize.width*ksize.height}} \\begin{bmatrix} 1 & 1 & 1 & \\cdots
 * & 1 & 1 \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ \\hdotsfor{6} \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\\\
 * \\end{bmatrix}\\]`
 *
 * The call `blur(src, dst, ksize, anchor, borderType)` is equivalent to `boxFilter(src, dst,
 * src.type(), anchor, true, borderType)`.
 *
 * See also: [boxFilter], [bilateralFilter], [GaussianBlur], [medianBlur]
 *
 * @param src input image; it can have any number of channels, which are processed independently,
 * but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 * center.
 * @param borderType border mode used to extrapolate pixels outside of the image, see BorderTypes
 */
export declare function blur(
  src: InputArray,
  dst: OutputArray,
  ksize: Size,
  anchor?: Point,
  borderType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function smooths an image using the kernel:
 *
 * `\\[\\texttt{K} = \\alpha \\begin{bmatrix} 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ 1 & 1 & 1 & \\cdots & 1
 * & 1 \\\\ \\hdotsfor{6} \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\end{bmatrix}\\]`
 *
 * where
 *
 * `\\[\\alpha = \\fork{\\frac{1}{\\texttt{ksize.width*ksize.height}}}{when
 * \\texttt{normalize=true}}{1}{otherwise}\\]`
 *
 * Unnormalized box filter is useful for computing various integral characteristics over each pixel
 * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
 * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use
 * [integral].
 *
 * See also: [blur], [bilateralFilter], [GaussianBlur], [medianBlur], [integral]
 *
 * @param src input image.
 * @param dst output image of the same size and type as src.
 * @param ddepth the output image depth (-1 to use src.depth()).
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 * center.
 * @param normalize flag, specifying whether the kernel is normalized by its area or not.
 * @param borderType border mode used to extrapolate pixels outside of the image, see BorderTypes
 */
export declare function boxFilter(
  src: InputArray,
  dst: OutputArray,
  ddepth: int,
  ksize: Size,
  anchor?: Point,
  normalize?: bool,
  borderType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function constructs a vector of images and builds the Gaussian pyramid by recursively
 * applying pyrDown to the previously built pyramid layers, starting from `dst[0]==src`.
 *
 * @param src Source image. Check pyrDown for the list of supported types.
 * @param dst Destination vector of maxlevel+1 images of the same type as src. dst[0] will be the
 * same as src. dst[1] is the next pyramid layer, a smoothed and down-sized src, and so on.
 * @param maxlevel 0-based index of the last (the smallest) pyramid layer. It must be non-negative.
 * @param borderType Pixel extrapolation method, see BorderTypes (BORDER_CONSTANT isn't supported)
 */
export declare function buildPyramid(
  src: InputArray,
  dst: OutputArrayOfArrays,
  maxlevel: int,
  borderType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function dilates the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the maximum is taken: `\\[\\texttt{dst} (x,y) = \\max
 * _{(x',y'): \\, \\texttt{element} (x',y') \\ne0 } \\texttt{src} (x+x',y+y')\\]`
 *
 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * See also: [erode], [morphologyEx], [getStructuringElement]
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for dilation; if element=Mat(), a 3 x 3 rectangular
 * structuring element is used. Kernel can be created using getStructuringElement
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 * @param iterations number of times dilation is applied.
 * @param borderType pixel extrapolation method, see BorderTypes
 * @param borderValue border value in case of a constant border
 */
export declare function dilate(
  src: InputArray,
  dst: OutputArray,
  kernel: InputArray,
  anchor?: Point,
  iterations?: int,
  borderType?: int,
  borderValue?: any,
): void;
|
||||||
|
|
||||||
|
/**
 * The function erodes the source image using the specified structuring element that determines the
 * shape of a pixel neighborhood over which the minimum is taken:
 *
 * `\\[\\texttt{dst} (x,y) = \\min _{(x',y'): \\, \\texttt{element} (x',y') \\ne0 } \\texttt{src}
 * (x+x',y+y')\\]`
 *
 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
 * case of multi-channel images, each channel is processed independently.
 *
 * See also: [dilate], [morphologyEx], [getStructuringElement]
 *
 * @param src input image; the number of channels can be arbitrary, but the depth should be one of
 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param kernel structuring element used for erosion; if element=Mat(), a 3 x 3 rectangular
 * structuring element is used. Kernel can be created using getStructuringElement.
 * @param anchor position of the anchor within the element; default value (-1, -1) means that the
 * anchor is at the element center.
 * @param iterations number of times erosion is applied.
 * @param borderType pixel extrapolation method, see BorderTypes
 * @param borderValue border value in case of a constant border
 */
export declare function erode(
  src: InputArray,
  dst: OutputArray,
  kernel: InputArray,
  anchor?: Point,
  iterations?: int,
  borderType?: int,
  borderValue?: any,
): void;
|
||||||
|
|
||||||
|
/**
 * The function applies an arbitrary linear filter to an image. In-place operation is supported.
 * When the aperture is partially outside the image, the function interpolates outlier pixel values
 * according to the specified border mode.
 *
 * The function does actually compute correlation, not the convolution:
 *
 * `\\[\\texttt{dst} (x,y) = \\sum _{ \\stackrel{0\\leq x' < \\texttt{kernel.cols},}{0\\leq y' <
 * \\texttt{kernel.rows}} } \\texttt{kernel} (x',y')* \\texttt{src} (x+x'- \\texttt{anchor.x} ,y+y'-
 * \\texttt{anchor.y} )\\]`
 *
 * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
 * the kernel using [flip] and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
 * anchor.y - 1)`.
 *
 * The function uses the DFT-based algorithm in case of sufficiently large kernels (~`11 x 11` or
 * larger) and the direct algorithm for small kernels.
 *
 * See also: [sepFilter2D], [dft], [matchTemplate]
 *
 * @param src input image.
 * @param dst output image of the same size and the same number of channels as src.
 * @param ddepth desired depth of the destination image, see combinations
 * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating
 * point matrix; if you want to apply different kernels to different channels, split the image into
 * separate color planes using split and process them individually.
 * @param anchor anchor of the kernel that indicates the relative position of a filtered point
 * within the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the
 * anchor is at the kernel center.
 * @param delta optional value added to the filtered pixels before storing them in dst.
 * @param borderType pixel extrapolation method, see BorderTypes
 */
export declare function filter2D(
  src: InputArray,
  dst: OutputArray,
  ddepth: int,
  kernel: InputArray,
  anchor?: Point,
  delta?: double,
  borderType?: int,
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function convolves the source image with the specified Gaussian kernel. In-place filtering is
|
||||||
|
* supported.
|
||||||
|
*
|
||||||
|
* [sepFilter2D], [filter2D], [blur], [boxFilter], [bilateralFilter], [medianBlur]
|
||||||
|
*
|
||||||
|
* @param src input image; the image can have any number of channels, which are processed
|
||||||
|
* independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
|
||||||
|
*
|
||||||
|
* @param dst output image of the same size and type as src.
|
||||||
|
*
|
||||||
|
* @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
|
||||||
|
* positive and odd. Or, they can be zero's and then they are computed from sigma.
|
||||||
|
*
|
||||||
|
* @param sigmaX Gaussian kernel standard deviation in X direction.
|
||||||
|
*
|
||||||
|
* @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be
|
||||||
|
* equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
|
||||||
|
* respectively (see getGaussianKernel for details); to fully control the result regardless of possible
|
||||||
|
* future modifications of all this semantics, it is recommended to specify all of ksize, sigmaX, and
|
||||||
|
* sigmaY.
|
||||||
|
*
|
||||||
|
* @param borderType pixel extrapolation method, see BorderTypes
|
||||||
|
*/
|
||||||
|
export declare function GaussianBlur(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
ksize: Size,
|
||||||
|
sigmaX: double,
|
||||||
|
sigmaY?: double,
|
||||||
|
borderType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function computes and returns the filter coefficients for spatial image derivatives. When
|
||||||
|
* `ksize=FILTER_SCHARR`, the Scharr `$3 \\times 3$` kernels are generated (see [Scharr]). Otherwise,
|
||||||
|
* Sobel kernels are generated (see [Sobel]). The filters are normally passed to [sepFilter2D] or to
|
||||||
|
*
|
||||||
|
* @param kx Output matrix of row filter coefficients. It has the type ktype .
|
||||||
|
*
|
||||||
|
* @param ky Output matrix of column filter coefficients. It has the type ktype .
|
||||||
|
*
|
||||||
|
* @param dx Derivative order in respect of x.
|
||||||
|
*
|
||||||
|
* @param dy Derivative order in respect of y.
|
||||||
|
*
|
||||||
|
* @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
|
||||||
|
*
|
||||||
|
* @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.
|
||||||
|
* Theoretically, the coefficients should have the denominator $=2^{ksize*2-dx-dy-2}$. If you are going
|
||||||
|
* to filter floating-point images, you are likely to use the normalized kernels. But if you compute
|
||||||
|
* derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve all the
|
||||||
|
* fractional bits, you may want to set normalize=false .
|
||||||
|
*
|
||||||
|
* @param ktype Type of filter coefficients. It can be CV_32f or CV_64F .
|
||||||
|
*/
|
||||||
|
export declare function getDerivKernels(
|
||||||
|
kx: OutputArray,
|
||||||
|
ky: OutputArray,
|
||||||
|
dx: int,
|
||||||
|
dy: int,
|
||||||
|
ksize: int,
|
||||||
|
normalize?: bool,
|
||||||
|
ktype?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* For more details about gabor filter equations and parameters, see: .
|
||||||
|
*
|
||||||
|
* @param ksize Size of the filter returned.
|
||||||
|
*
|
||||||
|
* @param sigma Standard deviation of the gaussian envelope.
|
||||||
|
*
|
||||||
|
* @param theta Orientation of the normal to the parallel stripes of a Gabor function.
|
||||||
|
*
|
||||||
|
* @param lambd Wavelength of the sinusoidal factor.
|
||||||
|
*
|
||||||
|
* @param gamma Spatial aspect ratio.
|
||||||
|
*
|
||||||
|
* @param psi Phase offset.
|
||||||
|
*
|
||||||
|
* @param ktype Type of filter coefficients. It can be CV_32F or CV_64F .
|
||||||
|
*/
|
||||||
|
export declare function getGaborKernel(
|
||||||
|
ksize: Size,
|
||||||
|
sigma: double,
|
||||||
|
theta: double,
|
||||||
|
lambd: double,
|
||||||
|
gamma: double,
|
||||||
|
psi?: double,
|
||||||
|
ktype?: int,
|
||||||
|
): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function computes and returns the `$\\texttt{ksize} \\times 1$` matrix of Gaussian filter
|
||||||
|
* coefficients:
|
||||||
|
*
|
||||||
|
* `\\[G_i= \\alpha *e^{-(i-( \\texttt{ksize} -1)/2)^2/(2* \\texttt{sigma}^2)},\\]`
|
||||||
|
*
|
||||||
|
* where `$i=0..\\texttt{ksize}-1$` and `$\\alpha$` is the scale factor chosen so that `$\\sum_i
|
||||||
|
* G_i=1$`.
|
||||||
|
*
|
||||||
|
* Two of such generated kernels can be passed to sepFilter2D. Those functions automatically recognize
|
||||||
|
* smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly.
|
||||||
|
* You may also use the higher-level GaussianBlur.
|
||||||
|
*
|
||||||
|
* [sepFilter2D], [getDerivKernels], [getStructuringElement], [GaussianBlur]
|
||||||
|
*
|
||||||
|
* @param ksize Aperture size. It should be odd ( $\texttt{ksize} \mod 2 = 1$ ) and positive.
|
||||||
|
*
|
||||||
|
* @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as sigma
|
||||||
|
* = 0.3*((ksize-1)*0.5 - 1) + 0.8.
|
||||||
|
*
|
||||||
|
* @param ktype Type of filter coefficients. It can be CV_32F or CV_64F .
|
||||||
|
*/
|
||||||
|
export declare function getGaussianKernel(
|
||||||
|
ksize: int,
|
||||||
|
sigma: double,
|
||||||
|
ktype?: int,
|
||||||
|
): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function constructs and returns the structuring element that can be further passed to [erode],
|
||||||
|
* [dilate] or [morphologyEx]. But you can also construct an arbitrary binary mask yourself and use it
|
||||||
|
* as the structuring element.
|
||||||
|
*
|
||||||
|
* @param shape Element shape that could be one of MorphShapes
|
||||||
|
*
|
||||||
|
* @param ksize Size of the structuring element.
|
||||||
|
*
|
||||||
|
* @param anchor Anchor position within the element. The default value $(-1, -1)$ means that the anchor
|
||||||
|
* is at the center. Note that only the shape of a cross-shaped element depends on the anchor position.
|
||||||
|
* In other cases the anchor just regulates how much the result of the morphological operation is
|
||||||
|
* shifted.
|
||||||
|
*/
|
||||||
|
export declare function getStructuringElement(
|
||||||
|
shape: int,
|
||||||
|
ksize: Size,
|
||||||
|
anchor?: Point,
|
||||||
|
): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function calculates the Laplacian of the source image by adding up the second x and y
|
||||||
|
* derivatives calculated using the Sobel operator:
|
||||||
|
*
|
||||||
|
* `\\[\\texttt{dst} = \\Delta \\texttt{src} = \\frac{\\partial^2 \\texttt{src}}{\\partial x^2} +
|
||||||
|
* \\frac{\\partial^2 \\texttt{src}}{\\partial y^2}\\]`
|
||||||
|
*
|
||||||
|
* This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image
|
||||||
|
* with the following `$3 \\times 3$` aperture:
|
||||||
|
*
|
||||||
|
* `\\[\\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\\]`
|
||||||
|
*
|
||||||
|
* [Sobel], [Scharr]
|
||||||
|
*
|
||||||
|
* @param src Source image.
|
||||||
|
*
|
||||||
|
* @param dst Destination image of the same size and the same number of channels as src .
|
||||||
|
*
|
||||||
|
* @param ddepth Desired depth of the destination image.
|
||||||
|
*
|
||||||
|
* @param ksize Aperture size used to compute the second-derivative filters. See getDerivKernels for
|
||||||
|
* details. The size must be positive and odd.
|
||||||
|
*
|
||||||
|
* @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
|
||||||
|
* applied. See getDerivKernels for details.
|
||||||
|
*
|
||||||
|
* @param delta Optional delta value that is added to the results prior to storing them in dst .
|
||||||
|
*
|
||||||
|
* @param borderType Pixel extrapolation method, see BorderTypes
|
||||||
|
*/
|
||||||
|
export declare function Laplacian(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
ddepth: int,
|
||||||
|
ksize?: int,
|
||||||
|
scale?: double,
|
||||||
|
delta?: double,
|
||||||
|
borderType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function smoothes an image using the median filter with the `$\\texttt{ksize} \\times
|
||||||
|
* \\texttt{ksize}$` aperture. Each channel of a multi-channel image is processed independently.
|
||||||
|
* In-place operation is supported.
|
||||||
|
*
|
||||||
|
* The median filter uses [BORDER_REPLICATE] internally to cope with border pixels, see [BorderTypes]
|
||||||
|
*
|
||||||
|
* [bilateralFilter], [blur], [boxFilter], [GaussianBlur]
|
||||||
|
*
|
||||||
|
* @param src input 1-, 3-, or 4-channel image; when ksize is 3 or 5, the image depth should be CV_8U,
|
||||||
|
* CV_16U, or CV_32F, for larger aperture sizes, it can only be CV_8U.
|
||||||
|
*
|
||||||
|
* @param dst destination array of the same size and type as src.
|
||||||
|
*
|
||||||
|
* @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
|
||||||
|
*/
|
||||||
|
export declare function medianBlur(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
ksize: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
export declare function morphologyDefaultBorderValue(): Scalar;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function [cv::morphologyEx] can perform advanced morphological transformations using an erosion
|
||||||
|
* and dilation as basic operations.
|
||||||
|
*
|
||||||
|
* Any of the operations can be done in-place. In case of multi-channel images, each channel is
|
||||||
|
* processed independently.
|
||||||
|
*
|
||||||
|
* [dilate], [erode], [getStructuringElement]
|
||||||
|
*
|
||||||
|
* The number of iterations is the number of times erosion or dilatation operation will be applied. For
|
||||||
|
* instance, an opening operation ([MORPH_OPEN]) with two iterations is equivalent to apply
|
||||||
|
* successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate).
|
||||||
|
*
|
||||||
|
* @param src Source image. The number of channels can be arbitrary. The depth should be one of CV_8U,
|
||||||
|
* CV_16U, CV_16S, CV_32F or CV_64F.
|
||||||
|
*
|
||||||
|
* @param dst Destination image of the same size and type as source image.
|
||||||
|
*
|
||||||
|
* @param op Type of a morphological operation, see MorphTypes
|
||||||
|
*
|
||||||
|
* @param kernel Structuring element. It can be created using getStructuringElement.
|
||||||
|
*
|
||||||
|
* @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the kernel
|
||||||
|
* center.
|
||||||
|
*
|
||||||
|
* @param iterations Number of times erosion and dilation are applied.
|
||||||
|
*
|
||||||
|
* @param borderType Pixel extrapolation method, see BorderTypes
|
||||||
|
*
|
||||||
|
* @param borderValue Border value in case of a constant border. The default value has a special
|
||||||
|
* meaning.
|
||||||
|
*/
|
||||||
|
export declare function morphologyEx(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
op: int | MorphTypes,
|
||||||
|
kernel: InputArray,
|
||||||
|
anchor?: Point,
|
||||||
|
iterations?: int,
|
||||||
|
borderType?: int,
|
||||||
|
borderValue?: any,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* By default, size of the output image is computed as `Size((src.cols+1)/2, (src.rows+1)/2)`, but in
|
||||||
|
* any case, the following conditions should be satisfied:
|
||||||
|
*
|
||||||
|
* `\\[\\begin{array}{l} | \\texttt{dstsize.width} *2-src.cols| \\leq 2 \\\\ | \\texttt{dstsize.height}
|
||||||
|
* *2-src.rows| \\leq 2 \\end{array}\\]`
|
||||||
|
*
|
||||||
|
* The function performs the downsampling step of the Gaussian pyramid construction. First, it
|
||||||
|
* convolves the source image with the kernel:
|
||||||
|
*
|
||||||
|
* `\\[\\frac{1}{256} \\begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\\\ 4 & 16 & 24 & 16 & 4 \\\\ 6 & 24 & 36 &
|
||||||
|
* 24 & 6 \\\\ 4 & 16 & 24 & 16 & 4 \\\\ 1 & 4 & 6 & 4 & 1 \\end{bmatrix}\\]`
|
||||||
|
*
|
||||||
|
* Then, it downsamples the image by rejecting even rows and columns.
|
||||||
|
*
|
||||||
|
* @param src input image.
|
||||||
|
*
|
||||||
|
* @param dst output image; it has the specified size and the same type as src.
|
||||||
|
*
|
||||||
|
* @param dstsize size of the output image.
|
||||||
|
*
|
||||||
|
* @param borderType Pixel extrapolation method, see BorderTypes (BORDER_CONSTANT isn't supported)
|
||||||
|
*/
|
||||||
|
export declare function pyrDown(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
dstsize?: any,
|
||||||
|
borderType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function implements the filtering stage of meanshift segmentation, that is, the output of the
|
||||||
|
* function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
|
||||||
|
* At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
|
||||||
|
* meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
|
||||||
|
* considered:
|
||||||
|
*
|
||||||
|
* `\\[(x,y): X- \\texttt{sp} \\le x \\le X+ \\texttt{sp} , Y- \\texttt{sp} \\le y \\le Y+ \\texttt{sp}
|
||||||
|
* , ||(R,G,B)-(r,g,b)|| \\le \\texttt{sr}\\]`
|
||||||
|
*
|
||||||
|
* where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
|
||||||
|
* (though, the algorithm does not depend on the color space used, so any 3-component color space can
|
||||||
|
* be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
|
||||||
|
* (R',G',B') are found and they act as the neighborhood center on the next iteration:
|
||||||
|
*
|
||||||
|
* `\\[(X,Y)~(X',Y'), (R,G,B)~(R',G',B').\\]`
|
||||||
|
*
|
||||||
|
* After the iterations over, the color components of the initial pixel (that is, the pixel from where
|
||||||
|
* the iterations started) are set to the final value (average color at the last iteration):
|
||||||
|
*
|
||||||
|
* `\\[I(X,Y) <- (R*,G*,B*)\\]`
|
||||||
|
*
|
||||||
|
* When maxLevel > 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
|
||||||
|
* run on the smallest layer first. After that, the results are propagated to the larger layer and the
|
||||||
|
* iterations are run again only on those pixels where the layer colors differ by more than sr from the
|
||||||
|
* lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
|
||||||
|
* results will be actually different from the ones obtained by running the meanshift procedure on the
|
||||||
|
* whole original image (i.e. when maxLevel==0).
|
||||||
|
*
|
||||||
|
* @param src The source 8-bit, 3-channel image.
|
||||||
|
*
|
||||||
|
* @param dst The destination image of the same format and the same size as the source.
|
||||||
|
*
|
||||||
|
* @param sp The spatial window radius.
|
||||||
|
*
|
||||||
|
* @param sr The color window radius.
|
||||||
|
*
|
||||||
|
* @param maxLevel Maximum level of the pyramid for the segmentation.
|
||||||
|
*
|
||||||
|
* @param termcrit Termination criteria: when to stop meanshift iterations.
|
||||||
|
*/
|
||||||
|
export declare function pyrMeanShiftFiltering(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
sp: double,
|
||||||
|
sr: double,
|
||||||
|
maxLevel?: int,
|
||||||
|
termcrit?: TermCriteria,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* By default, size of the output image is computed as `Size(src.cols\\*2, (src.rows\\*2)`, but in any
|
||||||
|
* case, the following conditions should be satisfied:
|
||||||
|
*
|
||||||
|
* `\\[\\begin{array}{l} | \\texttt{dstsize.width} -src.cols*2| \\leq ( \\texttt{dstsize.width} \\mod
|
||||||
|
* 2) \\\\ | \\texttt{dstsize.height} -src.rows*2| \\leq ( \\texttt{dstsize.height} \\mod 2)
|
||||||
|
* \\end{array}\\]`
|
||||||
|
*
|
||||||
|
* The function performs the upsampling step of the Gaussian pyramid construction, though it can
|
||||||
|
* actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
|
||||||
|
* injecting even zero rows and columns and then convolves the result with the same kernel as in
|
||||||
|
* pyrDown multiplied by 4.
|
||||||
|
*
|
||||||
|
* @param src input image.
|
||||||
|
*
|
||||||
|
* @param dst output image. It has the specified size and the same type as src .
|
||||||
|
*
|
||||||
|
* @param dstsize size of the output image.
|
||||||
|
*
|
||||||
|
* @param borderType Pixel extrapolation method, see BorderTypes (only BORDER_DEFAULT is supported)
|
||||||
|
*/
|
||||||
|
export declare function pyrUp(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
dstsize?: any,
|
||||||
|
borderType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function computes the first x- or y- spatial image derivative using the Scharr operator. The
|
||||||
|
* call
|
||||||
|
*
|
||||||
|
* `\\[\\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\\]`
|
||||||
|
*
|
||||||
|
* is equivalent to
|
||||||
|
*
|
||||||
|
* `\\[\\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\\]`
|
||||||
|
*
|
||||||
|
* [cartToPolar]
|
||||||
|
*
|
||||||
|
* @param src input image.
|
||||||
|
*
|
||||||
|
* @param dst output image of the same size and the same number of channels as src.
|
||||||
|
*
|
||||||
|
* @param ddepth output image depth, see combinations
|
||||||
|
*
|
||||||
|
* @param dx order of the derivative x.
|
||||||
|
*
|
||||||
|
* @param dy order of the derivative y.
|
||||||
|
*
|
||||||
|
* @param scale optional scale factor for the computed derivative values; by default, no scaling is
|
||||||
|
* applied (see getDerivKernels for details).
|
||||||
|
*
|
||||||
|
* @param delta optional delta value that is added to the results prior to storing them in dst.
|
||||||
|
*
|
||||||
|
* @param borderType pixel extrapolation method, see BorderTypes
|
||||||
|
*/
|
||||||
|
export declare function Scharr(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
ddepth: int,
|
||||||
|
dx: int,
|
||||||
|
dy: int,
|
||||||
|
scale?: double,
|
||||||
|
delta?: double,
|
||||||
|
borderType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function applies a separable linear filter to the image. That is, first, every row of src is
|
||||||
|
* filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D kernel
|
||||||
|
* kernelY. The final result shifted by delta is stored in dst .
|
||||||
|
*
|
||||||
|
* [filter2D], [Sobel], [GaussianBlur], [boxFilter], [blur]
|
||||||
|
*
|
||||||
|
* @param src Source image.
|
||||||
|
*
|
||||||
|
* @param dst Destination image of the same size and the same number of channels as src .
|
||||||
|
*
|
||||||
|
* @param ddepth Destination image depth, see combinations
|
||||||
|
*
|
||||||
|
* @param kernelX Coefficients for filtering each row.
|
||||||
|
*
|
||||||
|
* @param kernelY Coefficients for filtering each column.
|
||||||
|
*
|
||||||
|
* @param anchor Anchor position within the kernel. The default value $(-1,-1)$ means that the anchor
|
||||||
|
* is at the kernel center.
|
||||||
|
*
|
||||||
|
* @param delta Value added to the filtered results before storing them.
|
||||||
|
*
|
||||||
|
* @param borderType Pixel extrapolation method, see BorderTypes
|
||||||
|
*/
|
||||||
|
export declare function sepFilter2D(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
ddepth: int,
|
||||||
|
kernelX: InputArray,
|
||||||
|
kernelY: InputArray,
|
||||||
|
anchor?: Point,
|
||||||
|
delta?: double,
|
||||||
|
borderType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* In all cases except one, the `$\\texttt{ksize} \\times \\texttt{ksize}$` separable kernel is used to
|
||||||
|
* calculate the derivative. When `$\\texttt{ksize = 1}$`, the `$3 \\times 1$` or `$1 \\times 3$`
|
||||||
|
* kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first
|
||||||
|
* or the second x- or y- derivatives.
|
||||||
|
*
|
||||||
|
* There is also the special value `ksize = [FILTER_SCHARR] (-1)` that corresponds to the `$3\\times3$`
|
||||||
|
* Scharr filter that may give more accurate results than the `$3\\times3$` Sobel. The Scharr aperture
|
||||||
|
* is
|
||||||
|
*
|
||||||
|
* `\\[\\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\\]`
|
||||||
|
*
|
||||||
|
* for the x-derivative, or transposed for the y-derivative.
|
||||||
|
*
|
||||||
|
* The function calculates an image derivative by convolving the image with the appropriate kernel:
|
||||||
|
*
|
||||||
|
* `\\[\\texttt{dst} = \\frac{\\partial^{xorder+yorder} \\texttt{src}}{\\partial x^{xorder} \\partial
|
||||||
|
* y^{yorder}}\\]`
|
||||||
|
*
|
||||||
|
* The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
|
||||||
|
* resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
|
||||||
|
* or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
|
||||||
|
* case corresponds to a kernel of:
|
||||||
|
*
|
||||||
|
* `\\[\\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\\]`
|
||||||
|
*
|
||||||
|
* The second case corresponds to a kernel of:
|
||||||
|
*
|
||||||
|
* `\\[\\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\\]`
|
||||||
|
*
|
||||||
|
* [Scharr], [Laplacian], [sepFilter2D], [filter2D], [GaussianBlur], [cartToPolar]
|
||||||
|
*
|
||||||
|
* @param src input image.
|
||||||
|
*
|
||||||
|
* @param dst output image of the same size and the same number of channels as src .
|
||||||
|
*
|
||||||
|
* @param ddepth output image depth, see combinations; in the case of 8-bit input images it will result
|
||||||
|
* in truncated derivatives.
|
||||||
|
*
|
||||||
|
* @param dx order of the derivative x.
|
||||||
|
*
|
||||||
|
* @param dy order of the derivative y.
|
||||||
|
*
|
||||||
|
* @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
|
||||||
|
*
|
||||||
|
* @param scale optional scale factor for the computed derivative values; by default, no scaling is
|
||||||
|
* applied (see getDerivKernels for details).
|
||||||
|
*
|
||||||
|
* @param delta optional delta value that is added to the results prior to storing them in dst.
|
||||||
|
*
|
||||||
|
* @param borderType pixel extrapolation method, see BorderTypes
|
||||||
|
*/
|
||||||
|
export declare function Sobel(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
ddepth: int,
|
||||||
|
dx: int,
|
||||||
|
dy: int,
|
||||||
|
ksize?: int,
|
||||||
|
scale?: double,
|
||||||
|
delta?: double,
|
||||||
|
borderType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Equivalent to calling:
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* Sobel( src, dx, CV_16SC1, 1, 0, 3 );
|
||||||
|
* Sobel( src, dy, CV_16SC1, 0, 1, 3 );
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* [Sobel]
|
||||||
|
*
|
||||||
|
* @param src input image.
|
||||||
|
*
|
||||||
|
* @param dx output image with first-order derivative in x.
|
||||||
|
*
|
||||||
|
* @param dy output image with first-order derivative in y.
|
||||||
|
*
|
||||||
|
* @param ksize size of Sobel kernel. It must be 3.
|
||||||
|
*
|
||||||
|
* @param borderType pixel extrapolation method, see BorderTypes
|
||||||
|
*/
|
||||||
|
export declare function spatialGradient(
|
||||||
|
src: InputArray,
|
||||||
|
dx: OutputArray,
|
||||||
|
dy: OutputArray,
|
||||||
|
ksize?: int,
|
||||||
|
borderType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* For every pixel `$ (x, y) $` in the source image, the function calculates the sum of squares of
|
||||||
|
* those neighboring pixel values which overlap the filter placed over the pixel `$ (x, y) $`.
|
||||||
|
*
|
||||||
|
* The unnormalized square box filter can be useful in computing local image statistics such as the the
|
||||||
|
* local variance and standard deviation around the neighborhood of a pixel.
|
||||||
|
*
|
||||||
|
* [boxFilter]
|
||||||
|
*
|
||||||
|
* @param src input image
|
||||||
|
*
|
||||||
|
* @param dst output image of the same size and type as _src
|
||||||
|
*
|
||||||
|
* @param ddepth the output image depth (-1 to use src.depth())
|
||||||
|
*
|
||||||
|
* @param ksize kernel size
|
||||||
|
*
|
||||||
|
* @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at
|
||||||
|
* the kernel center.
|
||||||
|
*
|
||||||
|
* @param normalize flag, specifying whether the kernel is to be normalized by it's area or not.
|
||||||
|
*
|
||||||
|
* @param borderType border mode used to extrapolate pixels outside of the image, see BorderTypes
|
||||||
|
*/
|
||||||
|
export declare function sqrBoxFilter(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
ddepth: int,
|
||||||
|
ksize: Size,
|
||||||
|
anchor?: Point,
|
||||||
|
normalize?: bool,
|
||||||
|
borderType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
export declare const MORPH_RECT: MorphShapes; // initializer: = 0
|
||||||
|
|
||||||
|
/**
|
||||||
|
* a cross-shaped structuring element: `\\[E_{ij} = \\fork{1}{if i=\\texttt{anchor.y} or
|
||||||
|
* j=\\texttt{anchor.x}}{0}{otherwise}\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const MORPH_CROSS: MorphShapes; // initializer: = 1
|
||||||
|
|
||||||
|
/**
|
||||||
|
* an elliptic structuring element, that is, a filled ellipse inscribed into the rectangle Rect(0, 0,
|
||||||
|
* esize.width, 0.esize.height)
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const MORPH_ELLIPSE: MorphShapes; // initializer: = 2
|
||||||
|
|
||||||
|
export declare const MORPH_ERODE: MorphTypes; // initializer: = 0
|
||||||
|
|
||||||
|
export declare const MORPH_DILATE: MorphTypes; // initializer: = 1
|
||||||
|
|
||||||
|
/**
|
||||||
|
* an opening operation `\\[\\texttt{dst} = \\mathrm{open} ( \\texttt{src} , \\texttt{element} )=
|
||||||
|
* \\mathrm{dilate} ( \\mathrm{erode} ( \\texttt{src} , \\texttt{element} ))\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const MORPH_OPEN: MorphTypes; // initializer: = 2
|
||||||
|
|
||||||
|
/**
|
||||||
|
* a closing operation `\\[\\texttt{dst} = \\mathrm{close} ( \\texttt{src} , \\texttt{element} )=
|
||||||
|
* \\mathrm{erode} ( \\mathrm{dilate} ( \\texttt{src} , \\texttt{element} ))\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const MORPH_CLOSE: MorphTypes; // initializer: = 3
|
||||||
|
|
||||||
|
/**
|
||||||
|
* a morphological gradient `\\[\\texttt{dst} = \\mathrm{morph\\_grad} ( \\texttt{src} ,
|
||||||
|
* \\texttt{element} )= \\mathrm{dilate} ( \\texttt{src} , \\texttt{element} )- \\mathrm{erode} (
|
||||||
|
* \\texttt{src} , \\texttt{element} )\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const MORPH_GRADIENT: MorphTypes; // initializer: = 4
|
||||||
|
|
||||||
|
/**
|
||||||
|
* "top hat" `\\[\\texttt{dst} = \\mathrm{tophat} ( \\texttt{src} , \\texttt{element} )= \\texttt{src}
|
||||||
|
* - \\mathrm{open} ( \\texttt{src} , \\texttt{element} )\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const MORPH_TOPHAT: MorphTypes; // initializer: = 5
|
||||||
|
|
||||||
|
/**
|
||||||
|
* "black hat" `\\[\\texttt{dst} = \\mathrm{blackhat} ( \\texttt{src} , \\texttt{element} )=
|
||||||
|
* \\mathrm{close} ( \\texttt{src} , \\texttt{element} )- \\texttt{src}\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const MORPH_BLACKHAT: MorphTypes; // initializer: = 6
|
||||||
|
|
||||||
|
/**
|
||||||
|
* "hit or miss" .- Only supported for CV_8UC1 binary images. A tutorial can be found in the
|
||||||
|
* documentation
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const MORPH_HITMISS: MorphTypes; // initializer: = 7
|
||||||
|
|
||||||
|
export declare const FILTER_SCHARR: SpecialFilter; // initializer: = -1
|
||||||
|
|
||||||
|
export type MorphShapes = any;
|
||||||
|
|
||||||
|
export type MorphTypes = any;
|
||||||
|
|
||||||
|
export type SpecialFilter = any;
|
||||||
399
opencv-js-4.10.0/src/types/opencv/imgproc_hist.ts
Normal file
399
opencv-js-4.10.0/src/types/opencv/imgproc_hist.ts
Normal file
|
|
@ -0,0 +1,399 @@
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
float,
|
||||||
|
InputArray,
|
||||||
|
InputArrayOfArrays,
|
||||||
|
int,
|
||||||
|
OutputArray,
|
||||||
|
Size,
|
||||||
|
} from "./_types";
|
||||||
|
/*
|
||||||
|
* # Histograms
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* The function [cv::calcBackProject] calculates the back project of the histogram. That is, similarly
|
||||||
|
* to [calcHist] , at each location (x, y) the function collects the values from the selected channels
|
||||||
|
* in the input images and finds the corresponding histogram bin. But instead of incrementing it, the
|
||||||
|
* function reads the bin value, scales it by scale , and stores in backProject(x,y) . In terms of
|
||||||
|
* statistics, the function computes probability of each element value in respect with the empirical
|
||||||
|
* probability distribution represented by the histogram. See how, for example, you can find and track
|
||||||
|
* a bright-colored object in a scene:
|
||||||
|
*
|
||||||
|
* Before tracking, show the object to the camera so that it covers almost the whole frame. Calculate a
|
||||||
|
* hue histogram. The histogram may have strong maximums, corresponding to the dominant colors in the
|
||||||
|
* object.
|
||||||
|
* When tracking, calculate a back projection of a hue plane of each input video frame using that
|
||||||
|
* pre-computed histogram. Threshold the back projection to suppress weak colors. It may also make
|
||||||
|
* sense to suppress pixels with non-sufficient color saturation and too dark or too bright pixels.
|
||||||
|
* Find connected components in the resulting picture and choose, for example, the largest component.
|
||||||
|
*
|
||||||
|
* This is an approximate algorithm of the CamShift color object tracker.
|
||||||
|
*
|
||||||
|
* [calcHist], [compareHist]
|
||||||
|
*
|
||||||
|
* @param images Source arrays. They all should have the same depth, CV_8U, CV_16U or CV_32F , and the
|
||||||
|
* same size. Each of them can have an arbitrary number of channels.
|
||||||
|
*
|
||||||
|
* @param nimages Number of source images.
|
||||||
|
*
|
||||||
|
* @param channels The list of channels used to compute the back projection. The number of channels
|
||||||
|
* must match the histogram dimensionality. The first array channels are numerated from 0 to
|
||||||
|
* images[0].channels()-1 , the second array channels are counted from images[0].channels() to
|
||||||
|
* images[0].channels() + images[1].channels()-1, and so on.
|
||||||
|
*
|
||||||
|
* @param hist Input histogram that can be dense or sparse.
|
||||||
|
*
|
||||||
|
* @param backProject Destination back projection array that is a single-channel array of the same size
|
||||||
|
* and depth as images[0] .
|
||||||
|
*
|
||||||
|
* @param ranges Array of arrays of the histogram bin boundaries in each dimension. See calcHist .
|
||||||
|
*
|
||||||
|
* @param scale Optional scale factor for the output back projection.
|
||||||
|
*
|
||||||
|
* @param uniform Flag indicating whether the histogram is uniform or not (see above).
|
||||||
|
*/
|
||||||
|
export declare function calcBackProject(
|
||||||
|
images: any,
|
||||||
|
nimages: int,
|
||||||
|
channels: any,
|
||||||
|
hist: InputArray,
|
||||||
|
backProject: OutputArray,
|
||||||
|
ranges: any,
|
||||||
|
scale?: double,
|
||||||
|
uniform?: bool,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function calcBackProject(
|
||||||
|
images: any,
|
||||||
|
nimages: int,
|
||||||
|
channels: any,
|
||||||
|
hist: any,
|
||||||
|
backProject: OutputArray,
|
||||||
|
ranges: any,
|
||||||
|
scale?: double,
|
||||||
|
uniform?: bool,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function calcBackProject(
|
||||||
|
images: InputArrayOfArrays,
|
||||||
|
channels: any,
|
||||||
|
hist: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
ranges: any,
|
||||||
|
scale: double,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function [cv::calcHist] calculates the histogram of one or more arrays. The elements of a tuple
|
||||||
|
* used to increment a histogram bin are taken from the corresponding input arrays at the same
|
||||||
|
* location. The sample below shows how to compute a 2D Hue-Saturation histogram for a color image. :
|
||||||
|
*
|
||||||
|
* ```cpp
|
||||||
|
* #include <opencv2/imgproc.hpp>
|
||||||
|
* #include <opencv2/highgui.hpp>
|
||||||
|
*
|
||||||
|
* using namespace cv;
|
||||||
|
*
|
||||||
|
* int main( int argc, char** argv )
|
||||||
|
* {
|
||||||
|
* Mat src, hsv;
|
||||||
|
* if( argc != 2 || !(src=imread(argv[1], 1)).data )
|
||||||
|
* return -1;
|
||||||
|
*
|
||||||
|
* cvtColor(src, hsv, COLOR_BGR2HSV);
|
||||||
|
*
|
||||||
|
* // Quantize the hue to 30 levels
|
||||||
|
* // and the saturation to 32 levels
|
||||||
|
* int hbins = 30, sbins = 32;
|
||||||
|
* int histSize[] = {hbins, sbins};
|
||||||
|
* // hue varies from 0 to 179, see cvtColor
|
||||||
|
* float hranges[] = { 0, 180 };
|
||||||
|
* // saturation varies from 0 (black-gray-white) to
|
||||||
|
* // 255 (pure spectrum color)
|
||||||
|
* float sranges[] = { 0, 256 };
|
||||||
|
* const float* ranges[] = { hranges, sranges };
|
||||||
|
* MatND hist;
|
||||||
|
* // we compute the histogram from the 0-th and 1-st channels
|
||||||
|
* int channels[] = {0, 1};
|
||||||
|
*
|
||||||
|
* calcHist( &hsv, 1, channels, Mat(), // do not use mask
|
||||||
|
* hist, 2, histSize, ranges,
|
||||||
|
* true, // the histogram is uniform
|
||||||
|
* false );
|
||||||
|
* double maxVal=0;
|
||||||
|
* minMaxLoc(hist, 0, &maxVal, 0, 0);
|
||||||
|
*
|
||||||
|
* int scale = 10;
|
||||||
|
* Mat histImg = Mat::zeros(sbins*scale, hbins*10, CV_8UC3);
|
||||||
|
*
|
||||||
|
* for( int h = 0; h < hbins; h++ )
|
||||||
|
* for( int s = 0; s < sbins; s++ )
|
||||||
|
* {
|
||||||
|
* float binVal = hist.at<float>(h, s);
|
||||||
|
* int intensity = cvRound(binVal*255/maxVal);
|
||||||
|
* rectangle( histImg, Point(h*scale, s*scale),
|
||||||
|
* Point( (h+1)*scale - 1, (s+1)*scale - 1),
|
||||||
|
* Scalar::all(intensity),
|
||||||
|
* -1 );
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* namedWindow( "Source", 1 );
|
||||||
|
* imshow( "Source", src );
|
||||||
|
*
|
||||||
|
* namedWindow( "H-S Histogram", 1 );
|
||||||
|
* imshow( "H-S Histogram", histImg );
|
||||||
|
* waitKey();
|
||||||
|
* }
|
||||||
|
* ```
|
||||||
|
*
|
||||||
|
* @param images Source arrays. They all should have the same depth, CV_8U, CV_16U or CV_32F , and the
|
||||||
|
* same size. Each of them can have an arbitrary number of channels.
|
||||||
|
*
|
||||||
|
* @param nimages Number of source images.
|
||||||
|
*
|
||||||
|
* @param channels List of the dims channels used to compute the histogram. The first array channels
|
||||||
|
* are numerated from 0 to images[0].channels()-1 , the second array channels are counted from
|
||||||
|
* images[0].channels() to images[0].channels() + images[1].channels()-1, and so on.
|
||||||
|
*
|
||||||
|
* @param mask Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as
|
||||||
|
* images[i] . The non-zero mask elements mark the array elements counted in the histogram.
|
||||||
|
*
|
||||||
|
* @param hist Output histogram, which is a dense or sparse dims -dimensional array.
|
||||||
|
*
|
||||||
|
* @param dims Histogram dimensionality that must be positive and not greater than CV_MAX_DIMS (equal
|
||||||
|
* to 32 in the current OpenCV version).
|
||||||
|
*
|
||||||
|
* @param histSize Array of histogram sizes in each dimension.
|
||||||
|
*
|
||||||
|
* @param ranges Array of the dims arrays of the histogram bin boundaries in each dimension. When the
|
||||||
|
* histogram is uniform ( uniform =true), then for each dimension i it is enough to specify the lower
|
||||||
|
* (inclusive) boundary $L_0$ of the 0-th histogram bin and the upper (exclusive) boundary
|
||||||
|
* $U_{\texttt{histSize}[i]-1}$ for the last histogram bin histSize[i]-1 . That is, in case of a
|
||||||
|
* uniform histogram each of ranges[i] is an array of 2 elements. When the histogram is not uniform (
|
||||||
|
* uniform=false ), then each of ranges[i] contains histSize[i]+1 elements: $L_0, U_0=L_1, U_1=L_2,
|
||||||
|
* ..., U_{\texttt{histSize[i]}-2}=L_{\texttt{histSize[i]}-1}, U_{\texttt{histSize[i]}-1}$ . The array
|
||||||
|
* elements, that are not between $L_0$ and $U_{\texttt{histSize[i]}-1}$ , are not counted in the
|
||||||
|
* histogram.
|
||||||
|
*
|
||||||
|
* @param uniform Flag indicating whether the histogram is uniform or not (see above).
|
||||||
|
*
|
||||||
|
* @param accumulate Accumulation flag. If it is set, the histogram is not cleared in the beginning
|
||||||
|
* when it is allocated. This feature enables you to compute a single histogram from several sets of
|
||||||
|
* arrays, or to update the histogram in time.
|
||||||
|
*/
|
||||||
|
export declare function calcHist(
|
||||||
|
images: any,
|
||||||
|
nimages: int,
|
||||||
|
channels: any,
|
||||||
|
mask: InputArray,
|
||||||
|
hist: OutputArray,
|
||||||
|
dims: int,
|
||||||
|
histSize: any,
|
||||||
|
ranges: any,
|
||||||
|
uniform?: bool,
|
||||||
|
accumulate?: bool,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* this variant uses SparseMat for output
|
||||||
|
*/
|
||||||
|
export declare function calcHist(
|
||||||
|
images: any,
|
||||||
|
nimages: int,
|
||||||
|
channels: any,
|
||||||
|
mask: InputArray,
|
||||||
|
hist: any,
|
||||||
|
dims: int,
|
||||||
|
histSize: any,
|
||||||
|
ranges: any,
|
||||||
|
uniform?: bool,
|
||||||
|
accumulate?: bool,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function calcHist(
|
||||||
|
images: InputArrayOfArrays,
|
||||||
|
channels: any,
|
||||||
|
mask: InputArray,
|
||||||
|
hist: OutputArray,
|
||||||
|
histSize: any,
|
||||||
|
ranges: any,
|
||||||
|
accumulate?: bool,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function [cv::compareHist] compares two dense or two sparse histograms using the specified
|
||||||
|
* method.
|
||||||
|
*
|
||||||
|
* The function returns `$d(H_1, H_2)$` .
|
||||||
|
*
|
||||||
|
* While the function works well with 1-, 2-, 3-dimensional dense histograms, it may not be suitable
|
||||||
|
* for high-dimensional sparse histograms. In such histograms, because of aliasing and sampling
|
||||||
|
* problems, the coordinates of non-zero histogram bins can slightly shift. To compare such histograms
|
||||||
|
* or more general sparse configurations of weighted points, consider using the [EMD] function.
|
||||||
|
*
|
||||||
|
* @param H1 First compared histogram.
|
||||||
|
*
|
||||||
|
* @param H2 Second compared histogram of the same size as H1 .
|
||||||
|
*
|
||||||
|
* @param method Comparison method, see HistCompMethods
|
||||||
|
*/
|
||||||
|
export declare function compareHist(
|
||||||
|
H1: InputArray,
|
||||||
|
H2: InputArray,
|
||||||
|
method: int,
|
||||||
|
): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function compareHist(H1: any, H2: any, method: int): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param clipLimit Threshold for contrast limiting.
|
||||||
|
*
|
||||||
|
* @param tileGridSize Size of grid for histogram equalization. Input image will be divided into
|
||||||
|
* equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
|
||||||
|
*/
|
||||||
|
export declare function createCLAHE(
|
||||||
|
clipLimit?: double,
|
||||||
|
tileGridSize?: Size,
|
||||||
|
): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function computes the earth mover distance and/or a lower boundary of the distance between the
|
||||||
|
* two weighted point configurations. One of the applications described in RubnerSept98, Rubner2000 is
|
||||||
|
* multi-dimensional histogram comparison for image retrieval. EMD is a transportation problem that is
|
||||||
|
* solved using some modification of a simplex algorithm, thus the complexity is exponential in the
|
||||||
|
* worst case, though, on average it is much faster. In the case of a real metric the lower boundary
|
||||||
|
* can be calculated even faster (using linear-time algorithm) and it can be used to determine roughly
|
||||||
|
* whether the two signatures are far enough so that they cannot relate to the same object.
|
||||||
|
*
|
||||||
|
* @param signature1 First signature, a $\texttt{size1}\times \texttt{dims}+1$ floating-point matrix.
|
||||||
|
* Each row stores the point weight followed by the point coordinates. The matrix is allowed to have a
|
||||||
|
* single column (weights only) if the user-defined cost matrix is used. The weights must be
|
||||||
|
* non-negative and have at least one non-zero value.
|
||||||
|
*
|
||||||
|
* @param signature2 Second signature of the same format as signature1 , though the number of rows may
|
||||||
|
* be different. The total weights may be different. In this case an extra "dummy" point is added to
|
||||||
|
* either signature1 or signature2. The weights must be non-negative and have at least one non-zero
|
||||||
|
* value.
|
||||||
|
*
|
||||||
|
* @param distType Used metric. See DistanceTypes.
|
||||||
|
*
|
||||||
|
* @param cost User-defined $\texttt{size1}\times \texttt{size2}$ cost matrix. Also, if a cost matrix
|
||||||
|
* is used, lower boundary lowerBound cannot be calculated because it needs a metric function.
|
||||||
|
*
|
||||||
|
* @param lowerBound Optional input/output parameter: lower boundary of a distance between the two
|
||||||
|
* signatures that is a distance between mass centers. The lower boundary may not be calculated if the
|
||||||
|
* user-defined cost matrix is used, the total weights of point configurations are not equal, or if the
|
||||||
|
* signatures consist of weights only (the signature matrices have a single column). You must**
|
||||||
|
* initialize *lowerBound . If the calculated distance between mass centers is greater or equal to
|
||||||
|
* *lowerBound (it means that the signatures are far enough), the function does not calculate EMD. In
|
||||||
|
* any case *lowerBound is set to the calculated distance between mass centers on return. Thus, if you
|
||||||
|
* want to calculate both distance between mass centers and EMD, *lowerBound should be set to 0.
|
||||||
|
*
|
||||||
|
* @param flow Resultant $\texttt{size1} \times \texttt{size2}$ flow matrix: $\texttt{flow}_{i,j}$ is a
|
||||||
|
* flow from $i$ -th point of signature1 to $j$ -th point of signature2 .
|
||||||
|
*/
|
||||||
|
export declare function EMD(
|
||||||
|
signature1: InputArray,
|
||||||
|
signature2: InputArray,
|
||||||
|
distType: int,
|
||||||
|
cost?: InputArray,
|
||||||
|
lowerBound?: any,
|
||||||
|
flow?: OutputArray,
|
||||||
|
): float;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function equalizes the histogram of the input image using the following algorithm:
|
||||||
|
*
|
||||||
|
* Calculate the histogram `$H$` for src .
|
||||||
|
* Normalize the histogram so that the sum of histogram bins is 255.
|
||||||
|
* Compute the integral of the histogram: `\\[H'_i = \\sum _{0 \\le j < i} H(j)\\]`
|
||||||
|
* Transform the image using `$H'$` as a look-up table: `$\\texttt{dst}(x,y) = H'(\\texttt{src}(x,y))$`
|
||||||
|
*
|
||||||
|
* The algorithm normalizes the brightness and increases the contrast of the image.
|
||||||
|
*
|
||||||
|
* @param src Source 8-bit single channel image.
|
||||||
|
*
|
||||||
|
* @param dst Destination image of the same size and type as src .
|
||||||
|
*/
|
||||||
|
export declare function equalizeHist(src: InputArray, dst: OutputArray): void;
|
||||||
|
|
||||||
|
export declare function wrapperEMD(
|
||||||
|
signature1: InputArray,
|
||||||
|
signature2: InputArray,
|
||||||
|
distType: int,
|
||||||
|
cost?: InputArray,
|
||||||
|
lowerBound?: any,
|
||||||
|
flow?: OutputArray,
|
||||||
|
): float;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Correlation `\\[d(H_1,H_2) = \\frac{\\sum_I (H_1(I) - \\bar{H_1}) (H_2(I) -
|
||||||
|
* \\bar{H_2})}{\\sqrt{\\sum_I(H_1(I) - \\bar{H_1})^2 \\sum_I(H_2(I) - \\bar{H_2})^2}}\\]` where
|
||||||
|
* `\\[\\bar{H_k} = \\frac{1}{N} \\sum _J H_k(J)\\]` and `$N$` is a total number of histogram bins.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const HISTCMP_CORREL: HistCompMethods; // initializer: = 0
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Chi-Square `\\[d(H_1,H_2) = \\sum _I \\frac{\\left(H_1(I)-H_2(I)\\right)^2}{H_1(I)}\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const HISTCMP_CHISQR: HistCompMethods; // initializer: = 1
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Intersection `\\[d(H_1,H_2) = \\sum _I \\min (H_1(I), H_2(I))\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const HISTCMP_INTERSECT: HistCompMethods; // initializer: = 2
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Bhattacharyya distance (In fact, OpenCV computes Hellinger distance, which is related to
|
||||||
|
* Bhattacharyya coefficient.) `\\[d(H_1,H_2) = \\sqrt{1 - \\frac{1}{\\sqrt{\\bar{H_1} \\bar{H_2} N^2}}
|
||||||
|
* \\sum_I \\sqrt{H_1(I) \\cdot H_2(I)}}\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const HISTCMP_BHATTACHARYYA: HistCompMethods; // initializer: = 3
|
||||||
|
|
||||||
|
export declare const HISTCMP_HELLINGER: HistCompMethods; // initializer: = HISTCMP_BHATTACHARYYA
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Alternative Chi-Square `\\[d(H_1,H_2) = 2 * \\sum _I
|
||||||
|
* \\frac{\\left(H_1(I)-H_2(I)\\right)^2}{H_1(I)+H_2(I)}\\]` This alternative formula is regularly used
|
||||||
|
* for texture comparison. See e.g. Puzicha1997
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const HISTCMP_CHISQR_ALT: HistCompMethods; // initializer: = 4
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Kullback-Leibler divergence `\\[d(H_1,H_2) = \\sum _I H_1(I) \\log
|
||||||
|
* \\left(\\frac{H_1(I)}{H_2(I)}\\right)\\]`
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const HISTCMP_KL_DIV: HistCompMethods; // initializer: = 5
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Histogram comparison methods
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export type HistCompMethods = any;
|
||||||
616
opencv-js-4.10.0/src/types/opencv/imgproc_misc.ts
Normal file
616
opencv-js-4.10.0/src/types/opencv/imgproc_misc.ts
Normal file
|
|
@ -0,0 +1,616 @@
|
||||||
|
import type {
|
||||||
|
double,
|
||||||
|
InputArray,
|
||||||
|
InputOutputArray,
|
||||||
|
int,
|
||||||
|
OutputArray,
|
||||||
|
Point,
|
||||||
|
Rect,
|
||||||
|
Scalar,
|
||||||
|
} from "./_types";
|
||||||
|
/*
|
||||||
|
* # Miscellaneous Image Transformations
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* The function transforms a grayscale image to a binary image according to the formulae:
|
||||||
|
*
|
||||||
|
* **THRESH_BINARY** `\\[dst(x,y) = \\fork{\\texttt{maxValue}}{if \\(src(x,y) >
|
||||||
|
* T(x,y)\\)}{0}{otherwise}\\]`
|
||||||
|
* **THRESH_BINARY_INV** `\\[dst(x,y) = \\fork{0}{if \\(src(x,y) >
|
||||||
|
* T(x,y)\\)}{\\texttt{maxValue}}{otherwise}\\]` where `$T(x,y)$` is a threshold calculated
|
||||||
|
* individually for each pixel (see adaptiveMethod parameter).
|
||||||
|
*
|
||||||
|
* The function can process the image in-place.
|
||||||
|
*
|
||||||
|
* [threshold], [blur], [GaussianBlur]
|
||||||
|
*
|
||||||
|
* @param src Source 8-bit single-channel image.
|
||||||
|
*
|
||||||
|
* @param dst Destination image of the same size and the same type as src.
|
||||||
|
*
|
||||||
|
* @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied
|
||||||
|
*
|
||||||
|
* @param adaptiveMethod Adaptive thresholding algorithm to use, see AdaptiveThresholdTypes. The
|
||||||
|
* BORDER_REPLICATE | BORDER_ISOLATED is used to process boundaries.
|
||||||
|
*
|
||||||
|
* @param thresholdType Thresholding type that must be either THRESH_BINARY or THRESH_BINARY_INV, see
|
||||||
|
* ThresholdTypes.
|
||||||
|
*
|
||||||
|
* @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the
|
||||||
|
* pixel: 3, 5, 7, and so on.
|
||||||
|
*
|
||||||
|
* @param C Constant subtracted from the mean or weighted mean (see the details below). Normally, it is
|
||||||
|
* positive but may be zero or negative as well.
|
||||||
|
*/
|
||||||
|
export declare function adaptiveThreshold(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
maxValue: double,
|
||||||
|
adaptiveMethod: int,
|
||||||
|
thresholdType: int,
|
||||||
|
blockSize: int,
|
||||||
|
C: double,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Performs linear blending of two images: `\\[ \\texttt{dst}(i,j) =
|
||||||
|
* \\texttt{weights1}(i,j)*\\texttt{src1}(i,j) + \\texttt{weights2}(i,j)*\\texttt{src2}(i,j) \\]`
|
||||||
|
*
|
||||||
|
* @param src1 It has a type of CV_8UC(n) or CV_32FC(n), where n is a positive integer.
|
||||||
|
*
|
||||||
|
* @param src2 It has the same type and size as src1.
|
||||||
|
*
|
||||||
|
* @param weights1 It has a type of CV_32FC1 and the same size with src1.
|
||||||
|
*
|
||||||
|
* @param weights2 It has a type of CV_32FC1 and the same size with src1.
|
||||||
|
*
|
||||||
|
* @param dst It is created if it does not have the same size and type with src1.
|
||||||
|
*/
|
||||||
|
export declare function blendLinear(
|
||||||
|
src1: InputArray,
|
||||||
|
src2: InputArray,
|
||||||
|
weights1: InputArray,
|
||||||
|
weights2: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function [cv::distanceTransform] calculates the approximate or precise distance from every
|
||||||
|
* binary image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be
|
||||||
|
* zero.
|
||||||
|
*
|
||||||
|
* When maskSize == [DIST_MASK_PRECISE] and distanceType == [DIST_L2] , the function runs the algorithm
|
||||||
|
* described in Felzenszwalb04 . This algorithm is parallelized with the TBB library.
|
||||||
|
*
|
||||||
|
* In other cases, the algorithm Borgefors86 is used. This means that for a pixel the function finds
|
||||||
|
* the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,
|
||||||
|
* diagonal, or knight's move (the latest is available for a `$5\\times 5$` mask). The overall distance
|
||||||
|
* is calculated as a sum of these basic distances. Since the distance function should be symmetric,
|
||||||
|
* all of the horizontal and vertical shifts must have the same cost (denoted as a ), all the diagonal
|
||||||
|
* shifts must have the same cost (denoted as `b`), and all knight's moves must have the same cost
|
||||||
|
* (denoted as `c`). For the [DIST_C] and [DIST_L1] types, the distance is calculated precisely,
|
||||||
|
* whereas for [DIST_L2] (Euclidean distance) the distance can be calculated only with a relative error
|
||||||
|
* (a `$5\\times 5$` mask gives more accurate results). For `a`,`b`, and `c`, OpenCV uses the values
|
||||||
|
* suggested in the original paper:
|
||||||
|
*
|
||||||
|
* DIST_L1: `a = 1, b = 2`
|
||||||
|
* DIST_L2:
|
||||||
|
*
|
||||||
|
* `3 x 3`: `a=0.955, b=1.3693`
|
||||||
|
* `5 x 5`: `a=1, b=1.4, c=2.1969`
|
||||||
|
*
|
||||||
|
* DIST_C: `a = 1, b = 1`
|
||||||
|
*
|
||||||
|
* Typically, for a fast, coarse distance estimation [DIST_L2], a `$3\\times 3$` mask is used. For a
|
||||||
|
* more accurate distance estimation [DIST_L2], a `$5\\times 5$` mask or the precise algorithm is used.
|
||||||
|
* Note that both the precise and the approximate algorithms are linear on the number of pixels.
|
||||||
|
*
|
||||||
|
* This variant of the function does not only compute the minimum distance for each pixel `$(x, y)$`
|
||||||
|
* but also identifies the nearest connected component consisting of zero pixels
|
||||||
|
* (labelType==[DIST_LABEL_CCOMP]) or the nearest zero pixel (labelType==[DIST_LABEL_PIXEL]). Index of
|
||||||
|
* the component/pixel is stored in `labels(x, y)`. When labelType==[DIST_LABEL_CCOMP], the function
|
||||||
|
* automatically finds connected components of zero pixels in the input image and marks them with
|
||||||
|
* distinct labels. When labelType==[DIST_LABEL_CCOMP], the function scans through the input image and
|
||||||
|
* marks all the zero pixels with distinct labels.
|
||||||
|
*
|
||||||
|
* In this mode, the complexity is still linear. That is, the function provides a very fast way to
|
||||||
|
* compute the Voronoi diagram for a binary image. Currently, the second variant can use only the
|
||||||
|
* approximate distance transform algorithm, i.e. maskSize=[DIST_MASK_PRECISE] is not supported yet.
|
||||||
|
*
|
||||||
|
* @param src 8-bit, single-channel (binary) source image.
|
||||||
|
*
|
||||||
|
* @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point,
|
||||||
|
* single-channel image of the same size as src.
|
||||||
|
*
|
||||||
|
* @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type CV_32SC1 and
|
||||||
|
* the same size as src.
|
||||||
|
*
|
||||||
|
* @param distanceType Type of distance, see DistanceTypes
|
||||||
|
*
|
||||||
|
* @param maskSize Size of the distance transform mask, see DistanceTransformMasks. DIST_MASK_PRECISE
|
||||||
|
* is not supported by this variant. In case of the DIST_L1 or DIST_C distance type, the parameter is
|
||||||
|
* forced to 3 because a $3\times 3$ mask gives the same result as $5\times 5$ or any larger aperture.
|
||||||
|
*
|
||||||
|
* @param labelType Type of the label array to build, see DistanceTransformLabelTypes.
|
||||||
|
*/
|
||||||
|
export declare function distanceTransform(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
labels: OutputArray,
|
||||||
|
distanceType: int,
|
||||||
|
maskSize: int,
|
||||||
|
labelType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* @param src 8-bit, single-channel (binary) source image.
|
||||||
|
*
|
||||||
|
* @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point,
|
||||||
|
* single-channel image of the same size as src .
|
||||||
|
*
|
||||||
|
* @param distanceType Type of distance, see DistanceTypes
|
||||||
|
*
|
||||||
|
* @param maskSize Size of the distance transform mask, see DistanceTransformMasks. In case of the
|
||||||
|
* DIST_L1 or DIST_C distance type, the parameter is forced to 3 because a $3\times 3$ mask gives the
|
||||||
|
* same result as $5\times 5$ or any larger aperture.
|
||||||
|
*
|
||||||
|
* @param dstType Type of output image. It can be CV_8U or CV_32F. Type CV_8U can be used only for the
|
||||||
|
* first variant of the function and distanceType == DIST_L1.
|
||||||
|
*/
|
||||||
|
export declare function distanceTransform(
|
||||||
|
src: InputArray,
|
||||||
|
dst: OutputArray,
|
||||||
|
distanceType: int,
|
||||||
|
maskSize: int,
|
||||||
|
dstType?: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*
|
||||||
|
* variant without `mask` parameter
|
||||||
|
*/
|
||||||
|
export declare function floodFill(
|
||||||
|
image: InputOutputArray,
|
||||||
|
seedPoint: Point,
|
||||||
|
newVal: Scalar,
|
||||||
|
rect?: any,
|
||||||
|
loDiff?: Scalar,
|
||||||
|
upDiff?: Scalar,
|
||||||
|
flags?: int,
|
||||||
|
): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function [cv::floodFill] fills a connected component starting from the seed point with the
|
||||||
|
* specified color. The connectivity is determined by the color/brightness closeness of the neighbor
|
||||||
|
* pixels. The pixel at `$(x,y)$` is considered to belong to the repainted domain if:
|
||||||
|
*
|
||||||
|
* in case of a grayscale image and floating range `\\[\\texttt{src} (x',y')- \\texttt{loDiff} \\leq
|
||||||
|
* \\texttt{src} (x,y) \\leq \\texttt{src} (x',y')+ \\texttt{upDiff}\\]`
|
||||||
|
* in case of a grayscale image and fixed range `\\[\\texttt{src} ( \\texttt{seedPoint} .x,
|
||||||
|
* \\texttt{seedPoint} .y)- \\texttt{loDiff} \\leq \\texttt{src} (x,y) \\leq \\texttt{src} (
|
||||||
|
* \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)+ \\texttt{upDiff}\\]`
|
||||||
|
* in case of a color image and floating range `\\[\\texttt{src} (x',y')_r- \\texttt{loDiff} _r \\leq
|
||||||
|
* \\texttt{src} (x,y)_r \\leq \\texttt{src} (x',y')_r+ \\texttt{upDiff} _r,\\]` `\\[\\texttt{src}
|
||||||
|
* (x',y')_g- \\texttt{loDiff} _g \\leq \\texttt{src} (x,y)_g \\leq \\texttt{src} (x',y')_g+
|
||||||
|
* \\texttt{upDiff} _g\\]` and `\\[\\texttt{src} (x',y')_b- \\texttt{loDiff} _b \\leq \\texttt{src}
|
||||||
|
* (x,y)_b \\leq \\texttt{src} (x',y')_b+ \\texttt{upDiff} _b\\]`
|
||||||
|
* in case of a color image and fixed range `\\[\\texttt{src} ( \\texttt{seedPoint} .x,
|
||||||
|
* \\texttt{seedPoint} .y)_r- \\texttt{loDiff} _r \\leq \\texttt{src} (x,y)_r \\leq \\texttt{src} (
|
||||||
|
* \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_r+ \\texttt{upDiff} _r,\\]` `\\[\\texttt{src} (
|
||||||
|
* \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_g- \\texttt{loDiff} _g \\leq \\texttt{src} (x,y)_g
|
||||||
|
* \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_g+ \\texttt{upDiff} _g\\]` and
|
||||||
|
* `\\[\\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_b- \\texttt{loDiff} _b \\leq
|
||||||
|
* \\texttt{src} (x,y)_b \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_b+
|
||||||
|
* \\texttt{upDiff} _b\\]`
|
||||||
|
*
|
||||||
|
* where `$src(x',y')$` is the value of one of pixel neighbors that is already known to belong to the
|
||||||
|
* component. That is, to be added to the connected component, a color/brightness of the pixel should
|
||||||
|
* be close enough to:
|
||||||
|
*
|
||||||
|
* Color/brightness of one of its neighbors that already belong to the connected component in case of a
|
||||||
|
* floating range.
|
||||||
|
* Color/brightness of the seed point in case of a fixed range.
|
||||||
|
*
|
||||||
|
* Use these functions to either mark a connected component with the specified color in-place, or build
|
||||||
|
* a mask and then extract the contour, or copy the region to another image, and so on.
|
||||||
|
*
|
||||||
|
* Since the mask is larger than the filled image, a pixel `$(x, y)$` in image corresponds to the pixel
|
||||||
|
* `$(x+1, y+1)$` in the mask .
|
||||||
|
*
|
||||||
|
* [findContours]
|
||||||
|
*
|
||||||
|
* @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
|
||||||
|
* function unless the FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See the
|
||||||
|
* details below.
|
||||||
|
*
|
||||||
|
* @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
|
||||||
|
* taller than image. Since this is both an input and output parameter, you must take responsibility of
|
||||||
|
* initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example, an
|
||||||
|
* edge detector output can be used as a mask to stop filling at edges. On output, pixels in the mask
|
||||||
|
* corresponding to filled pixels in the image are set to 1 or to the a value specified in flags as
|
||||||
|
* described below. Additionally, the function fills the border of the mask with ones to simplify
|
||||||
|
* internal processing. It is therefore possible to use the same mask in multiple calls to the function
|
||||||
|
* to make sure the filled areas do not overlap.
|
||||||
|
*
|
||||||
|
* @param seedPoint Starting point.
|
||||||
|
*
|
||||||
|
* @param newVal New value of the repainted domain pixels.
|
||||||
|
*
|
||||||
|
* @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
|
||||||
|
* repainted domain.
|
||||||
|
*
|
||||||
|
* @param loDiff Maximal lower brightness/color difference between the currently observed pixel and one
|
||||||
|
* of its neighbors belonging to the component, or a seed pixel being added to the component.
|
||||||
|
*
|
||||||
|
* @param upDiff Maximal upper brightness/color difference between the currently observed pixel and one
|
||||||
|
* of its neighbors belonging to the component, or a seed pixel being added to the component.
|
||||||
|
*
|
||||||
|
* @param flags Operation flags. The first 8 bits contain a connectivity value. The default value of 4
|
||||||
|
* means that only the four nearest neighbor pixels (those that share an edge) are considered. A
|
||||||
|
* connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)
|
||||||
|
* will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill the
|
||||||
|
* mask (the default value is 1). For example, 4 | ( 255 << 8 ) will consider 4 nearest neighbours and
|
||||||
|
* fill the mask with a value of 255. The following additional options occupy higher bits and therefore
|
||||||
|
* may be further combined with the connectivity and mask fill values using bit-wise or (|), see
|
||||||
|
* FloodFillFlags.
|
||||||
|
*/
|
||||||
|
export declare function floodFill(
|
||||||
|
image: InputOutputArray,
|
||||||
|
mask: InputOutputArray,
|
||||||
|
seedPoint: Point,
|
||||||
|
newVal: Scalar,
|
||||||
|
rect?: any,
|
||||||
|
loDiff?: Scalar,
|
||||||
|
upDiff?: Scalar,
|
||||||
|
flags?: int,
|
||||||
|
): int;
|
||||||
|
|
||||||
|
/**
 * The function implements the GrabCut image segmentation algorithm.
 *
 * @param img Input 8-bit 3-channel image.
 *
 * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when
 * mode is set to GC_INIT_WITH_RECT. Its elements may have one of the GrabCutClasses.
 *
 * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as "obvious
 * background". The parameter is only used when mode==GC_INIT_WITH_RECT .
 *
 * @param bgdModel Temporary array for the background model. Do not modify it while you are processing
 * the same image.
 *
 * @param fgdModel Temporary arrays for the foreground model. Do not modify it while you are processing
 * the same image.
 *
 * @param iterCount Number of iterations the algorithm should make before returning the result. Note
 * that the result can be refined with further calls with mode==GC_INIT_WITH_MASK or mode==GC_EVAL .
 *
 * @param mode Operation mode that could be one of the GrabCutModes
 */
export declare function grabCut(
  img: InputArray,
  mask: InputOutputArray,
  rect: Rect,
  bgdModel: InputOutputArray,
  fgdModel: InputOutputArray,
  iterCount: int,
  mode?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 *
 * @param src input image, 8-bit or floating-point (32f or 64f).
 *
 * @param sum integral image, 32-bit integer or floating-point (32f or 64f).
 *
 * @param sdepth desired depth of the integral image, CV_32S, CV_32F, or CV_64F.
 */
export declare function integral(
  src: InputArray,
  sum: OutputArray,
  sdepth?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 *
 * @param src input image, 8-bit or floating-point (32f or 64f).
 *
 * @param sum integral image, 32-bit integer or floating-point (32f or 64f).
 *
 * @param sqsum integral image for squared pixel values.
 *
 * @param sdepth desired depth of the integral image, CV_32S, CV_32F, or CV_64F.
 *
 * @param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F.
 */
export declare function integral(
  src: InputArray,
  sum: OutputArray,
  sqsum: OutputArray,
  sdepth?: int,
  sqdepth?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function calculates one or more integral images for the source image as follows:
 *
 * `\\[\\texttt{sum} (X,Y) = \\sum _{x<X,y<Y} \\texttt{image} (x,y)\\]`
 *
 * `\\[\\texttt{sqsum} (X,Y) = \\sum _{x<X,y<Y} \\texttt{image} (x,y)^2\\]`
 *
 * `\\[\\texttt{tilted} (X,Y) = \\sum _{y<Y,abs(x-X+1) \\leq Y-y-1} \\texttt{image} (x,y)\\]`
 *
 * Using these integral images, you can calculate sum, mean, and standard deviation over a specific
 * up-right or rotated rectangular region of the image in a constant time, for example:
 *
 * `\\[\\sum _{x_1 \\leq x < x_2, \\, y_1 \\leq y < y_2} \\texttt{image} (x,y) = \\texttt{sum}
 * (x_2,y_2)- \\texttt{sum} (x_1,y_2)- \\texttt{sum} (x_2,y_1)+ \\texttt{sum} (x_1,y_1)\\]`
 *
 * It makes possible to do a fast blurring or fast block correlation with a variable window size, for
 * example. In case of multi-channel images, sums for each channel are accumulated independently.
 *
 * As a practical example, the next figure shows the calculation of the integral of a straight
 * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the
 * original image are shown, as well as the relative pixels in the integral images sum and tilted .
 *
 * @param src input image as $W \times H$, 8-bit or floating-point (32f or 64f).
 *
 * @param sum integral image as $(W+1)\times (H+1)$ , 32-bit integer or floating-point (32f or 64f).
 *
 * @param sqsum integral image for squared pixel values; it is $(W+1)\times (H+1)$, double-precision
 * floating-point (64f) array.
 *
 * @param tilted integral for the image rotated by 45 degrees; it is $(W+1)\times (H+1)$ array with the
 * same data type as sum.
 *
 * @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or
 * CV_64F.
 *
 * @param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F.
 */
export declare function integral(
  src: InputArray,
  sum: OutputArray,
  sqsum: OutputArray,
  tilted: OutputArray,
  sdepth?: int,
  sqdepth?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function applies fixed-level thresholding to a multiple-channel array. The function is typically
 * used to get a bi-level (binary) image out of a grayscale image ( [compare] could be also used for
 * this purpose) or for removing a noise, that is, filtering out pixels with too small or too large
 * values. There are several types of thresholding supported by the function. They are determined by
 * type parameter.
 *
 * Also, the special values [THRESH_OTSU] or [THRESH_TRIANGLE] may be combined with one of the above
 * values. In these cases, the function determines the optimal threshold value using the Otsu's or
 * Triangle algorithm and uses it instead of the specified thresh.
 *
 * Currently, the Otsu's and Triangle methods are implemented only for 8-bit single-channel images.
 *
 * @see [adaptiveThreshold], [findContours], [compare], [min], [max]
 *
 * @param src input array (multiple-channel, 8-bit or 32-bit floating point).
 *
 * @param dst output array of the same size and type and the same number of channels as src.
 *
 * @param thresh threshold value.
 *
 * @param maxval maximum value to use with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.
 *
 * @param type thresholding type (see ThresholdTypes).
 *
 * @returns the computed threshold value if Otsu's or Triangle methods used.
 */
export declare function threshold(
  src: InputArray,
  dst: OutputArray,
  thresh: double,
  maxval: double,
  type: int,
): double;
|
||||||
|
|
||||||
|
/**
 * The function implements one of the variants of watershed, non-parametric marker-based segmentation
 * algorithm, described in Meyer92 .
 *
 * Before passing the image to the function, you have to roughly outline the desired regions in the
 * image markers with positive (>0) indices. So, every region is represented as one or more connected
 * components with the pixel values 1, 2, 3, and so on. Such markers can be retrieved from a binary
 * mask using [findContours] and [drawContours] (see the watershed.cpp demo). The markers are "seeds"
 * of the future image regions. All the other pixels in markers , whose relation to the outlined
 * regions is not known and should be defined by the algorithm, should be set to 0's. In the function
 * output, each pixel in markers is set to a value of the "seed" components or to -1 at boundaries
 * between the regions.
 *
 * Any two neighbor connected components are not necessarily separated by a watershed boundary (-1's
 * pixels); for example, they can touch each other in the initial marker image passed to the function.
 *
 * @see [findContours]
 *
 * @param image Input 8-bit 3-channel image.
 *
 * @param markers Input/output 32-bit single-channel image (map) of markers. It should have the same
 * size as image .
 */
export declare function watershed(
  image: InputArray,
  markers: InputOutputArray,
): void;
|
||||||
|
|
||||||
|
/**
 * the threshold value `$T(x,y)$` is a mean of the `$\\texttt{blockSize} \\times \\texttt{blockSize}$`
 * neighborhood of `$(x, y)$` minus C
 */
export declare const ADAPTIVE_THRESH_MEAN_C: AdaptiveThresholdTypes; // initializer: = 0

/**
 * the threshold value `$T(x, y)$` is a weighted sum (cross-correlation with a Gaussian window) of the
 * `$\\texttt{blockSize} \\times \\texttt{blockSize}$` neighborhood of `$(x, y)$` minus C . The default
 * sigma (standard deviation) is used for the specified blockSize . See [getGaussianKernel]
 */
export declare const ADAPTIVE_THRESH_GAUSSIAN_C: AdaptiveThresholdTypes; // initializer: = 1
|
||||||
|
|
||||||
|
/**
 * each connected component of zeros in src (as well as all the non-zero pixels closest to the
 * connected component) will be assigned the same label
 */
export declare const DIST_LABEL_CCOMP: DistanceTransformLabelTypes; // initializer: = 0

/**
 * each zero pixel (and all the non-zero pixels closest to it) gets its own label.
 */
export declare const DIST_LABEL_PIXEL: DistanceTransformLabelTypes; // initializer: = 1

/** 3x3 distance-transform mask (per OpenCV DistanceTransformMasks). */
export declare const DIST_MASK_3: DistanceTransformMasks; // initializer: = 3

/** 5x5 distance-transform mask (per OpenCV DistanceTransformMasks). */
export declare const DIST_MASK_5: DistanceTransformMasks; // initializer: = 5

/** precise (exact) distance computation (per OpenCV DistanceTransformMasks). */
export declare const DIST_MASK_PRECISE: DistanceTransformMasks; // initializer: = 0

/** user-defined distance (per OpenCV DistanceTypes). */
export declare const DIST_USER: DistanceTypes; // initializer: = -1

/** L1 (city-block) distance. */
export declare const DIST_L1: DistanceTypes; // initializer: = 1

/** L2 (Euclidean) distance. */
export declare const DIST_L2: DistanceTypes; // initializer: = 2

/** C (Chebyshev / max-coordinate) distance — per OpenCV DistanceTypes. */
export declare const DIST_C: DistanceTypes; // initializer: = 3

/** L1-L2 metric (per OpenCV DistanceTypes). */
export declare const DIST_L12: DistanceTypes; // initializer: = 4

/** "Fair" M-estimator distance (per OpenCV DistanceTypes). */
export declare const DIST_FAIR: DistanceTypes; // initializer: = 5

/** Welsch M-estimator distance (per OpenCV DistanceTypes). */
export declare const DIST_WELSCH: DistanceTypes; // initializer: = 6

/** Huber M-estimator distance (per OpenCV DistanceTypes). */
export declare const DIST_HUBER: DistanceTypes; // initializer: = 7
|
||||||
|
|
||||||
|
/**
 * If set, the difference between the current pixel and seed pixel is considered. Otherwise, the
 * difference between neighbor pixels is considered (that is, the range is floating).
 */
export declare const FLOODFILL_FIXED_RANGE: FloodFillFlags; // initializer: = 1 << 16

/**
 * If set, the function does not change the image ( newVal is ignored), and only fills the mask with
 * the value specified in bits 8-16 of flags as described above. This option only make sense in
 * function variants that have the mask parameter.
 */
export declare const FLOODFILL_MASK_ONLY: FloodFillFlags; // initializer: = 1 << 17
|
||||||
|
|
||||||
|
/** an obvious background pixel */
export declare const GC_BGD: GrabCutClasses; // initializer: = 0

/** an obvious foreground (object) pixel */
export declare const GC_FGD: GrabCutClasses; // initializer: = 1

/** a possible (probable) background pixel */
export declare const GC_PR_BGD: GrabCutClasses; // initializer: = 2

/** a possible (probable) foreground pixel */
export declare const GC_PR_FGD: GrabCutClasses; // initializer: = 3

/**
 * The function initializes the state and the mask using the provided rectangle. After that it runs
 * iterCount iterations of the algorithm.
 */
export declare const GC_INIT_WITH_RECT: GrabCutModes; // initializer: = 0

/**
 * The function initializes the state using the provided mask. Note that GC_INIT_WITH_RECT and
 * GC_INIT_WITH_MASK can be combined. Then, all the pixels outside of the ROI are automatically
 * initialized with GC_BGD .
 */
export declare const GC_INIT_WITH_MASK: GrabCutModes; // initializer: = 1

/**
 * The value means that the algorithm should just resume.
 */
export declare const GC_EVAL: GrabCutModes; // initializer: = 2

/**
 * The value means that the algorithm should just run the grabCut algorithm (a single iteration) with
 * the fixed model
 */
export declare const GC_EVAL_FREEZE_MODEL: GrabCutModes; // initializer: = 3
|
||||||
|
|
||||||
|
/** dst = maxval if src > thresh, else 0 (per OpenCV ThresholdTypes). */
export declare const THRESH_BINARY: ThresholdTypes; // initializer: = 0

/** dst = 0 if src > thresh, else maxval (per OpenCV ThresholdTypes). */
export declare const THRESH_BINARY_INV: ThresholdTypes; // initializer: = 1

/** dst = thresh if src > thresh, else src (per OpenCV ThresholdTypes). */
export declare const THRESH_TRUNC: ThresholdTypes; // initializer: = 2

/** dst = src if src > thresh, else 0 (per OpenCV ThresholdTypes). */
export declare const THRESH_TOZERO: ThresholdTypes; // initializer: = 3

/** dst = 0 if src > thresh, else src (per OpenCV ThresholdTypes). */
export declare const THRESH_TOZERO_INV: ThresholdTypes; // initializer: = 4

/** mask covering the basic threshold types above. */
export declare const THRESH_MASK: ThresholdTypes; // initializer: = 7

/** flag: use Otsu's algorithm to choose the optimal threshold value; combine with one of the types above. */
export declare const THRESH_OTSU: ThresholdTypes; // initializer: = 8

/** flag: use the Triangle algorithm to choose the optimal threshold value; combine with one of the types above. */
export declare const THRESH_TRIANGLE: ThresholdTypes; // initializer: = 16
|
||||||
|
|
||||||
|
/**
 * adaptive threshold algorithm
 *
 * [adaptiveThreshold]
 */
export type AdaptiveThresholdTypes = any;

/**
 * label types for the distance transform (how connected components of zeros are labelled)
 *
 * [DIST_LABEL_CCOMP], [DIST_LABEL_PIXEL]
 */
export type DistanceTransformLabelTypes = any;

/**
 * mask sizes for the distance transform
 *
 * [DIST_MASK_3], [DIST_MASK_5], [DIST_MASK_PRECISE]
 */
export type DistanceTransformMasks = any;

/**
 * distance types used by the distance transform and M-estimators
 *
 * [DIST_USER], [DIST_L1], [DIST_L2], [DIST_C], [DIST_L12], [DIST_FAIR], [DIST_WELSCH], [DIST_HUBER]
 */
export type DistanceTypes = any;

/**
 * operation flags for floodFill
 *
 * [floodFill], [FLOODFILL_FIXED_RANGE], [FLOODFILL_MASK_ONLY]
 */
export type FloodFillFlags = any;

/**
 * class of a pixel in the GrabCut mask
 *
 * [grabCut], [GC_BGD], [GC_FGD], [GC_PR_BGD], [GC_PR_FGD]
 */
export type GrabCutClasses = any;

/**
 * operation modes for grabCut
 *
 * [grabCut], [GC_INIT_WITH_RECT], [GC_INIT_WITH_MASK], [GC_EVAL], [GC_EVAL_FREEZE_MODEL]
 */
export type GrabCutModes = any;

/**
 * thresholding operation types
 *
 * [threshold], [THRESH_BINARY] through [THRESH_TRIANGLE]
 */
export type ThresholdTypes = any;
|
||||||
58
opencv-js-4.10.0/src/types/opencv/imgproc_object.ts
Normal file
58
opencv-js-4.10.0/src/types/opencv/imgproc_object.ts
Normal file
|
|
@ -0,0 +1,58 @@
|
||||||
|
import type { InputArray, int, OutputArray } from "./_types";
|
||||||
|
/*
|
||||||
|
* # Object Detection
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
 * The function slides through image , compares the overlapped patches of size `$w \\times h$` against
 * templ using the specified method and stores the comparison results in result . Here are the formulae
 * for the available comparison methods ( `$I$` denotes image, `$T$` template, `$R$` result ). The
 * summation is done over template and/or the image patch: `$x' = 0...w-1, y' = 0...h-1$`
 *
 * After the function finishes the comparison, the best matches can be found as global minimums (when
 * [TM_SQDIFF] was used) or maximums (when [TM_CCORR] or [TM_CCOEFF] was used) using the [minMaxLoc]
 * function. In case of a color image, template summation in the numerator and each sum in the
 * denominator is done over all of the channels and separate mean values are used for each channel.
 * That is, the function can take a color template and a color image. The result will still be a
 * single-channel image, which is easier to analyze.
 *
 * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point.
 *
 * @param templ Searched template. It must be not greater than the source image and have the same data
 * type.
 *
 * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image
 * is $W \times H$ and templ is $w \times h$ , then result is $(W-w+1) \times (H-h+1)$ .
 *
 * @param method Parameter specifying the comparison method, see TemplateMatchModes
 *
 * @param mask Mask of searched template. It must have the same datatype and size with templ. It is not
 * set by default. Currently, only the TM_SQDIFF and TM_CCORR_NORMED methods are supported.
 */
export declare function matchTemplate(
  image: InputArray,
  templ: InputArray,
  result: OutputArray,
  method: int,
  mask?: InputArray,
): void;
|
||||||
|
|
||||||
|
/** sum of squared differences; best match is the global minimum (see matchTemplate). */
export declare const TM_SQDIFF: TemplateMatchModes; // initializer: = 0

/** normalized sum of squared differences. */
export declare const TM_SQDIFF_NORMED: TemplateMatchModes; // initializer: = 1

/** cross-correlation; best match is the global maximum (see matchTemplate). */
export declare const TM_CCORR: TemplateMatchModes; // initializer: = 2

/** normalized cross-correlation. */
export declare const TM_CCORR_NORMED: TemplateMatchModes; // initializer: = 3

/**
 * `\\[R(x,y)= \\sum _{x',y'} (T'(x',y') \\cdot I'(x+x',y+y'))\\]` where `\\[\\begin{array}{l}
 * T'(x',y')=T(x',y') - 1/(w \\cdot h) \\cdot \\sum _{x'',y''} T(x'',y'') \\\\
 * I'(x+x',y+y')=I(x+x',y+y') - 1/(w \\cdot h) \\cdot \\sum _{x'',y''} I(x+x'',y+y'') \\end{array}\\]`
 */
export declare const TM_CCOEFF: TemplateMatchModes; // initializer: = 4

/** normalized correlation-coefficient matching. */
export declare const TM_CCOEFF_NORMED: TemplateMatchModes; // initializer: = 5
|
||||||
|
|
||||||
|
/**
 * comparison method for template matching
 *
 * [matchTemplate], [TM_SQDIFF] through [TM_CCOEFF_NORMED]
 */
export type TemplateMatchModes = any;
|
||||||
730
opencv-js-4.10.0/src/types/opencv/imgproc_shape.ts
Normal file
730
opencv-js-4.10.0/src/types/opencv/imgproc_shape.ts
Normal file
|
|
@ -0,0 +1,730 @@
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
Circle,
|
||||||
|
double,
|
||||||
|
float,
|
||||||
|
InputArray,
|
||||||
|
int,
|
||||||
|
Moments,
|
||||||
|
OutputArray,
|
||||||
|
OutputArrayOfArrays,
|
||||||
|
Point,
|
||||||
|
Point2f,
|
||||||
|
Rect,
|
||||||
|
RotatedRect,
|
||||||
|
} from "./_types";
|
||||||
|
|
||||||
|
/*
|
||||||
|
* # Structural Analysis and Shape Descriptors
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
 * The function [cv::approxPolyDP] approximates a curve or a polygon with another curve/polygon with
 * less vertices so that the distance between them is less or equal to the specified precision. It uses
 * the Douglas-Peucker algorithm
 *
 * @param curve Input vector of a 2D point stored in std::vector or Mat
 *
 * @param approxCurve Result of the approximation. The type should match the type of the input curve.
 *
 * @param epsilon Parameter specifying the approximation accuracy. This is the maximum distance between
 * the original curve and its approximation.
 *
 * @param closed If true, the approximated curve is closed (its first and last vertices are connected).
 * Otherwise, it is not closed.
 */
export declare function approxPolyDP(
  curve: InputArray,
  approxCurve: OutputArray,
  epsilon: double,
  closed: bool,
): void;
|
||||||
|
|
||||||
|
/**
 * The function computes a curve length or a closed contour perimeter.
 *
 * @param curve Input vector of 2D points, stored in std::vector or Mat.
 *
 * @param closed Flag indicating whether the curve is closed or not.
 *
 * @returns the curve length or the closed contour perimeter.
 */
export declare function arcLength(curve: InputArray, closed: bool): double;
|
||||||
|
|
||||||
|
/**
 * The function calculates and returns the minimal up-right bounding rectangle for the specified point
 * set or non-zero pixels of gray-scale image.
 *
 * @param array Input gray-scale image or 2D point set, stored in std::vector or Mat.
 *
 * @returns the minimal up-right bounding rectangle.
 */
export declare function boundingRect(array: InputArray): Rect;
|
||||||
|
|
||||||
|
/**
 * The function finds the four vertices of a rotated rectangle. This function is useful to draw the
 * rectangle. In C++, instead of using this function, you can directly use [RotatedRect::points]
 * method. Please visit the [tutorial on Creating Bounding rotated boxes and ellipses for contours] for
 * more information.
 *
 * @param box The input rotated rectangle. It may be the output of [minAreaRect] (per the OpenCV
 * reference; the original sentence was truncated by the doc generator).
 *
 * @param points The output array of four vertices of rectangles.
 */
export declare function boxPoints(box: RotatedRect, points: OutputArray): void;
|
||||||
|
|
||||||
|
/**
 * image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0
 * represents the background label. ltype specifies the output label image type, an important
 * consideration based on the total number of labels or alternatively the total number of pixels in the
 * source image. ccltype specifies the connected components labeling algorithm to use, currently Grana
 * (BBDT) and Wu's (SAUF) algorithms are supported, see the [ConnectedComponentsAlgorithmsTypes] for
 * details. Note that SAUF algorithm forces a row major ordering of labels while BBDT does not. This
 * function uses parallel version of both Grana and Wu's algorithms if at least one allowed parallel
 * framework is enabled and if the rows of the image are at least twice the number returned by
 * [getNumberOfCPUs].
 *
 * @param image the 8-bit single-channel image to be labeled
 *
 * @param labels destination labeled image
 *
 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
 *
 * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
 *
 * @param ccltype connected components algorithm type (see the ConnectedComponentsAlgorithmsTypes).
 *
 * @returns N, the total number of labels [0, N-1] where 0 represents the background label.
 */
export declare function connectedComponents(
  image: InputArray,
  labels: OutputArray,
  connectivity: int,
  ltype: int,
  ccltype: int,
): int;
|
||||||
|
|
||||||
|
/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 *
 * @param image the 8-bit single-channel image to be labeled
 *
 * @param labels destination labeled image
 *
 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
 *
 * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
 *
 * @returns N, the total number of labels [0, N-1] where 0 represents the background label.
 */
export declare function connectedComponents(
  image: InputArray,
  labels: OutputArray,
  connectivity?: int,
  ltype?: int,
): int;
|
||||||
|
|
||||||
|
/**
 * image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0
 * represents the background label. ltype specifies the output label image type, an important
 * consideration based on the total number of labels or alternatively the total number of pixels in the
 * source image. ccltype specifies the connected components labeling algorithm to use, currently
 * Grana's (BBDT) and Wu's (SAUF) algorithms are supported, see the
 * [ConnectedComponentsAlgorithmsTypes] for details. Note that SAUF algorithm forces a row major
 * ordering of labels while BBDT does not. This function uses parallel version of both Grana and Wu's
 * algorithms (statistics included) if at least one allowed parallel framework is enabled and if the
 * rows of the image are at least twice the number returned by [getNumberOfCPUs].
 *
 * @param image the 8-bit single-channel image to be labeled
 *
 * @param labels destination labeled image
 *
 * @param stats statistics output for each label, including the background label, see below for
 * available statistics. Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
 * ConnectedComponentsTypes. The data type is CV_32S.
 *
 * @param centroids centroid output for each label, including the background label. Centroids are
 * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
 *
 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
 *
 * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
 *
 * @param ccltype connected components algorithm type (see ConnectedComponentsAlgorithmsTypes).
 *
 * @returns N, the total number of labels [0, N-1] where 0 represents the background label.
 */
export declare function connectedComponentsWithStats(
  image: InputArray,
  labels: OutputArray,
  stats: OutputArray,
  centroids: OutputArray,
  connectivity: int,
  ltype: int,
  ccltype: int,
): int;
|
||||||
|
|
||||||
|
/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 *
 * @param image the 8-bit single-channel image to be labeled
 *
 * @param labels destination labeled image
 *
 * @param stats statistics output for each label, including the background label, see below for
 * available statistics. Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
 * ConnectedComponentsTypes. The data type is CV_32S.
 *
 * @param centroids centroid output for each label, including the background label. Centroids are
 * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
 *
 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
 *
 * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
 *
 * @returns N, the total number of labels [0, N-1] where 0 represents the background label.
 */
export declare function connectedComponentsWithStats(
  image: InputArray,
  labels: OutputArray,
  stats: OutputArray,
  centroids: OutputArray,
  connectivity?: int,
  ltype?: int,
): int;
|
||||||
|
|
||||||
|
/**
 * The function computes a contour area. Similarly to moments , the area is computed using the Green
 * formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using
 * [drawContours] or [fillPoly] , can be different. Also, the function will most certainly give a wrong
 * results for contours with self-intersections.
 *
 * Example:
 *
 * ```cpp
 * vector<Point> contour;
 * contour.push_back(Point2f(0, 0));
 * contour.push_back(Point2f(10, 0));
 * contour.push_back(Point2f(10, 10));
 * contour.push_back(Point2f(5, 4));
 *
 * double area0 = contourArea(contour);
 * vector<Point> approx;
 * approxPolyDP(contour, approx, 5, true);
 * double area1 = contourArea(approx);
 *
 * cout << "area0 =" << area0 << endl <<
 * "area1 =" << area1 << endl <<
 * "approx poly vertices" << approx.size() << endl;
 * ```
 *
 * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.
 *
 * @param oriented Oriented area flag. If it is true, the function returns a signed area value,
 * depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can
 * determine orientation of a contour by taking the sign of an area. By default, the parameter is
 * false, which means that the absolute value is returned.
 *
 * @returns the computed contour area (signed if oriented is true, absolute otherwise).
 */
export declare function contourArea(
  contour: InputArray,
  oriented?: bool,
): double;
|
||||||
|
|
||||||
|
/**
 * The function [cv::convexHull] finds the convex hull of a 2D point set using the Sklansky's algorithm
 * Sklansky82 that has *O(N logN)* complexity in the current implementation.
 *
 * `points` and `hull` should be different arrays, inplace processing isn't supported.
 * Check [the corresponding tutorial] for more details.
 *
 * useful links:
 *
 * @param points Input 2D point set, stored in std::vector or Mat.
 *
 * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In
 * the first case, the hull elements are 0-based indices of the convex hull points in the original
 * array (since the set of convex hull points is a subset of the original point set). In the second
 * case, hull elements are the convex hull points themselves.
 *
 * @param clockwise Orientation flag. If it is true, the output convex hull is oriented clockwise.
 * Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing
 * to the right, and its Y axis pointing upwards.
 *
 * @param returnPoints Operation flag. In case of a matrix, when the flag is true, the function returns
 * convex hull points. Otherwise, it returns indices of the convex hull points. When the output array
 * is std::vector, the flag is ignored, and the output depends on the type of the vector:
 * std::vector<int> implies returnPoints=false, std::vector<Point> implies returnPoints=true.
 */
export declare function convexHull(
  points: InputArray,
  hull: OutputArray,
  clockwise?: bool,
  returnPoints?: bool,
): void;
|
||||||
|
|
||||||
|
/**
 * The figure below displays convexity defects of a hand contour:
 *
 * @param contour Input contour.
 *
 * @param convexhull Convex hull obtained using convexHull that should contain indices of the contour
 * points that make the hull.
 *
 * @param convexityDefects The output vector of convexity defects. In C++ and the new Python/Java
 * interface each convexity defect is represented as 4-element integer vector (a.k.a. Vec4i):
 * (start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices in the
 * original contour of the convexity defect beginning, end and the farthest point, and fixpt_depth is
 * fixed-point approximation (with 8 fractional bits) of the distance between the farthest contour
 * point and the hull. That is, to get the floating-point value of the depth will be fixpt_depth/256.0.
 */
export declare function convexityDefects(
  contour: InputArray,
  convexhull: InputArray,
  convexityDefects: OutputArray,
): void;
|
||||||
|
|
||||||
|
/**
 * Factory for a GeneralizedHoughBallard detector (Ballard variant of the generalized Hough
 * transform). Return type left as `any` by the upstream generator — presumably a smart pointer
 * to the detector object; see the OpenCV reference to confirm.
 */
export declare function createGeneralizedHoughBallard(): any;
|
||||||
|
|
||||||
|
export declare function createGeneralizedHoughGuil(): any;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function retrieves contours from the binary image using the algorithm Suzuki85 . The contours
|
||||||
|
* are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the
|
||||||
|
* OpenCV sample directory.
|
||||||
|
*
|
||||||
|
* Since opencv 3.2 source image is not modified by this function.
|
||||||
|
*
|
||||||
|
* @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels
|
||||||
|
* remain 0's, so the image is treated as binary . You can use compare, inRange, threshold ,
|
||||||
|
* adaptiveThreshold, Canny, and others to create a binary image out of a grayscale or color one. If
|
||||||
|
* mode equals to RETR_CCOMP or RETR_FLOODFILL, the input can also be a 32-bit integer image of labels
|
||||||
|
* (CV_32SC1).
|
||||||
|
*
|
||||||
|
* @param contours Detected contours. Each contour is stored as a vector of points (e.g.
|
||||||
|
* std::vector<std::vector<cv::Point> >).
|
||||||
|
*
|
||||||
|
* @param hierarchy Optional output vector (e.g. std::vector<cv::Vec4i>), containing information about
|
||||||
|
* the image topology. It has as many elements as the number of contours. For each i-th contour
|
||||||
|
* contours[i], the elements hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3]
|
||||||
|
* are set to 0-based indices in contours of the next and previous contours at the same hierarchical
|
||||||
|
* level, the first child contour and the parent contour, respectively. If for the contour i there are
|
||||||
|
* no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be
|
||||||
|
* negative.
|
||||||
|
*
|
||||||
|
* @param mode Contour retrieval mode, see RetrievalModes
|
||||||
|
*
|
||||||
|
* @param method Contour approximation method, see ContourApproximationModes
|
||||||
|
*
|
||||||
|
* @param offset Optional offset by which every contour point is shifted. This is useful if the
|
||||||
|
* contours are extracted from the image ROI and then they should be analyzed in the whole image
|
||||||
|
* context.
|
||||||
|
*/
|
||||||
|
export declare function findContours(
|
||||||
|
image: InputArray,
|
||||||
|
contours: OutputArrayOfArrays,
|
||||||
|
hierarchy: OutputArray,
|
||||||
|
mode: int,
|
||||||
|
method: int,
|
||||||
|
offset?: Point,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function findContours(
|
||||||
|
image: InputArray,
|
||||||
|
contours: OutputArrayOfArrays,
|
||||||
|
mode: int,
|
||||||
|
method: int,
|
||||||
|
offset?: Point,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function calculates the ellipse that fits (in a least-squares sense) a set of 2D points best of
|
||||||
|
* all. It returns the rotated rectangle in which the ellipse is inscribed. The first algorithm
|
||||||
|
* described by Fitzgibbon95 is used. Developer should keep in mind that it is possible that the
|
||||||
|
* returned ellipse/rotatedRect data contains negative indices, due to the data points being close to
|
||||||
|
* the border of the containing [Mat] element.
|
||||||
|
*
|
||||||
|
* @param points Input 2D point set, stored in std::vector<> or Mat
|
||||||
|
*/
|
||||||
|
export declare function fitEllipse(points: InputArray): RotatedRect;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function calculates the ellipse that fits a set of 2D points. It returns the rotated rectangle
|
||||||
|
* in which the ellipse is inscribed. The Approximate Mean Square (AMS) proposed by Taubin1991 is used.
|
||||||
|
*
|
||||||
|
* For an ellipse, this basis set is `$ \\chi= \\left(x^2, x y, y^2, x, y, 1\\right) $`, which is a set
|
||||||
|
* of six free coefficients `$
|
||||||
|
* A^T=\\left\\{A_{\\text{xx}},A_{\\text{xy}},A_{\\text{yy}},A_x,A_y,A_0\\right\\} $`. However, to
|
||||||
|
* specify an ellipse, all that is needed is five numbers; the major and minor axes lengths `$ (a,b)
|
||||||
|
* $`, the position `$ (x_0,y_0) $`, and the orientation `$ \\theta $`. This is because the basis set
|
||||||
|
* includes lines, quadratics, parabolic and hyperbolic functions as well as elliptical functions as
|
||||||
|
* possible fits. If the fit is found to be a parabolic or hyperbolic function then the standard
|
||||||
|
* [fitEllipse] method is used. The AMS method restricts the fit to parabolic, hyperbolic and
|
||||||
|
* elliptical curves by imposing the condition that `$ A^T ( D_x^T D_x + D_y^T D_y) A = 1 $` where the
|
||||||
|
* matrices `$ Dx $` and `$ Dy $` are the partial derivatives of the design matrix `$ D $` with respect
|
||||||
|
* to x and y. The matrices are formed row by row applying the following to each of the points in the
|
||||||
|
* set: `\\begin{align*} D(i,:)&=\\left\\{x_i^2, x_i y_i, y_i^2, x_i, y_i, 1\\right\\} &
|
||||||
|
* D_x(i,:)&=\\left\\{2 x_i,y_i,0,1,0,0\\right\\} & D_y(i,:)&=\\left\\{0,x_i,2 y_i,0,1,0\\right\\}
|
||||||
|
* \\end{align*}` The AMS method minimizes the cost function `\\begin{equation*} \\epsilon ^2=\\frac{
|
||||||
|
* A^T D^T D A }{ A^T (D_x^T D_x + D_y^T D_y) A^T } \\end{equation*}`
|
||||||
|
*
|
||||||
|
* The minimum cost is found by solving the generalized eigenvalue problem.
|
||||||
|
*
|
||||||
|
* `\\begin{equation*} D^T D A = \\lambda \\left( D_x^T D_x + D_y^T D_y\\right) A \\end{equation*}`
|
||||||
|
*
|
||||||
|
* @param points Input 2D point set, stored in std::vector<> or Mat
|
||||||
|
*/
|
||||||
|
export declare function fitEllipseAMS(points: InputArray): RotatedRect;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function calculates the ellipse that fits a set of 2D points. It returns the rotated rectangle
|
||||||
|
* in which the ellipse is inscribed. The Direct least square (Direct) method by Fitzgibbon1999 is
|
||||||
|
* used.
|
||||||
|
*
|
||||||
|
* For an ellipse, this basis set is `$ \\chi= \\left(x^2, x y, y^2, x, y, 1\\right) $`, which is a set
|
||||||
|
* of six free coefficients `$
|
||||||
|
* A^T=\\left\\{A_{\\text{xx}},A_{\\text{xy}},A_{\\text{yy}},A_x,A_y,A_0\\right\\} $`. However, to
|
||||||
|
* specify an ellipse, all that is needed is five numbers; the major and minor axes lengths `$ (a,b)
|
||||||
|
* $`, the position `$ (x_0,y_0) $`, and the orientation `$ \\theta $`. This is because the basis set
|
||||||
|
* includes lines, quadratics, parabolic and hyperbolic functions as well as elliptical functions as
|
||||||
|
* possible fits. The Direct method confines the fit to ellipses by ensuring that `$ 4 A_{xx} A_{yy}-
|
||||||
|
* A_{xy}^2 > 0 $`. The condition imposed is that `$ 4 A_{xx} A_{yy}- A_{xy}^2=1 $` which satisfies the
|
||||||
|
* inequality and as the coefficients can be arbitrarily scaled is not overly restrictive.
|
||||||
|
*
|
||||||
|
* `\\begin{equation*} \\epsilon ^2= A^T D^T D A \\quad \\text{with} \\quad A^T C A =1 \\quad
|
||||||
|
* \\text{and} \\quad C=\\left(\\begin{matrix} 0 & 0 & 2 & 0 & 0 & 0 \\\\ 0 & -1 & 0 & 0 & 0 & 0 \\\\ 2
|
||||||
|
* & 0 & 0 & 0 & 0 & 0 \\\\ 0 & 0 & 0 & 0 & 0 & 0 \\\\ 0 & 0 & 0 & 0 & 0 & 0 \\\\ 0 & 0 & 0 & 0 & 0 & 0
|
||||||
|
* \\end{matrix} \\right) \\end{equation*}`
|
||||||
|
*
|
||||||
|
* The minimum cost is found by solving the generalized eigenvalue problem.
|
||||||
|
*
|
||||||
|
* `\\begin{equation*} D^T D A = \\lambda \\left( C\\right) A \\end{equation*}`
|
||||||
|
*
|
||||||
|
* The system produces only one positive eigenvalue `$ \\lambda$` which is chosen as the solution with
|
||||||
|
* its eigenvector `$\\mathbf{u}$`. These are used to find the coefficients
|
||||||
|
*
|
||||||
|
* `\\begin{equation*} A = \\sqrt{\\frac{1}{\\mathbf{u}^T C \\mathbf{u}}} \\mathbf{u} \\end{equation*}`
|
||||||
|
* The scaling factor guarantees that `$A^T C A =1$`.
|
||||||
|
*
|
||||||
|
* @param points Input 2D point set, stored in std::vector<> or Mat
|
||||||
|
*/
|
||||||
|
export declare function fitEllipseDirect(points: InputArray): RotatedRect;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function fitLine fits a line to a 2D or 3D point set by minimizing `$\\sum_i \\rho(r_i)$` where
|
||||||
|
* `$r_i$` is a distance between the `$i^{th}$` point, the line and `$\\rho(r)$` is a distance
|
||||||
|
* function, one of the following:
|
||||||
|
*
|
||||||
|
* DIST_L2 `\\[\\rho (r) = r^2/2 \\quad \\text{(the simplest and the fastest least-squares method)}\\]`
|
||||||
|
* DIST_L1 `\\[\\rho (r) = r\\]`
|
||||||
|
* DIST_L12 `\\[\\rho (r) = 2 \\cdot ( \\sqrt{1 + \\frac{r^2}{2}} - 1)\\]`
|
||||||
|
* DIST_FAIR `\\[\\rho \\left (r \\right ) = C^2 \\cdot \\left ( \\frac{r}{C} - \\log{\\left(1 +
|
||||||
|
* \\frac{r}{C}\\right)} \\right ) \\quad \\text{where} \\quad C=1.3998\\]`
|
||||||
|
* DIST_WELSCH `\\[\\rho \\left (r \\right ) = \\frac{C^2}{2} \\cdot \\left ( 1 -
|
||||||
|
* \\exp{\\left(-\\left(\\frac{r}{C}\\right)^2\\right)} \\right ) \\quad \\text{where} \\quad
|
||||||
|
* C=2.9846\\]`
|
||||||
|
* DIST_HUBER `\\[\\rho (r) = \\fork{r^2/2}{if \\(r < C\\)}{C \\cdot (r-C/2)}{otherwise} \\quad
|
||||||
|
* \\text{where} \\quad C=1.345\\]`
|
||||||
|
*
|
||||||
|
* The algorithm is based on the M-estimator ( ) technique that iteratively fits the line using the
|
||||||
|
* weighted least-squares algorithm. After each iteration the weights `$w_i$` are adjusted to be
|
||||||
|
* inversely proportional to `$\\rho(r_i)$` .
|
||||||
|
*
|
||||||
|
* @param points Input vector of 2D or 3D points, stored in std::vector<> or Mat.
|
||||||
|
*
|
||||||
|
* @param line Output line parameters. In case of 2D fitting, it should be a vector of 4 elements (like
|
||||||
|
* Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and (x0, y0)
|
||||||
|
* is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like Vec6f) -
|
||||||
|
* (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line and (x0,
|
||||||
|
* y0, z0) is a point on the line.
|
||||||
|
*
|
||||||
|
* @param distType Distance used by the M-estimator, see DistanceTypes
|
||||||
|
*
|
||||||
|
* @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value is
|
||||||
|
* chosen.
|
||||||
|
*
|
||||||
|
* @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
|
||||||
|
* line).
|
||||||
|
*
|
||||||
|
* @param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps.
|
||||||
|
*/
|
||||||
|
export declare function fitLine(
|
||||||
|
points: InputArray,
|
||||||
|
line: OutputArray,
|
||||||
|
distType: int,
|
||||||
|
param: double,
|
||||||
|
reps: double,
|
||||||
|
aeps: double,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function calculates seven Hu invariants (introduced in Hu62; see also ) defined as:
|
||||||
|
*
|
||||||
|
* `\\[\\begin{array}{l} hu[0]= \\eta _{20}+ \\eta _{02} \\\\ hu[1]=( \\eta _{20}- \\eta _{02})^{2}+4
|
||||||
|
* \\eta _{11}^{2} \\\\ hu[2]=( \\eta _{30}-3 \\eta _{12})^{2}+ (3 \\eta _{21}- \\eta _{03})^{2} \\\\
|
||||||
|
* hu[3]=( \\eta _{30}+ \\eta _{12})^{2}+ ( \\eta _{21}+ \\eta _{03})^{2} \\\\ hu[4]=( \\eta _{30}-3
|
||||||
|
* \\eta _{12})( \\eta _{30}+ \\eta _{12})[( \\eta _{30}+ \\eta _{12})^{2}-3( \\eta _{21}+ \\eta
|
||||||
|
* _{03})^{2}]+(3 \\eta _{21}- \\eta _{03})( \\eta _{21}+ \\eta _{03})[3( \\eta _{30}+ \\eta
|
||||||
|
* _{12})^{2}-( \\eta _{21}+ \\eta _{03})^{2}] \\\\ hu[5]=( \\eta _{20}- \\eta _{02})[( \\eta _{30}+
|
||||||
|
* \\eta _{12})^{2}- ( \\eta _{21}+ \\eta _{03})^{2}]+4 \\eta _{11}( \\eta _{30}+ \\eta _{12})( \\eta
|
||||||
|
* _{21}+ \\eta _{03}) \\\\ hu[6]=(3 \\eta _{21}- \\eta _{03})( \\eta _{21}+ \\eta _{03})[3( \\eta
|
||||||
|
* _{30}+ \\eta _{12})^{2}-( \\eta _{21}+ \\eta _{03})^{2}]-( \\eta _{30}-3 \\eta _{12})( \\eta _{21}+
|
||||||
|
* \\eta _{03})[3( \\eta _{30}+ \\eta _{12})^{2}-( \\eta _{21}+ \\eta _{03})^{2}] \\\\ \\end{array}\\]`
|
||||||
|
*
|
||||||
|
* where `$\\eta_{ji}$` stands for `$\\texttt{Moments::nu}_{ji}$` .
|
||||||
|
*
|
||||||
|
* These values are proved to be invariants to the image scale, rotation, and reflection except the
|
||||||
|
* seventh one, whose sign is changed by reflection. This invariance is proved with the assumption of
|
||||||
|
* infinite image resolution. In case of raster images, the computed Hu invariants for the original and
|
||||||
|
* transformed images are a bit different.
|
||||||
|
*
|
||||||
|
* [matchShapes]
|
||||||
|
*
|
||||||
|
* @param moments Input moments computed with moments .
|
||||||
|
*
|
||||||
|
* @param hu Output Hu invariants.
|
||||||
|
*/
|
||||||
|
export declare function HuMoments(moments: any, hu: double): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function HuMoments(m: any, hu: OutputArray): void;
|
||||||
|
|
||||||
|
export declare function intersectConvexConvex(
|
||||||
|
_p1: InputArray,
|
||||||
|
_p2: InputArray,
|
||||||
|
_p12: OutputArray,
|
||||||
|
handleNested?: bool,
|
||||||
|
): float;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function tests whether the input contour is convex or not. The contour must be simple, that is,
|
||||||
|
* without self-intersections. Otherwise, the function output is undefined.
|
||||||
|
*
|
||||||
|
* @param contour Input vector of 2D points, stored in std::vector<> or Mat
|
||||||
|
*/
|
||||||
|
export declare function isContourConvex(contour: InputArray): bool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function compares two shapes. All three implemented methods use the Hu invariants (see
|
||||||
|
* [HuMoments])
|
||||||
|
*
|
||||||
|
* @param contour1 First contour or grayscale image.
|
||||||
|
*
|
||||||
|
* @param contour2 Second contour or grayscale image.
|
||||||
|
*
|
||||||
|
* @param method Comparison method, see ShapeMatchModes
|
||||||
|
*
|
||||||
|
* @param parameter Method-specific parameter (not supported now).
|
||||||
|
*/
|
||||||
|
export declare function matchShapes(
|
||||||
|
contour1: InputArray,
|
||||||
|
contour2: InputArray,
|
||||||
|
method: int,
|
||||||
|
parameter: double,
|
||||||
|
): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a
|
||||||
|
* specified point set. Developer should keep in mind that the returned [RotatedRect] can contain
|
||||||
|
* negative indices when data is close to the containing [Mat] element boundary.
|
||||||
|
*
|
||||||
|
* @param points Input vector of 2D points, stored in std::vector<> or Mat
|
||||||
|
*/
|
||||||
|
export declare function minAreaRect(points: InputArray): RotatedRect;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function finds the minimal enclosing circle of a 2D point set using an iterative algorithm.
|
||||||
|
*
|
||||||
|
* @param points Input vector of 2D points, stored in std::vector<> or Mat
|
||||||
|
*/
|
||||||
|
export declare function minEnclosingCircle(
|
||||||
|
points: InputArray,
|
||||||
|
): Circle;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function finds a triangle of minimum area enclosing the given set of 2D points and returns its
|
||||||
|
* area. The output for a given 2D point set is shown in the image below. 2D points are depicted in
|
||||||
|
* red* and the enclosing triangle in *yellow*.
|
||||||
|
*
|
||||||
|
* The implementation of the algorithm is based on O'Rourke's ORourke86 and Klee and Laskowski's
|
||||||
|
* KleeLaskowski85 papers. O'Rourke provides a `$\\theta(n)$` algorithm for finding the minimal
|
||||||
|
* enclosing triangle of a 2D convex polygon with n vertices. Since the [minEnclosingTriangle] function
|
||||||
|
* takes a 2D point set as input an additional preprocessing step of computing the convex hull of the
|
||||||
|
* 2D point set is required. The complexity of the [convexHull] function is `$O(n log(n))$` which is
|
||||||
|
* higher than `$\\theta(n)$`. Thus the overall complexity of the function is `$O(n log(n))$`.
|
||||||
|
*
|
||||||
|
* @param points Input vector of 2D points with depth CV_32S or CV_32F, stored in std::vector<> or Mat
|
||||||
|
*
|
||||||
|
* @param triangle Output vector of three 2D points defining the vertices of the triangle. The depth of
|
||||||
|
* the OutputArray must be CV_32F.
|
||||||
|
*/
|
||||||
|
export declare function minEnclosingTriangle(
|
||||||
|
points: InputArray,
|
||||||
|
triangle: OutputArray,
|
||||||
|
): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The
|
||||||
|
* results are returned in the structure [cv::Moments].
|
||||||
|
*
|
||||||
|
* moments.
|
||||||
|
*
|
||||||
|
* Only applicable to contour moments calculations from Python bindings: Note that the numpy type for
|
||||||
|
* the input array should be either np.int32 or np.float32.
|
||||||
|
*
|
||||||
|
* [contourArea], [arcLength]
|
||||||
|
*
|
||||||
|
* @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( $1 \times
|
||||||
|
* N$ or $N \times 1$ ) of 2D points (Point or Point2f ).
|
||||||
|
*
|
||||||
|
* @param binaryImage If it is true, all non-zero image pixels are treated as 1's. The parameter is
|
||||||
|
* used for images only.
|
||||||
|
*/
|
||||||
|
export declare function moments(array: InputArray, binaryImage?: bool): Moments;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function determines whether the point is inside a contour, outside, or lies on an edge (or
|
||||||
|
* coincides with a vertex). It returns positive (inside), negative (outside), or zero (on an edge)
|
||||||
|
* value, correspondingly. When measureDist=false , the return value is +1, -1, and 0, respectively.
|
||||||
|
* Otherwise, the return value is a signed distance between the point and the nearest contour edge.
|
||||||
|
*
|
||||||
|
* See below a sample output of the function where each image pixel is tested against the contour:
|
||||||
|
*
|
||||||
|
* @param contour Input contour.
|
||||||
|
*
|
||||||
|
* @param pt Point tested against the contour.
|
||||||
|
*
|
||||||
|
* @param measureDist If true, the function estimates the signed distance from the point to the nearest
|
||||||
|
* contour edge. Otherwise, the function only checks if the point is inside a contour or not.
|
||||||
|
*/
|
||||||
|
export declare function pointPolygonTest(
|
||||||
|
contour: InputArray,
|
||||||
|
pt: Point2f,
|
||||||
|
measureDist: bool,
|
||||||
|
): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If there is then the vertices of the intersecting region are returned as well.
|
||||||
|
*
|
||||||
|
* Below are some examples of intersection configurations. The hatched pattern indicates the
|
||||||
|
* intersecting region and the red vertices are returned by the function.
|
||||||
|
*
|
||||||
|
* One of [RectanglesIntersectTypes]
|
||||||
|
*
|
||||||
|
* @param rect1 First rectangle
|
||||||
|
*
|
||||||
|
* @param rect2 Second rectangle
|
||||||
|
*
|
||||||
|
* @param intersectingRegion The output array of the vertices of the intersecting region. It returns at
|
||||||
|
* most 8 vertices. Stored as std::vector<cv::Point2f> or cv::Mat as Mx1 of type CV_32FC2.
|
||||||
|
*/
|
||||||
|
export declare function rotatedRectangleIntersection(
|
||||||
|
rect1: any,
|
||||||
|
rect2: any,
|
||||||
|
intersectingRegion: OutputArray,
|
||||||
|
): int;
|
||||||
|
|
||||||
|
export declare const CCL_WU: ConnectedComponentsAlgorithmsTypes; // initializer: = 0
|
||||||
|
|
||||||
|
export declare const CCL_DEFAULT: ConnectedComponentsAlgorithmsTypes; // initializer: = -1
|
||||||
|
|
||||||
|
export declare const CCL_GRANA: ConnectedComponentsAlgorithmsTypes; // initializer: = 1
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The leftmost (x) coordinate which is the inclusive start of the bounding box in the horizontal
|
||||||
|
* direction.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const CC_STAT_LEFT: ConnectedComponentsTypes; // initializer: = 0
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The topmost (y) coordinate which is the inclusive start of the bounding box in the vertical
|
||||||
|
* direction.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const CC_STAT_TOP: ConnectedComponentsTypes; // initializer: = 1
|
||||||
|
|
||||||
|
export declare const CC_STAT_WIDTH: ConnectedComponentsTypes; // initializer: = 2
|
||||||
|
|
||||||
|
export declare const CC_STAT_HEIGHT: ConnectedComponentsTypes; // initializer: = 3
|
||||||
|
|
||||||
|
export declare const CC_STAT_AREA: ConnectedComponentsTypes; // initializer: = 4
|
||||||
|
|
||||||
|
export declare const CC_STAT_MAX: ConnectedComponentsTypes; // initializer: = 5
|
||||||
|
|
||||||
|
/**
|
||||||
|
* stores absolutely all the contour points. That is, any 2 subsequent points (x1,y1) and (x2,y2) of
|
||||||
|
* the contour will be either horizontal, vertical or diagonal neighbors, that is,
|
||||||
|
* max(abs(x1-x2),abs(y2-y1))==1.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const CHAIN_APPROX_NONE: ContourApproximationModes; // initializer: = 1
|
||||||
|
|
||||||
|
/**
|
||||||
|
* compresses horizontal, vertical, and diagonal segments and leaves only their end points. For
|
||||||
|
* example, an up-right rectangular contour is encoded with 4 points.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const CHAIN_APPROX_SIMPLE: ContourApproximationModes; // initializer: = 2
|
||||||
|
|
||||||
|
/**
|
||||||
|
* applies one of the flavors of the Teh-Chin chain approximation algorithm TehChin89
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const CHAIN_APPROX_TC89_L1: ContourApproximationModes; // initializer: = 3
|
||||||
|
|
||||||
|
/**
|
||||||
|
* applies one of the flavors of the Teh-Chin chain approximation algorithm TehChin89
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const CHAIN_APPROX_TC89_KCOS: ContourApproximationModes; // initializer: = 4
|
||||||
|
|
||||||
|
export declare const INTERSECT_NONE: RectanglesIntersectTypes; // initializer: = 0
|
||||||
|
|
||||||
|
export declare const INTERSECT_PARTIAL: RectanglesIntersectTypes; // initializer: = 1
|
||||||
|
|
||||||
|
export declare const INTERSECT_FULL: RectanglesIntersectTypes; // initializer: = 2
|
||||||
|
|
||||||
|
/**
|
||||||
|
* retrieves only the extreme outer contours. It sets `hierarchy[i][2]=hierarchy[i][3]=-1` for all the
|
||||||
|
* contours.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const RETR_EXTERNAL: RetrievalModes; // initializer: = 0
|
||||||
|
|
||||||
|
/**
|
||||||
|
* retrieves all of the contours without establishing any hierarchical relationships.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const RETR_LIST: RetrievalModes; // initializer: = 1
|
||||||
|
|
||||||
|
/**
|
||||||
|
* retrieves all of the contours and organizes them into a two-level hierarchy. At the top level, there
|
||||||
|
* are external boundaries of the components. At the second level, there are boundaries of the holes.
|
||||||
|
* If there is another contour inside a hole of a connected component, it is still put at the top
|
||||||
|
* level.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const RETR_CCOMP: RetrievalModes; // initializer: = 2
|
||||||
|
|
||||||
|
/**
|
||||||
|
* retrieves all of the contours and reconstructs a full hierarchy of nested contours.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
export declare const RETR_TREE: RetrievalModes; // initializer: = 3
|
||||||
|
|
||||||
|
export declare const RETR_FLOODFILL: RetrievalModes; // initializer: = 4
|
||||||
|
|
||||||
|
export declare const CONTOURS_MATCH_I1: ShapeMatchModes; // initializer: =1
|
||||||
|
|
||||||
|
export declare const CONTOURS_MATCH_I2: ShapeMatchModes; // initializer: =2
|
||||||
|
|
||||||
|
export declare const CONTOURS_MATCH_I3: ShapeMatchModes; // initializer: =3
|
||||||
|
|
||||||
|
export type ConnectedComponentsAlgorithmsTypes = any;
|
||||||
|
|
||||||
|
export type ConnectedComponentsTypes = any;
|
||||||
|
|
||||||
|
export type ContourApproximationModes = any;
|
||||||
|
|
||||||
|
export type RectanglesIntersectTypes = any;
|
||||||
|
|
||||||
|
export type RetrievalModes = any;
|
||||||
|
|
||||||
|
export type ShapeMatchModes = any;
|
||||||
574
opencv-js-4.10.0/src/types/opencv/imgproc_transform.ts
Normal file
574
opencv-js-4.10.0/src/types/opencv/imgproc_transform.ts
Normal file
|
|
@ -0,0 +1,574 @@
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
InputArray,
|
||||||
|
int,
|
||||||
|
Mat,
|
||||||
|
OutputArray,
|
||||||
|
Point2f,
|
||||||
|
Size,
|
||||||
|
} from "./_types";
|
||||||
|
/*
|
||||||
|
* # Geometric Image Transformations
|
||||||
|
* The functions in this section perform various geometrical transformations of 2D images. They do not change the image content but deform the pixel grid and map this deformed grid to the destination image. In fact, to avoid sampling artifacts, the mapping is done in the reverse order, from destination to the source. That is, for each pixel `$(x, y)$` of the destination image, the functions compute coordinates of the corresponding "donor" pixel in the source image and copy the pixel value:
|
||||||
|
*
|
||||||
|
* `\[\texttt{dst} (x,y)= \texttt{src} (f_x(x,y), f_y(x,y))\]`
|
||||||
|
*
|
||||||
|
* In case when you specify the forward mapping `$\left<g_x, g_y\right>: \texttt{src} \rightarrow \texttt{dst}$`, the OpenCV functions first compute the corresponding inverse mapping `$\left<f_x, f_y\right>: \texttt{dst} \rightarrow \texttt{src}$` and then use the above formula.
|
||||||
|
*
|
||||||
|
* The actual implementations of the geometrical transformations, from the most generic remap and to the simplest and the fastest resize, need to solve two main problems with the above formula:
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* * Extrapolation of non-existing pixels. Similarly to the filtering functions described in the previous section, for some `$(x,y)$`, either one of `$f_x(x,y)$`, or `$f_y(x,y)$`, or both of them may fall outside of the image. In this case, an extrapolation method needs to be used. OpenCV provides the same selection of extrapolation methods as in the filtering functions. In addition, it provides the method [BORDER_TRANSPARENT]. This means that the corresponding pixels in the destination image will not be modified at all.
|
||||||
|
* * Interpolation of pixel values. Usually `$f_x(x,y)$` and `$f_y(x,y)$` are floating-point numbers. This means that `$\left<f_x, f_y\right>$` can be either an affine or perspective transformation, or radial lens distortion correction, and so on. So, a pixel value at fractional coordinates needs to be retrieved. In the simplest case, the coordinates can be just rounded to the nearest integer coordinates and the corresponding pixel can be used. This is called a nearest-neighbor interpolation. However, a better result can be achieved by using more sophisticated , where a polynomial function is fit into some neighborhood of the computed pixel `$(f_x(x,y), f_y(x,y))$`, and then the value of the polynomial at `$(f_x(x,y), f_y(x,y))$` is taken as the interpolated pixel value. In OpenCV, you can choose between several interpolation methods. See resize for details.
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* The geometrical transformations do not work with `CV_8S` or `CV_32S` images.
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* The function converts a pair of maps for remap from one representation to another. The following
|
||||||
|
* options ( (map1.type(), map2.type()) `$\\rightarrow$` (dstmap1.type(), dstmap2.type()) ) are
|
||||||
|
* supported:
|
||||||
|
*
|
||||||
|
* `$\\texttt{(CV_32FC1, CV_32FC1)} \\rightarrow \\texttt{(CV_16SC2, CV_16UC1)}$`. This is the most
|
||||||
|
* frequently used conversion operation, in which the original floating-point maps (see remap ) are
|
||||||
|
* converted to a more compact and much faster fixed-point representation. The first output array
|
||||||
|
* contains the rounded coordinates and the second array (created only when nninterpolation=false )
|
||||||
|
* contains indices in the interpolation tables.
|
||||||
|
* `$\\texttt{(CV_32FC2)} \\rightarrow \\texttt{(CV_16SC2, CV_16UC1)}$`. The same as above but the
|
||||||
|
* original maps are stored in one 2-channel matrix.
|
||||||
|
* Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same as
|
||||||
|
* the originals.
|
||||||
|
*
|
||||||
|
* [remap], [undistort], [initUndistortRectifyMap]
|
||||||
|
*
|
||||||
|
* @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 .
|
||||||
|
*
|
||||||
|
* @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix), respectively.
|
||||||
|
*
|
||||||
|
* @param dstmap1 The first output map that has the type dstmap1type and the same size as src .
|
||||||
|
*
|
||||||
|
* @param dstmap2 The second output map.
|
||||||
|
*
|
||||||
|
* @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or CV_32FC2 .
|
||||||
|
*
|
||||||
|
* @param nninterpolation Flag indicating whether the fixed-point maps are used for the
|
||||||
|
* nearest-neighbor or for a more complex interpolation.
|
||||||
|
*/
|
||||||
|
export declare function convertMaps(
|
||||||
|
map1: InputArray,
|
||||||
|
map2: InputArray,
|
||||||
|
dstmap1: OutputArray,
|
||||||
|
dstmap2: OutputArray,
|
||||||
|
dstmap1type: int,
|
||||||
|
nninterpolation?: bool,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function calculates the `$2 \\times 3$` matrix of an affine transform so that:
|
||||||
|
*
|
||||||
|
* `\\[\\begin{bmatrix} x'_i \\\\ y'_i \\end{bmatrix} = \\texttt{map_matrix} \\cdot \\begin{bmatrix}
|
||||||
|
* x_i \\\\ y_i \\\\ 1 \\end{bmatrix}\\]`
|
||||||
|
*
|
||||||
|
* where
|
||||||
|
*
|
||||||
|
* `\\[dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2\\]`
|
||||||
|
*
|
||||||
|
* [warpAffine], [transform]
|
||||||
|
*
|
||||||
|
* @param src Coordinates of triangle vertices in the source image.
|
||||||
|
*
|
||||||
|
* @param dst Coordinates of the corresponding triangle vertices in the destination image.
|
||||||
|
*/
|
||||||
|
export declare function getAffineTransform(src: any, dst: any): Mat;
|
||||||
|
|
||||||
|
export declare function getAffineTransform(
|
||||||
|
src: InputArray,
|
||||||
|
dst: InputArray,
|
||||||
|
): Mat;
|
||||||
|
|
||||||
|
/**
 * The function calculates the `$3 \\times 3$` matrix of a perspective transform so that:
 *
 * `\\[\\begin{bmatrix} t_i x'_i \\\\ t_i y'_i \\\\ t_i \\end{bmatrix} = \\texttt{map_matrix} \\cdot
 * \\begin{bmatrix} x_i \\\\ y_i \\\\ 1 \\end{bmatrix}\\]`
 *
 * where
 *
 * `\\[dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\\]`
 *
 * [findHomography], [warpPerspective], [perspectiveTransform]
 *
 * @param src Coordinates of quadrangle vertices in the source image.
 *
 * @param dst Coordinates of the corresponding quadrangle vertices in the destination image.
 *
 * @param solveMethod method passed to cv::solve (DecompTypes)
 */
export declare function getPerspectiveTransform(
  src: InputArray,
  dst: InputArray,
  solveMethod?: int,
): Mat;

/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 */
export declare function getPerspectiveTransform(
  src: any,
  dst: any,
  solveMethod?: int,
): Mat;
|
||||||
|
|
||||||
|
/**
 * The function getRectSubPix extracts pixels from src:
 *
 * `\\[patch(x, y) = src(x + \\texttt{center.x} - ( \\texttt{dst.cols} -1)*0.5, y + \\texttt{center.y}
 * - ( \\texttt{dst.rows} -1)*0.5)\\]`
 *
 * where the values of the pixels at non-integer coordinates are retrieved using bilinear
 * interpolation. Every channel of multi-channel images is processed independently. Also the image
 * should be a single channel or three channel image. While the center of the rectangle must be inside
 * the image, parts of the rectangle may be outside.
 *
 * [warpAffine], [warpPerspective]
 *
 * @param image Source image.
 *
 * @param patchSize Size of the extracted patch.
 *
 * @param center Floating point coordinates of the center of the extracted rectangle within the source
 * image. The center must be inside the image.
 *
 * @param patch Extracted patch that has the size patchSize and the same number of channels as src .
 *
 * @param patchType Depth of the extracted pixels. By default, they have the same depth as src .
 */
export declare function getRectSubPix(
  image: InputArray,
  patchSize: Size,
  center: Point2f,
  patch: OutputArray,
  patchType?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function calculates the following matrix:
 *
 * `\\[\\begin{bmatrix} \\alpha & \\beta & (1- \\alpha ) \\cdot \\texttt{center.x} - \\beta \\cdot
 * \\texttt{center.y} \\\\ - \\beta & \\alpha & \\beta \\cdot \\texttt{center.x} + (1- \\alpha ) \\cdot
 * \\texttt{center.y} \\end{bmatrix}\\]`
 *
 * where
 *
 * `\\[\\begin{array}{l} \\alpha = \\texttt{scale} \\cdot \\cos \\texttt{angle} , \\\\ \\beta =
 * \\texttt{scale} \\cdot \\sin \\texttt{angle} \\end{array}\\]`
 *
 * The transformation maps the rotation center to itself. If this is not the target, adjust the shift.
 *
 * [getAffineTransform], [warpAffine], [transform]
 *
 * @param center Center of the rotation in the source image.
 *
 * @param angle Rotation angle in degrees. Positive values mean counter-clockwise rotation (the
 * coordinate origin is assumed to be the top-left corner).
 *
 * @param scale Isotropic scale factor.
 */
export declare function getRotationMatrix2D(
  center: Point2f,
  angle: double,
  scale: double,
): Mat;
|
||||||
|
|
||||||
|
/**
 * The function computes an inverse affine transformation represented by `$2 \\times 3$` matrix M:
 *
 * `\\[\\begin{bmatrix} a_{11} & a_{12} & b_1 \\\\ a_{21} & a_{22} & b_2 \\end{bmatrix}\\]`
 *
 * The result is also a `$2 \\times 3$` matrix of the same type as M.
 *
 * @param M Original affine transformation.
 *
 * @param iM Output reverse affine transformation.
 */
export declare function invertAffineTransform(
  M: InputArray,
  iM: OutputArray,
): void;
|
||||||
|
|
||||||
|
/**
 * Remaps an image to polar coordinate space.
 * NOTE(review): upstream OpenCV deprecates this function in favor of warpPolar — confirm against
 * the bound OpenCV version (4.10.0).
 *
 * @param src Source image.
 * @param dst Destination image.
 * @param center The transformation center.
 * @param maxRadius The radius of the bounding circle to transform.
 * @param flags A combination of interpolation methods (see InterpolationFlags).
 */
export declare function linearPolar(
  src: InputArray,
  dst: OutputArray,
  center: Point2f,
  maxRadius: double,
  flags: int,
): void;

/**
 * Remaps an image to semilog-polar coordinate space.
 * NOTE(review): upstream OpenCV deprecates this function in favor of warpPolar — confirm against
 * the bound OpenCV version (4.10.0).
 *
 * @param src Source image.
 * @param dst Destination image.
 * @param center The transformation center.
 * @param M Magnitude scale parameter of the log-polar mapping.
 * @param flags A combination of interpolation methods (see InterpolationFlags).
 */
export declare function logPolar(
  src: InputArray,
  dst: OutputArray,
  center: Point2f,
  M: double,
  flags: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function remap transforms the source image using the specified map:
 *
 * `\\[\\texttt{dst} (x,y) = \\texttt{src} (map_x(x,y),map_y(x,y))\\]`
 *
 * where values of pixels with non-integer coordinates are computed using one of available
 * interpolation methods. `$map_x$` and `$map_y$` can be encoded as separate floating-point maps in
 * `$map_1$` and `$map_2$` respectively, or interleaved floating-point maps of `$(x,y)$` in `$map_1$`,
 * or fixed-point maps created by using convertMaps. The reason you might want to convert from floating
 * to fixed-point representations of a map is that they can yield much faster (2x) remapping
 * operations. In the converted case, `$map_1$` contains pairs (cvFloor(x), cvFloor(y)) and `$map_2$`
 * contains indices in a table of interpolation coefficients.
 *
 * This function cannot operate in-place.
 *
 * Due to current implementation limitations the size of an input and output images should be less than
 * 32767x32767.
 *
 * @param src Source image.
 *
 * @param dst Destination image. It has the same size as map1 and the same type as src .
 *
 * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
 * CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating point representation to
 * fixed-point for speed.
 *
 * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map if
 * map1 is (x,y) points), respectively.
 *
 * @param interpolation Interpolation method (see InterpolationFlags). The method INTER_AREA is not
 * supported by this function.
 *
 * @param borderMode Pixel extrapolation method (see BorderTypes). When borderMode=BORDER_TRANSPARENT,
 * it means that the pixels in the destination image that corresponds to the "outliers" in the source
 * image are not modified by the function.
 *
 * @param borderValue Value used in case of a constant border. By default, it is 0.
 */
export declare function remap(
  src: InputArray,
  dst: OutputArray,
  map1: InputArray,
  map2: InputArray,
  interpolation: int,
  borderMode?: int,
  borderValue?: any,
): void;
|
||||||
|
|
||||||
|
/**
 * The function resize resizes the image src down to or up to the specified size. Note that the initial
 * dst type or size are not taken into account. Instead, the size and type are derived from the
 * `src`,`dsize`,`fx`, and `fy`. If you want to resize src so that it fits the pre-created dst, you may
 * call the function as follows:
 *
 * ```cpp
 * // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
 * resize(src, dst, dst.size(), 0, 0, interpolation);
 * ```
 *
 * If you want to decimate the image by factor of 2 in each direction, you can call the function this
 * way:
 *
 * ```cpp
 * // specify fx and fy and let the function compute the destination image size.
 * resize(src, dst, Size(), 0.5, 0.5, interpolation);
 * ```
 *
 * To shrink an image, it will generally look best with [INTER_AREA] interpolation, whereas to enlarge
 * an image, it will generally look best with cv::INTER_CUBIC (slow) or [INTER_LINEAR] (faster but still
 * looks OK).
 *
 * [warpAffine], [warpPerspective], [remap]
 *
 * @param src input image.
 *
 * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
 * src.size(), fx, and fy; the type of dst is the same as of src.
 *
 * @param dsize output image size; if it equals zero, it is computed as: \[\texttt{dsize =
 * Size(round(fx*src.cols), round(fy*src.rows))}\] Either dsize or both fx and fy must be non-zero.
 *
 * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
 * \[\texttt{(double)dsize.width/src.cols}\]
 *
 * @param fy scale factor along the vertical axis; when it equals 0, it is computed as
 * \[\texttt{(double)dsize.height/src.rows}\]
 *
 * @param interpolation interpolation method, see InterpolationFlags
 */
export declare function resize(
  src: InputArray,
  dst: OutputArray,
  dsize: Size,
  fx?: double,
  fy?: double,
  interpolation?: int,
): void;
|
||||||
|
|
||||||
|
/**
 * The function warpAffine transforms the source image using the specified matrix:
 *
 * `\\[\\texttt{dst} (x,y) = \\texttt{src} ( \\texttt{M} _{11} x + \\texttt{M} _{12} y + \\texttt{M}
 * _{13}, \\texttt{M} _{21} x + \\texttt{M} _{22} y + \\texttt{M} _{23})\\]`
 *
 * when the flag [WARP_INVERSE_MAP] is set. Otherwise, the transformation is first inverted with
 * [invertAffineTransform] and then put in the formula above instead of M. The function cannot operate
 * in-place.
 *
 * [warpPerspective], [resize], [remap], [getRectSubPix], [transform]
 *
 * @param src input image.
 *
 * @param dst output image that has the size dsize and the same type as src .
 *
 * @param M $2\times 3$ transformation matrix.
 *
 * @param dsize size of the output image.
 *
 * @param flags combination of interpolation methods (see InterpolationFlags) and the optional flag
 * WARP_INVERSE_MAP that means that M is the inverse transformation (
 * $\texttt{dst}\rightarrow\texttt{src}$ ).
 *
 * @param borderMode pixel extrapolation method (see BorderTypes); when borderMode=BORDER_TRANSPARENT,
 * it means that the pixels in the destination image corresponding to the "outliers" in the source
 * image are not modified by the function.
 *
 * @param borderValue value used in case of a constant border; by default, it is 0.
 */
export declare function warpAffine(
  src: InputArray,
  dst: OutputArray,
  M: InputArray,
  dsize: Size,
  flags?: int,
  borderMode?: int,
  borderValue?: any,
): void;
|
||||||
|
|
||||||
|
/**
 * The function warpPerspective transforms the source image using the specified matrix:
 *
 * `\\[\\texttt{dst} (x,y) = \\texttt{src} \\left ( \\frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x +
 * M_{32} y + M_{33}} , \\frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \\right
 * )\\]`
 *
 * when the flag [WARP_INVERSE_MAP] is set. Otherwise, the transformation is first inverted with invert
 * and then put in the formula above instead of M. The function cannot operate in-place.
 *
 * [warpAffine], [resize], [remap], [getRectSubPix], [perspectiveTransform]
 *
 * @param src input image.
 *
 * @param dst output image that has the size dsize and the same type as src .
 *
 * @param M $3\times 3$ transformation matrix.
 *
 * @param dsize size of the output image.
 *
 * @param flags combination of interpolation methods (INTER_LINEAR or INTER_NEAREST) and the optional
 * flag WARP_INVERSE_MAP, that sets M as the inverse transformation (
 * $\texttt{dst}\rightarrow\texttt{src}$ ).
 *
 * @param borderMode pixel extrapolation method (BORDER_CONSTANT or BORDER_REPLICATE).
 *
 * @param borderValue value used in case of a constant border; by default, it equals 0.
 */
export declare function warpPerspective(
  src: InputArray,
  dst: OutputArray,
  M: InputArray,
  dsize: Size,
  flags?: int,
  borderMode?: int,
  borderValue?: any,
): void;
|
||||||
|
|
||||||
|
/**
 * <a name="da/d54/group__imgproc__transform_1polar_remaps_reference_image"></a>
 * Transform the source image using the following transformation: `\\[ dst(\\rho , \\phi ) = src(x,y)
 * \\]`
 *
 * where `\\[ \\begin{array}{l} \\vec{I} = (x - center.x, \\;y - center.y) \\\\ \\phi = Kangle \\cdot
 * \\texttt{angle} (\\vec{I}) \\\\ \\rho = \\left\\{\\begin{matrix} Klin \\cdot \\texttt{magnitude}
 * (\\vec{I}) & default \\\\ Klog \\cdot log_e(\\texttt{magnitude} (\\vec{I})) & if \\; semilog \\\\
 * \\end{matrix}\\right. \\end{array} \\]`
 *
 * and `\\[ \\begin{array}{l} Kangle = dsize.height / 2\\Pi \\\\ Klin = dsize.width / maxRadius \\\\
 * Klog = dsize.width / log_e(maxRadius) \\\\ \\end{array} \\]`
 *
 * Polar mapping can be linear or semi-log. Add one of [WarpPolarMode] to `flags` to specify the polar
 * mapping mode.
 *
 * Linear is the default mode.
 *
 * The semilog mapping emulates the human "foveal" vision that permits very high acuity on the line of
 * sight (central vision) in contrast to peripheral vision where acuity is minor.
 *
 * if both values in `dsize <=0` (default), the destination image will have (almost) same area of
 * source bounding circle: `\\[\\begin{array}{l} dsize.area \\leftarrow (maxRadius^2 \\cdot \\Pi) \\\\
 * dsize.width = \\texttt{cvRound}(maxRadius) \\\\ dsize.height = \\texttt{cvRound}(maxRadius \\cdot
 * \\Pi) \\\\ \\end{array}\\]`
 * if only `dsize.height <= 0`, the destination image area will be proportional to the bounding circle
 * area but scaled by `Kx * Kx`: `\\[\\begin{array}{l} dsize.height = \\texttt{cvRound}(dsize.width
 * \\cdot \\Pi) \\\\ \\end{array} \\]`
 * if both values in `dsize > 0`, the destination image will have the given size therefore the area of
 * the bounding circle will be scaled to `dsize`.
 *
 * You can get reverse mapping adding [WARP_INVERSE_MAP] to `flags`
 *
 * ```cpp
 * // direct transform
 * warpPolar(src, lin_polar_img, Size(),center, maxRadius, flags); //
 * linear Polar
 * warpPolar(src, log_polar_img, Size(),center, maxRadius, flags + WARP_POLAR_LOG); //
 * semilog Polar
 * // inverse transform
 * warpPolar(lin_polar_img, recovered_lin_polar_img, src.size(), center, maxRadius, flags +
 * WARP_INVERSE_MAP);
 * warpPolar(log_polar_img, recovered_log_polar, src.size(), center, maxRadius, flags +
 * WARP_POLAR_LOG + WARP_INVERSE_MAP);
 * ```
 *
 * In addition, to calculate the original coordinate from a polar mapped coordinate `$(rho, phi)->(x,
 * y)$`:
 *
 * ```cpp
 * double angleRad, magnitude;
 * double Kangle = dst.rows / CV_2PI;
 * angleRad = phi / Kangle;
 * if (flags & WARP_POLAR_LOG)
 * {
 * double Klog = dst.cols / std::log(maxRadius);
 * magnitude = std::exp(rho / Klog);
 * }
 * else
 * {
 * double Klin = dst.cols / maxRadius;
 * magnitude = rho / Klin;
 * }
 * int x = cvRound(center.x + magnitude * cos(angleRad));
 * int y = cvRound(center.y + magnitude * sin(angleRad));
 * ```
 *
 * The function can not operate in-place.
 * To calculate magnitude and angle in degrees [cartToPolar] is used internally thus angles are
 * measured from 0 to 360 with accuracy about 0.3 degrees.
 * This function uses [remap]. Due to current implementation limitations the size of an input and
 * output images should be less than 32767x32767.
 *
 * [cv::remap]
 *
 * @param src Source image.
 *
 * @param dst Destination image. It will have same type as src.
 *
 * @param dsize The destination image size (see description for valid options).
 *
 * @param center The transformation center.
 *
 * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude
 * scale parameter too.
 *
 * @param flags A combination of interpolation methods, InterpolationFlags + WarpPolarMode.
 * Add WARP_POLAR_LINEAR to select linear polar mapping (default). Add WARP_POLAR_LOG to select semilog
 * polar mapping. Add WARP_INVERSE_MAP for reverse mapping.
 */
export declare function warpPolar(
  src: InputArray,
  dst: OutputArray,
  dsize: Size,
  center: Point2f,
  maxRadius: double,
  flags: int,
): void;
|
||||||
|
|
||||||
|
/**
 * nearest neighbor interpolation
 */
export declare const INTER_NEAREST: InterpolationFlags; // initializer: = 0

/**
 * bilinear interpolation
 */
export declare const INTER_LINEAR: InterpolationFlags; // initializer: = 1

/**
 * bicubic interpolation
 */
export declare const INTER_CUBIC: InterpolationFlags; // initializer: = 2

/**
 * resampling using pixel area relation. It may be a preferred method for image decimation, as it gives
 * moire'-free results. But when the image is zoomed, it is similar to the INTER_NEAREST method.
 */
export declare const INTER_AREA: InterpolationFlags; // initializer: = 3

/**
 * Lanczos interpolation over 8x8 neighborhood
 */
export declare const INTER_LANCZOS4: InterpolationFlags; // initializer: = 4

/**
 * Bit exact bilinear interpolation
 */
export declare const INTER_LINEAR_EXACT: InterpolationFlags; // initializer: = 5

/**
 * mask for interpolation codes
 */
export declare const INTER_MAX: InterpolationFlags; // initializer: = 7

/**
 * flag, fills all of the destination image pixels. If some of them correspond to outliers in the
 * source image, they are set to zero
 */
export declare const WARP_FILL_OUTLIERS: InterpolationFlags; // initializer: = 8

/**
 * flag, inverse transformation
 *
 * For example, [linearPolar] or [logPolar] transforms:
 *
 * flag is **not** set: `$dst( \\rho , \\phi ) = src(x,y)$`
 * flag is set: `$dst(x,y) = src( \\rho , \\phi )$`
 */
export declare const WARP_INVERSE_MAP: InterpolationFlags; // initializer: = 16

// Fixed-point interpolation-table parameters (masks) used internally by remap.
export declare const INTER_BITS: InterpolationMasks; // initializer: = 5

export declare const INTER_BITS2: InterpolationMasks; // initializer: = INTER_BITS * 2

export declare const INTER_TAB_SIZE: InterpolationMasks; // initializer: = 1 << INTER_BITS

export declare const INTER_TAB_SIZE2: InterpolationMasks; // initializer: = INTER_TAB_SIZE * INTER_TAB_SIZE

// warpPolar mode flags: linear (default) vs. semilog polar mapping.
export declare const WARP_POLAR_LINEAR: WarpPolarMode; // initializer: = 0

export declare const WARP_POLAR_LOG: WarpPolarMode; // initializer: = 256

// These enum-like types are untyped (`any`) in the generated declarations.
export type InterpolationFlags = any;

export type InterpolationMasks = any;

export type WarpPolarMode = any;
|
||||||
4
opencv-js-4.10.0/src/types/opencv/index.ts
Normal file
4
opencv-js-4.10.0/src/types/opencv/index.ts
Normal file
|
|
@ -0,0 +1,4 @@
|
||||||
|
import * as _cv from "./_types";
|
||||||
|
/** Convenience alias: the type of the entire generated OpenCV typings namespace. */
export type CV = typeof _cv;

export * from "./_types";
|
||||||
103
opencv-js-4.10.0/src/types/opencv/objdetect.ts
Normal file
103
opencv-js-4.10.0/src/types/opencv/objdetect.ts
Normal file
|
|
@ -0,0 +1,103 @@
|
||||||
|
import type { double, int, Size } from "./_types";
|
||||||
|
/*
|
||||||
|
* # Object Detection
|
||||||
|
* ## Haar Feature-based Cascade Classifier for Object Detection
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* The object detector described below has been initially proposed by Paul Viola Viola01 and improved by Rainer Lienhart Lienhart02 .
|
||||||
|
*
|
||||||
|
* First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is trained with a few hundred sample views of a particular object (i.e., a face or a car), called positive examples, that are scaled to the same size (say, 20x20), and negative examples - arbitrary images of the same size.
|
||||||
|
*
|
||||||
|
* After a classifier is trained, it can be applied to a region of interest (of the same size as used during the training) in an input image. The classifier outputs a "1" if the region is likely to show the object (i.e., face/car), and "0" otherwise. To search for the object in the whole image one can move the search window across the image and check every location using the classifier. The classifier is designed so that it can be easily "resized" in order to be able to find the objects of interest at different sizes, which is more efficient than resizing the image itself. So, to find an object of an unknown size in the image the scan procedure should be done several times at different scales.
|
||||||
|
*
|
||||||
|
* The word "cascade" in the classifier name means that the resultant classifier consists of several simpler classifiers (*stages*) that are applied subsequently to a region of interest until at some stage the candidate is rejected or all the stages are passed. The word "boosted" means that the classifiers at every stage of the cascade are complex themselves and they are built out of basic classifiers using one of four different boosting techniques (weighted voting). Currently Discrete Adaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are decision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic classifiers, and are calculated as described below. The current algorithm uses the following Haar-like features:
|
||||||
|
*
|
||||||
|
*
|
||||||
|
 * The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within the region of interest and the scale (this scale is not the same as the scale used at the detection stage, though these two scales are multiplied). For example, in the case of the third line feature (2c) the response is calculated as the difference between the sum of image pixels under the rectangle covering the whole feature (including the two white stripes and the black stripe in the middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to compensate for the differences in the size of areas. The sums of pixel values over rectangular regions are calculated rapidly using integral images (see below and the integral description).
|
||||||
|
*
|
||||||
|
* To see the object detector at work, have a look at the facedetect demo:
|
||||||
|
*
|
||||||
|
* The following reference is for the detection part only. There is a separate application called opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* In the new C++ interface it is also possible to use LBP (local binary pattern) features in addition to Haar-like features. .. [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at
|
||||||
|
*/
|
||||||
|
// Factory for a face-detection mask generator (used with CascadeClassifier);
// the return value is untyped (`any`) in these generated declarations.
export declare function createFaceDetectionMaskGenerator(): any;
|
||||||
|
|
||||||
|
/**
 * The function is a wrapper for the generic function partition . It clusters all the input rectangles
 * using the rectangle equivalence criteria that combines rectangles with similar sizes and similar
 * locations. The similarity is defined by eps. When eps=0 , no clustering is done at all. If
 * `$\\texttt{eps}\\rightarrow +\\infty$` , all the rectangles are put in one cluster. Then, the small
 * clusters containing less than or equal to groupThreshold rectangles are rejected. In each other
 * cluster, the average rectangle is computed and put into the output rectangle list.
 *
 * @param rectList Input/output vector of rectangles. Output vector includes retained and grouped
 * rectangles. (The Python list is not modified in place.)
 *
 * @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a
 * group of rectangles to retain it.
 *
 * @param eps Relative difference between sides of the rectangles to merge them into a group.
 */
export declare function groupRectangles(
  rectList: any,
  groupThreshold: int,
  eps?: double,
): void;

/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 */
export declare function groupRectangles(
  rectList: any,
  weights: any,
  groupThreshold: int,
  eps?: double,
): void;

/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 */
export declare function groupRectangles(
  rectList: any,
  groupThreshold: int,
  eps: double,
  weights: any,
  levelWeights: any,
): void;

/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 */
export declare function groupRectangles(
  rectList: any,
  rejectLevels: any,
  levelWeights: any,
  groupThreshold: int,
  eps?: double,
): void;
|
||||||
|
|
||||||
|
/**
 * This is an overloaded member function, provided for convenience. It differs from the above function
 * only in what argument(s) it accepts.
 * Mean-shift based grouping of detection rectangles using the detection weights and scales.
 */
export declare function groupRectangles_meanshift(
  rectList: any,
  foundWeights: any,
  foundScales: any,
  detectThreshold?: double,
  winDetSize?: Size,
): void;
|
||||||
|
|
||||||
|
// Bit flags for cascade detection (CascadeClassifier::detectMultiScale / legacy haar API).
export declare const CASCADE_DO_CANNY_PRUNING: any; // initializer: = 1

export declare const CASCADE_SCALE_IMAGE: any; // initializer: = 2

export declare const CASCADE_FIND_BIGGEST_OBJECT: any; // initializer: = 4

export declare const CASCADE_DO_ROUGH_SEARCH: any; // initializer: = 8
|
||||||
37
opencv-js-4.10.0/src/types/opencv/photo_inpaint.ts
Normal file
37
opencv-js-4.10.0/src/types/opencv/photo_inpaint.ts
Normal file
|
|
@ -0,0 +1,37 @@
|
||||||
|
import type { double, InputArray, int, OutputArray } from "./_types";
|
||||||
|
/*
|
||||||
|
* # Inpainting
|
||||||
|
* the inpainting algorithm
|
||||||
|
*/
|
||||||
|
/**
 * The function reconstructs the selected image area from the pixel near the area boundary. The
 * function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
 * objects from still images or video. See the OpenCV inpainting documentation for more details.
 *
 * An example using the inpainting technique can be found at opencv_source_code/samples/cpp/inpaint.cpp
 * (Python) An example using the inpainting technique can be found at
 * opencv_source_code/samples/python/inpaint.py
 *
 * @param src Input 8-bit, 16-bit unsigned or 32-bit float 1-channel or 8-bit 3-channel image.
 *
 * @param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that
 * needs to be inpainted.
 *
 * @param dst Output image with the same size and type as src .
 *
 * @param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered by
 * the algorithm.
 *
 * @param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA
 */
export declare function inpaint(
  src: InputArray,
  inpaintMask: InputArray,
  dst: OutputArray,
  inpaintRadius: double,
  flags: int,
): void;

// Navier-Stokes based inpainting method (cv::INPAINT_NS).
export declare const INPAINT_NS: any; // initializer: = 0

// Fast-marching inpainting method of A. Telea (cv::INPAINT_TELEA).
export declare const INPAINT_TELEA: any; // initializer: = 1
|
||||||
64
opencv-js-4.10.0/src/types/opencv/softdouble.ts
Normal file
64
opencv-js-4.10.0/src/types/opencv/softdouble.ts
Normal file
|
|
@ -0,0 +1,64 @@
|
||||||
|
import type { bool, int, int32_t, int64_t, uint32_t, uint64_t } from "./_types";
|
||||||
|
|
||||||
|
// Software-emulated double-precision float (bit-exact across platforms).
export declare class softdouble {
  // Raw 64-bit binary representation of the value (see fromRaw).
  public v: uint64_t;

  public constructor();

  public constructor(c: softdouble);

  public constructor(arg159: uint32_t);

  public constructor(arg160: uint64_t);

  public constructor(arg161: int32_t);

  public constructor(arg162: int64_t);

  public constructor(a: any);

  // NOTE(review): presumably returns the exponent of the value — confirm against cv::softdouble docs.
  public getExp(): int;

  /**
   * Returns a number 1 <= x < 2 with the same significand
   */
  public getFrac(): softdouble;

  public getSign(): bool;

  public isInf(): bool;

  public isNaN(): bool;

  public isSubnormal(): bool;

  // Returns a copy with the exponent set to e.
  public setExp(e: int): softdouble;

  /**
   * Constructs a copy of a number with significand taken from parameter
   */
  public setFrac(s: softdouble): softdouble;

  // Returns a copy with the sign bit set to `sign`.
  public setSign(sign: bool): softdouble;

  public static eps(): softdouble;

  /**
   * Builds new value from raw binary representation
   */
  public static fromRaw(a: uint64_t): softdouble;

  public static inf(): softdouble;

  public static max(): softdouble;

  public static min(): softdouble;

  public static nan(): softdouble;

  public static one(): softdouble;

  public static pi(): softdouble;

  public static zero(): softdouble;
}
|
||||||
64
opencv-js-4.10.0/src/types/opencv/softfloat.ts
Normal file
64
opencv-js-4.10.0/src/types/opencv/softfloat.ts
Normal file
|
|
@ -0,0 +1,64 @@
|
||||||
|
import type { bool, int, int32_t, int64_t, uint32_t, uint64_t } from "./_types";
|
||||||
|
|
||||||
|
export declare class softfloat {
|
||||||
|
public v: uint32_t;
|
||||||
|
|
||||||
|
public constructor();
|
||||||
|
|
||||||
|
public constructor(c: softfloat);
|
||||||
|
|
||||||
|
public constructor(arg174: uint32_t);
|
||||||
|
|
||||||
|
public constructor(arg175: uint64_t);
|
||||||
|
|
||||||
|
public constructor(arg176: int32_t);
|
||||||
|
|
||||||
|
public constructor(arg177: int64_t);
|
||||||
|
|
||||||
|
public constructor(a: any);
|
||||||
|
|
||||||
|
public getExp(): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a number 1 <= x < 2 with the same significand
|
||||||
|
*/
|
||||||
|
public getFrac(): softfloat;
|
||||||
|
|
||||||
|
public getSign(): bool;
|
||||||
|
|
||||||
|
public isInf(): bool;
|
||||||
|
|
||||||
|
public isNaN(): bool;
|
||||||
|
|
||||||
|
public isSubnormal(): bool;
|
||||||
|
|
||||||
|
public setExp(e: int): softfloat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Constructs a copy of a number with significand taken from parameter
|
||||||
|
*/
|
||||||
|
public setFrac(s: softfloat): softfloat;
|
||||||
|
|
||||||
|
public setSign(sign: bool): softfloat;
|
||||||
|
|
||||||
|
public static eps(): softfloat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builds new value from raw binary representation
|
||||||
|
*/
|
||||||
|
public static fromRaw(a: uint32_t): softfloat;
|
||||||
|
|
||||||
|
public static inf(): softfloat;
|
||||||
|
|
||||||
|
public static max(): softfloat;
|
||||||
|
|
||||||
|
public static min(): softfloat;
|
||||||
|
|
||||||
|
public static nan(): softfloat;
|
||||||
|
|
||||||
|
public static one(): softfloat;
|
||||||
|
|
||||||
|
public static pi(): softfloat;
|
||||||
|
|
||||||
|
public static zero(): softfloat;
|
||||||
|
}
|
||||||
372
opencv-js-4.10.0/src/types/opencv/video_track.ts
Normal file
372
opencv-js-4.10.0/src/types/opencv/video_track.ts
Normal file
|
|
@ -0,0 +1,372 @@
|
||||||
|
import type {
|
||||||
|
bool,
|
||||||
|
double,
|
||||||
|
InputArray,
|
||||||
|
InputOutputArray,
|
||||||
|
int,
|
||||||
|
Mat,
|
||||||
|
OutputArray,
|
||||||
|
OutputArrayOfArrays,
|
||||||
|
RotatedRect,
|
||||||
|
Size,
|
||||||
|
TermCriteria,
|
||||||
|
} from "./_types";
|
||||||
|
/*
|
||||||
|
* # Object Tracking
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* number of levels in constructed pyramid. Can be less than maxLevel.
|
||||||
|
*
|
||||||
|
* @param img 8-bit input image.
|
||||||
|
*
|
||||||
|
* @param pyramid output pyramid.
|
||||||
|
*
|
||||||
|
* @param winSize window size of optical flow algorithm. Must be not less than winSize argument of
|
||||||
|
* calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels.
|
||||||
|
*
|
||||||
|
* @param maxLevel 0-based maximal pyramid level number.
|
||||||
|
*
|
||||||
|
* @param withDerivatives set to precompute gradients for the every pyramid level. If pyramid is
|
||||||
|
* constructed without the gradients then calcOpticalFlowPyrLK will calculate them internally.
|
||||||
|
*
|
||||||
|
* @param pyrBorder the border mode for pyramid layers.
|
||||||
|
*
|
||||||
|
* @param derivBorder the border mode for gradients.
|
||||||
|
*
|
||||||
|
* @param tryReuseInputImage put ROI of input image into the pyramid if possible. You can pass false to
|
||||||
|
* force data copying.
|
||||||
|
*/
|
||||||
|
export declare function buildOpticalFlowPyramid(
|
||||||
|
img: InputArray,
|
||||||
|
pyramid: OutputArrayOfArrays,
|
||||||
|
winSize: Size,
|
||||||
|
maxLevel: int,
|
||||||
|
withDerivatives?: bool,
|
||||||
|
pyrBorder?: int,
|
||||||
|
derivBorder?: int,
|
||||||
|
tryReuseInputImage?: bool,
|
||||||
|
): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function finds an optical flow for each prev pixel using the Farneback2003 algorithm so that
|
||||||
|
*
|
||||||
|
* `\\[\\texttt{prev} (y,x) \\sim \\texttt{next} ( y + \\texttt{flow} (y,x)[1], x + \\texttt{flow}
|
||||||
|
* (y,x)[0])\\]`
|
||||||
|
*
|
||||||
|
* An example using the optical flow algorithm described by Gunnar Farneback can be found at
|
||||||
|
* opencv_source_code/samples/cpp/fback.cpp
|
||||||
|
* (Python) An example using the optical flow algorithm described by Gunnar Farneback can be found at
|
||||||
|
* opencv_source_code/samples/python/opt_flow.py
|
||||||
|
*
|
||||||
|
* @param prev first 8-bit single-channel input image.
|
||||||
|
*
|
||||||
|
* @param next second input image of the same size and the same type as prev.
|
||||||
|
*
|
||||||
|
* @param flow computed flow image that has the same size as prev and type CV_32FC2.
|
||||||
|
*
|
||||||
|
* @param pyr_scale parameter, specifying the image scale (<1) to build pyramids for each image;
|
||||||
|
* pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous
|
||||||
|
* one.
|
||||||
|
*
|
||||||
|
* @param levels number of pyramid layers including the initial image; levels=1 means that no extra
|
||||||
|
* layers are created and only the original images are used.
|
||||||
|
*
|
||||||
|
* @param winsize averaging window size; larger values increase the algorithm robustness to image noise
|
||||||
|
* and give more chances for fast motion detection, but yield more blurred motion field.
|
||||||
|
*
|
||||||
|
* @param iterations number of iterations the algorithm does at each pyramid level.
|
||||||
|
*
|
||||||
|
* @param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel; larger
|
||||||
|
* values mean that the image will be approximated with smoother surfaces, yielding more robust
|
||||||
|
* algorithm and more blurred motion field, typically poly_n =5 or 7.
|
||||||
|
*
|
||||||
|
* @param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a
|
||||||
|
* basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a good
|
||||||
|
* value would be poly_sigma=1.5.
|
||||||
|
*
|
||||||
|
* @param flags operation flags that can be a combination of the following:
|
||||||
|
* OPTFLOW_USE_INITIAL_FLOW uses the input flow as an initial flow
|
||||||
|
* approximation.OPTFLOW_FARNEBACK_GAUSSIAN uses the Gaussian $\texttt{winsize}\times\texttt{winsize}$
|
||||||
|
* filter instead of a box filter of the same size for optical flow estimation; usually, this option
|
||||||
|
* gives z more accurate flow than with a box filter, at the cost of lower speed; normally, winsize for
|
||||||
|
* a Gaussian window should be set to a larger value to achieve the same level of robustness.
|
||||||
|
*/
|
||||||
|
export declare function calcOpticalFlowFarneback(
|
||||||
|
prev: InputArray,
|
||||||
|
next: InputArray,
|
||||||
|
flow: InputOutputArray,
|
||||||
|
pyr_scale: double,
|
||||||
|
levels: int,
|
||||||
|
winsize: int,
|
||||||
|
iterations: int,
|
||||||
|
poly_n: int,
|
||||||
|
poly_sigma: double,
|
||||||
|
flags: int,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
|
||||||
|
* Bouguet00 . The function is parallelized with the TBB library.
|
||||||
|
*
|
||||||
|
* An example using the Lucas-Kanade optical flow algorithm can be found at
|
||||||
|
* opencv_source_code/samples/cpp/lkdemo.cpp
|
||||||
|
* (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
|
||||||
|
* opencv_source_code/samples/python/lk_track.py
|
||||||
|
* (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
|
||||||
|
* opencv_source_code/samples/python/lk_homography.py
|
||||||
|
*
|
||||||
|
* @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
|
||||||
|
*
|
||||||
|
* @param nextImg second input image or pyramid of the same size and the same type as prevImg.
|
||||||
|
*
|
||||||
|
* @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
|
||||||
|
* single-precision floating-point numbers.
|
||||||
|
*
|
||||||
|
* @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
|
||||||
|
* containing the calculated new positions of input features in the second image; when
|
||||||
|
* OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
|
||||||
|
*
|
||||||
|
* @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
|
||||||
|
* the flow for the corresponding features has been found, otherwise, it is set to 0.
|
||||||
|
*
|
||||||
|
* @param err output vector of errors; each element of the vector is set to an error for the
|
||||||
|
* corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
|
||||||
|
* found then the error is not defined (use the status parameter to find such cases).
|
||||||
|
*
|
||||||
|
* @param winSize size of the search window at each pyramid level.
|
||||||
|
*
|
||||||
|
* @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
|
||||||
|
* level), if set to 1, two levels are used, and so on; if pyramids are passed to input then algorithm
|
||||||
|
* will use as many levels as pyramids have but no more than maxLevel.
|
||||||
|
*
|
||||||
|
* @param criteria parameter, specifying the termination criteria of the iterative search algorithm
|
||||||
|
* (after the specified maximum number of iterations criteria.maxCount or when the search window moves
|
||||||
|
* by less than criteria.epsilon.
|
||||||
|
*
|
||||||
|
* @param flags operation flags:
|
||||||
|
* OPTFLOW_USE_INITIAL_FLOW uses initial estimations, stored in nextPts; if the flag is not set, then
|
||||||
|
* prevPts is copied to nextPts and is considered the initial estimate.OPTFLOW_LK_GET_MIN_EIGENVALS use
|
||||||
|
* minimum eigen values as an error measure (see minEigThreshold description); if the flag is not set,
|
||||||
|
* then L1 distance between patches around the original and a moved point, divided by number of pixels
|
||||||
|
* in a window, is used as a error measure.
|
||||||
|
*
|
||||||
|
* @param minEigThreshold the algorithm calculates the minimum eigen value of a 2x2 normal matrix of
|
||||||
|
* optical flow equations (this matrix is called a spatial gradient matrix in Bouguet00), divided by
|
||||||
|
* number of pixels in a window; if this value is less than minEigThreshold, then a corresponding
|
||||||
|
* feature is filtered out and its flow is not processed, so it allows to remove bad points and get a
|
||||||
|
* performance boost.
|
||||||
|
*/
|
||||||
|
export declare function calcOpticalFlowPyrLK(
|
||||||
|
prevImg: InputArray,
|
||||||
|
nextImg: InputArray,
|
||||||
|
prevPts: InputArray,
|
||||||
|
nextPts: InputOutputArray,
|
||||||
|
status: OutputArray,
|
||||||
|
err: OutputArray,
|
||||||
|
winSize?: Size,
|
||||||
|
maxLevel?: int,
|
||||||
|
criteria?: TermCriteria,
|
||||||
|
flags?: int,
|
||||||
|
minEigThreshold?: double,
|
||||||
|
): void;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* See the OpenCV sample camshiftdemo.c that tracks colored objects.
|
||||||
|
*
|
||||||
|
* (Python) A sample explaining the camshift tracking algorithm can be found at
|
||||||
|
* opencv_source_code/samples/python/camshift.py
|
||||||
|
*
|
||||||
|
* @param probImage Back projection of the object histogram. See calcBackProject.
|
||||||
|
*
|
||||||
|
* @param window Initial search window.
|
||||||
|
*
|
||||||
|
* @param criteria Stop criteria for the underlying meanShift. returns (in old interfaces) Number of
|
||||||
|
* iterations CAMSHIFT took to converge The function implements the CAMSHIFT object tracking algorithm
|
||||||
|
* Bradski98 . First, it finds an object center using meanShift and then adjusts the window size and
|
||||||
|
* finds the optimal rotation. The function returns the rotated rectangle structure that includes the
|
||||||
|
* object position, size, and orientation. The next position of the search window can be obtained with
|
||||||
|
* RotatedRect::boundingRect()
|
||||||
|
*/
|
||||||
|
export declare function CamShift(
|
||||||
|
probImage: InputArray,
|
||||||
|
window: any,
|
||||||
|
criteria: TermCriteria,
|
||||||
|
): RotatedRect;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* [findTransformECC]
|
||||||
|
*
|
||||||
|
* @param templateImage single-channel template image; CV_8U or CV_32F array.
|
||||||
|
*
|
||||||
|
* @param inputImage single-channel input image to be warped to provide an image similar to
|
||||||
|
* templateImage, same type as templateImage.
|
||||||
|
*
|
||||||
|
* @param inputMask An optional mask to indicate valid values of inputImage.
|
||||||
|
*/
|
||||||
|
export declare function computeECC(
|
||||||
|
templateImage: InputArray,
|
||||||
|
inputImage: InputArray,
|
||||||
|
inputMask?: InputArray,
|
||||||
|
): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function finds an optimal affine transform *[A|b]* (a 2 x 3 floating-point matrix) that
|
||||||
|
* approximates best the affine transformation between: In case of point sets, the problem is
|
||||||
|
* formulated as follows: you need to find a 2x2 matrix *A* and 2x1 vector *b* so that:
|
||||||
|
*
|
||||||
|
* `\\[[A^*|b^*] = arg \\min _{[A|b]} \\sum _i \\| \\texttt{dst}[i] - A { \\texttt{src}[i]}^T - b \\|
|
||||||
|
* ^2\\]` where src[i] and dst[i] are the i-th points in src and dst, respectively `$[A|b]$` can be
|
||||||
|
* either arbitrary (when fullAffine=true ) or have a form of `\\[\\begin{bmatrix} a_{11} & a_{12} &
|
||||||
|
* b_1 \\\\ -a_{12} & a_{11} & b_2 \\end{bmatrix}\\]` when fullAffine=false.
|
||||||
|
*
|
||||||
|
* [estimateAffine2D], [estimateAffinePartial2D], [getAffineTransform], [getPerspectiveTransform],
|
||||||
|
* [findHomography]
|
||||||
|
*
|
||||||
|
* @param src First input 2D point set stored in std::vector or Mat, or an image stored in Mat.
|
||||||
|
*
|
||||||
|
* @param dst Second input 2D point set of the same size and the same type as A, or another image.
|
||||||
|
*
|
||||||
|
* @param fullAffine If true, the function finds an optimal affine transformation with no additional
|
||||||
|
* restrictions (6 degrees of freedom). Otherwise, the class of transformations to choose from is
|
||||||
|
* limited to combinations of translation, rotation, and uniform scaling (4 degrees of freedom).
|
||||||
|
*/
|
||||||
|
export declare function estimateRigidTransform(
|
||||||
|
src: InputArray,
|
||||||
|
dst: InputArray,
|
||||||
|
fullAffine: bool,
|
||||||
|
): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function estimates the optimum transformation (warpMatrix) with respect to ECC criterion (EP08),
|
||||||
|
* that is
|
||||||
|
*
|
||||||
|
* `\\[\\texttt{warpMatrix} = \\texttt{warpMatrix} = \\arg\\max_{W}
|
||||||
|
* \\texttt{ECC}(\\texttt{templateImage}(x,y),\\texttt{inputImage}(x',y'))\\]`
|
||||||
|
*
|
||||||
|
* where
|
||||||
|
*
|
||||||
|
* `\\[\\begin{bmatrix} x' \\\\ y' \\end{bmatrix} = W \\cdot \\begin{bmatrix} x \\\\ y \\\\ 1
|
||||||
|
* \\end{bmatrix}\\]`
|
||||||
|
*
|
||||||
|
* (the equation holds with homogeneous coordinates for homography). It returns the final enhanced
|
||||||
|
* correlation coefficient, that is the correlation coefficient between the template image and the
|
||||||
|
* final warped input image. When a `$3\\times 3$` matrix is given with motionType =0, 1 or 2, the
|
||||||
|
* third row is ignored.
|
||||||
|
*
|
||||||
|
* Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an
|
||||||
|
* area-based alignment that builds on intensity similarities. In essence, the function updates the
|
||||||
|
* initial transformation that roughly aligns the images. If this information is missing, the identity
|
||||||
|
* warp (unity matrix) is used as an initialization. Note that if images undergo strong
|
||||||
|
* displacements/rotations, an initial transformation that roughly aligns the images is necessary
|
||||||
|
* (e.g., a simple euclidean/similarity transform that allows for the images showing the same image
|
||||||
|
* content approximately). Use inverse warping in the second image to take an image close to the first
|
||||||
|
* one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV
|
||||||
|
* sample image_alignment.cpp that demonstrates the use of the function. Note that the function throws
|
||||||
|
* an exception if algorithm does not converges.
|
||||||
|
*
|
||||||
|
* [computeECC], [estimateAffine2D], [estimateAffinePartial2D], [findHomography]
|
||||||
|
*
|
||||||
|
* @param templateImage single-channel template image; CV_8U or CV_32F array.
|
||||||
|
*
|
||||||
|
* @param inputImage single-channel input image which should be warped with the final warpMatrix in
|
||||||
|
* order to provide an image similar to templateImage, same type as templateImage.
|
||||||
|
*
|
||||||
|
* @param warpMatrix floating-point $2\times 3$ or $3\times 3$ mapping matrix (warp).
|
||||||
|
*
|
||||||
|
* @param motionType parameter, specifying the type of motion:
|
||||||
|
* MOTION_TRANSLATION sets a translational motion model; warpMatrix is $2\times 3$ with the first
|
||||||
|
* $2\times 2$ part being the unity matrix and the rest two parameters being estimated.MOTION_EUCLIDEAN
|
||||||
|
* sets a Euclidean (rigid) transformation as motion model; three parameters are estimated; warpMatrix
|
||||||
|
* is $2\times 3$.MOTION_AFFINE sets an affine motion model (DEFAULT); six parameters are estimated;
|
||||||
|
* warpMatrix is $2\times 3$.MOTION_HOMOGRAPHY sets a homography as a motion model; eight parameters
|
||||||
|
* are estimated;`warpMatrix` is $3\times 3$.
|
||||||
|
*
|
||||||
|
* @param criteria parameter, specifying the termination criteria of the ECC algorithm;
|
||||||
|
* criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
|
||||||
|
* iterations (a negative criteria.epsilon makes criteria.maxcount the only termination criterion).
|
||||||
|
* Default values are shown in the declaration above.
|
||||||
|
*
|
||||||
|
* @param inputMask An optional mask to indicate valid values of inputImage.
|
||||||
|
*
|
||||||
|
* @param gaussFiltSize An optional value indicating size of gaussian blur filter; (DEFAULT: 5)
|
||||||
|
*/
|
||||||
|
export declare function findTransformECC(
|
||||||
|
templateImage: InputArray,
|
||||||
|
inputImage: InputArray,
|
||||||
|
warpMatrix: InputOutputArray,
|
||||||
|
motionType: int,
|
||||||
|
criteria: TermCriteria,
|
||||||
|
inputMask: InputArray,
|
||||||
|
gaussFiltSize: int,
|
||||||
|
): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is an overloaded member function, provided for convenience. It differs from the above function
|
||||||
|
* only in what argument(s) it accepts.
|
||||||
|
*/
|
||||||
|
export declare function findTransformECC(
|
||||||
|
templateImage: InputArray,
|
||||||
|
inputImage: InputArray,
|
||||||
|
warpMatrix: InputOutputArray,
|
||||||
|
motionType?: int,
|
||||||
|
criteria?: TermCriteria,
|
||||||
|
inputMask?: InputArray,
|
||||||
|
): double;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param probImage Back projection of the object histogram. See calcBackProject for details.
|
||||||
|
*
|
||||||
|
* @param window Initial search window.
|
||||||
|
*
|
||||||
|
* @param criteria Stop criteria for the iterative search algorithm. returns : Number of iterations
|
||||||
|
* CAMSHIFT took to converge. The function implements the iterative object search algorithm. It takes
|
||||||
|
* the input back projection of an object and the initial position. The mass center in window of the
|
||||||
|
* back projection image is computed and the search window center shifts to the mass center. The
|
||||||
|
* procedure is repeated until the specified number of iterations criteria.maxCount is done or until
|
||||||
|
* the window center shifts by less than criteria.epsilon. The algorithm is used inside CamShift and,
|
||||||
|
* unlike CamShift , the search window size or orientation do not change during the search. You can
|
||||||
|
* simply pass the output of calcBackProject to this function. But better results can be obtained if
|
||||||
|
* you pre-filter the back projection and remove the noise. For example, you can do this by retrieving
|
||||||
|
* connected components with findContours , throwing away contours with small area ( contourArea ), and
|
||||||
|
* rendering the remaining contours with drawContours.
|
||||||
|
*/
|
||||||
|
export declare function meanShift(
|
||||||
|
probImage: InputArray,
|
||||||
|
window: any,
|
||||||
|
criteria: TermCriteria,
|
||||||
|
): int;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function readOpticalFlow loads a flow field from a file and returns it as a single matrix.
|
||||||
|
* Resulting [Mat] has a type CV_32FC2 - floating-point, 2-channel. First channel corresponds to the
|
||||||
|
* flow in the horizontal direction (u), second - vertical (v).
|
||||||
|
*
|
||||||
|
* @param path Path to the file to be loaded
|
||||||
|
*/
|
||||||
|
export declare function readOpticalFlow(path: any): Mat;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function stores a flow field in a file, returns true on success, false otherwise. The flow field
|
||||||
|
* must be a 2-channel, floating-point matrix (CV_32FC2). First channel corresponds to the flow in the
|
||||||
|
* horizontal direction (u), second - vertical (v).
|
||||||
|
*
|
||||||
|
* @param path Path to the file to be written
|
||||||
|
*
|
||||||
|
* @param flow Flow field to be stored
|
||||||
|
*/
|
||||||
|
export declare function writeOpticalFlow(path: any, flow: InputArray): bool;
|
||||||
|
|
||||||
|
export declare const OPTFLOW_USE_INITIAL_FLOW: any; // initializer: = 4
|
||||||
|
|
||||||
|
export declare const OPTFLOW_LK_GET_MIN_EIGENVALS: any; // initializer: = 8
|
||||||
|
|
||||||
|
export declare const OPTFLOW_FARNEBACK_GAUSSIAN: any; // initializer: = 256
|
||||||
|
|
||||||
|
export declare const MOTION_TRANSLATION: any; // initializer: = 0
|
||||||
|
|
||||||
|
export declare const MOTION_EUCLIDEAN: any; // initializer: = 1
|
||||||
|
|
||||||
|
export declare const MOTION_AFFINE: any; // initializer: = 2
|
||||||
|
|
||||||
|
export declare const MOTION_HOMOGRAPHY: any; // initializer: = 3
|
||||||
BIN
opencv-js-4.10.0/test/Lenna.png
Normal file
BIN
opencv-js-4.10.0/test/Lenna.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 463 KiB |
58
opencv-js-4.10.0/test/Mat.test.ts
Normal file
58
opencv-js-4.10.0/test/Mat.test.ts
Normal file
|
|
@ -0,0 +1,58 @@
|
||||||
|
import Jimp from "jimp";
|
||||||
|
import path from "path";
|
||||||
|
import { setupOpenCv, translateException } from "./cv";
|
||||||
|
|
||||||
|
beforeAll(setupOpenCv);
|
||||||
|
|
||||||
|
describe("Mat", () => {
|
||||||
|
it("shoud pass TS type validations", async () => {
|
||||||
|
try {
|
||||||
|
// load local image file with jimp. It supports jpg, png, bmp, tiff and gif:
|
||||||
|
const jimpSrc = await Jimp.read(path.resolve(__dirname, "Lenna.png"));
|
||||||
|
|
||||||
|
// `jimpImage.bitmap` property has the decoded ImageData that we can use to create a cv:Mat
|
||||||
|
const img = cv.matFromImageData(jimpSrc.bitmap);
|
||||||
|
expect(img.channels()).toEqual(4);
|
||||||
|
|
||||||
|
const imgGray = new cv.Mat();
|
||||||
|
cv.cvtColor(img, imgGray, cv.COLOR_RGBA2GRAY);
|
||||||
|
expect(imgGray.channels()).toEqual(1);
|
||||||
|
|
||||||
|
const imgBlur = new cv.Mat();
|
||||||
|
cv.GaussianBlur(
|
||||||
|
imgGray,
|
||||||
|
imgBlur,
|
||||||
|
new cv.Size(5, 5),
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
cv.BORDER_DEFAULT,
|
||||||
|
);
|
||||||
|
|
||||||
|
const imgThresh = new cv.Mat();
|
||||||
|
cv.threshold(
|
||||||
|
imgBlur,
|
||||||
|
imgThresh,
|
||||||
|
0,
|
||||||
|
255,
|
||||||
|
cv.THRESH_BINARY + cv.THRESH_OTSU,
|
||||||
|
);
|
||||||
|
|
||||||
|
const contours = new cv.MatVector();
|
||||||
|
const hierarchy = new cv.Mat();
|
||||||
|
|
||||||
|
cv.findContours(
|
||||||
|
imgThresh,
|
||||||
|
contours,
|
||||||
|
hierarchy,
|
||||||
|
cv.RETR_CCOMP,
|
||||||
|
cv.CHAIN_APPROX_SIMPLE,
|
||||||
|
);
|
||||||
|
|
||||||
|
const channels = new cv.MatVector();
|
||||||
|
cv.split(img, channels);
|
||||||
|
cv.merge(channels, img);
|
||||||
|
} catch (err) {
|
||||||
|
throw translateException(err);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
15
opencv-js-4.10.0/test/Tracker.test.ts
Normal file
15
opencv-js-4.10.0/test/Tracker.test.ts
Normal file
|
|
@ -0,0 +1,15 @@
|
||||||
|
import Jimp from "jimp";
|
||||||
|
import path from "path";
|
||||||
|
import { setupOpenCv, translateException } from "./cv";
|
||||||
|
|
||||||
|
beforeAll(setupOpenCv);
|
||||||
|
|
||||||
|
describe("Tracker", () => {
|
||||||
|
it("shoud pass TS type validations", async () => {
|
||||||
|
try {
|
||||||
|
const tracker = new cv.TrackerMIL();
|
||||||
|
} catch (err) {
|
||||||
|
throw translateException(err);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
21
opencv-js-4.10.0/test/cv.ts
Normal file
21
opencv-js-4.10.0/test/cv.ts
Normal file
|
|
@ -0,0 +1,21 @@
|
||||||
|
import "../src";
|
||||||
|
|
||||||
|
export async function setupOpenCv() {
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
const _cv = require("../dist/opencv");
|
||||||
|
_cv.onRuntimeInitialized = resolve;
|
||||||
|
global.cv = _cv;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
export function translateException(err: any) {
|
||||||
|
if (typeof err === "number") {
|
||||||
|
try {
|
||||||
|
const exception = cv.exceptionFromPtr(err);
|
||||||
|
return exception;
|
||||||
|
} catch (error) {
|
||||||
|
// ignore
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err;
|
||||||
|
}
|
||||||
42
opencv-js-4.10.0/test/cvKeys.test.ts
Normal file
42
opencv-js-4.10.0/test/cvKeys.test.ts
Normal file
|
|
@ -0,0 +1,42 @@
|
||||||
|
import path from "path";
|
||||||
|
import fs from "fs";
|
||||||
|
import * as ts from "typescript";
|
||||||
|
import { setupOpenCv } from "./cv";
|
||||||
|
|
||||||
|
beforeAll(setupOpenCv);
|
||||||
|
|
||||||
|
describe("CV keys", () => {
|
||||||
|
function getObjectKeys(obj: any) {
|
||||||
|
const keys: string[] = [];
|
||||||
|
for (let key in obj) {
|
||||||
|
if (!key.startsWith("dynCall")) {
|
||||||
|
keys.push(key);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// console.log(keys);
|
||||||
|
return keys;
|
||||||
|
}
|
||||||
|
|
||||||
|
it("output CV keys", async () => {
|
||||||
|
const objectNameMap: { [key: string]: any } = {
|
||||||
|
cv: cv,
|
||||||
|
"cv.Mat": new cv.Mat(),
|
||||||
|
};
|
||||||
|
|
||||||
|
const objectKeyMap: { [key: string]: string[] } = {
|
||||||
|
buildInformation: cv.getBuildInformation(),
|
||||||
|
};
|
||||||
|
|
||||||
|
for (const objName in objectNameMap) {
|
||||||
|
const obj = objectNameMap[objName];
|
||||||
|
const keys = getObjectKeys(obj);
|
||||||
|
objectKeyMap[objName] = keys;
|
||||||
|
}
|
||||||
|
|
||||||
|
// write the objectKeyMap to JSON file
|
||||||
|
const jsonString = JSON.stringify(objectKeyMap, null, 2);
|
||||||
|
const fileName = "../doc/cvKeys.json";
|
||||||
|
const filePath = path.join(__dirname, fileName);
|
||||||
|
fs.writeFileSync(filePath, jsonString);
|
||||||
|
});
|
||||||
|
});
|
||||||
15
opencv-js-4.10.0/tsconfig.json
Normal file
15
opencv-js-4.10.0/tsconfig.json
Normal file
|
|
@ -0,0 +1,15 @@
|
||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"target": "esnext",
|
||||||
|
"module": "commonjs",
|
||||||
|
"lib": ["esnext", "dom"],
|
||||||
|
"strict": true,
|
||||||
|
"esModuleInterop": true,
|
||||||
|
"sourceMap": true,
|
||||||
|
"outDir": "./dist",
|
||||||
|
"rootDir": ".",
|
||||||
|
"skipLibCheck": true,
|
||||||
|
"declaration": true
|
||||||
|
},
|
||||||
|
"include": ["src"]
|
||||||
|
}
|
||||||
Loading…
Reference in a new issue