diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..64c64f4e6c6eb08e703b5b06ace269240b8d40f1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +NATURE_texture.png filter=lfs diff=lfs merge=lfs -text +diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so filter=lfs diff=lfs merge=lfs -text +diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o filter=lfs diff=lfs merge=lfs -text +diffvg/dist/diffvg-0.0.1-py3.8-linux-x86_64.egg filter=lfs diff=lfs merge=lfs -text diff --git a/.gradio/certificate.pem b/.gradio/certificate.pem new file mode 100644 index 0000000000000000000000000000000000000000..b85c8037f6b60976b2546fdbae88312c5246d9a3 --- /dev/null +++ b/.gradio/certificate.pem @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw 
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..db3cd019a8d8fbb3c72db260d72342899742dd98 --- /dev/null +++ b/LICENSE @@ -0,0 +1,437 @@ +Attribution-NonCommercial-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. 
+ +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. 
+ Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International +Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial-ShareAlike 4.0 International Public License +("Public License"). To the extent this Public License may be +interpreted as a contract, You are granted the Licensed Rights in +consideration of Your acceptance of these terms and conditions, and the +Licensor grants You such rights in consideration of benefits the +Licensor receives from making the Licensed Material available under +these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-NC-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution, NonCommercial, and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. 
For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + l. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + m. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + n. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. 
Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-NC-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + including for purposes of Section 3(b); and + + c. 
You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. 
However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. 
No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the β€œLicensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. 
\ No newline at end of file diff --git a/NATURE_texture.png b/NATURE_texture.png new file mode 100644 index 0000000000000000000000000000000000000000..b6b74bb3c5596d07b5e08c5e17c3990a86932272 --- /dev/null +++ b/NATURE_texture.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97f6439f62391e14b148c9c9f5192e6e9c01b4050a9116d0b7a516f98d9a28f7 +size 1317867 diff --git a/README.md b/README.md index 7a8d703e573a023d90fa58f72e72d69f9f2a95f6..8466af9c456436a11af4eef23f3ebe3d9ddaf9c6 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,166 @@ --- -title: Textured Word Illustration -emoji: πŸ† -colorFrom: gray -colorTo: red +title: Textured_Word_Illustration +app_file: app.py sdk: gradio sdk_version: 5.9.0 -app_file: app.py -pinned: false --- +# Word-As-Image for Semantic Typography (SIGGRAPH 2023 - Honorable Mention Award) + + + +[![arXiv](https://img.shields.io/badge/πŸ“ƒ-arXiv%20-red.svg)](https://arxiv.org/abs/2303.01818) +[![webpage](https://img.shields.io/badge/🌐-Website%20-blue.svg)](https://wordasimage.github.io/Word-As-Image-Page/) +[![Huggingface space](https://img.shields.io/badge/πŸ€—-Demo%20-yellow.svg)](https://huggingface.co/spaces/SemanticTypography/Word-As-Image) +[![Youtube](https://img.shields.io/badge/πŸ“½οΈ-Video%20-orchid.svg)](https://www.youtube.com/watch?v=9D12a6RCQaw) + +
+
+ +
+

+A few examples of our Word-As-Image illustrations in various fonts and for different textual concepts. The semantically adjusted letters are created +completely automatically using our method, and can then be used for further creative design as we illustrate here.

+ +> Shir Iluz*, Yael Vinker*, Amir Hertz, Daniel Berio, Daniel Cohen-Or, Ariel Shamir +> \* Denotes equal contribution +> +>A word-as-image is a semantic typography technique where a word illustration +presents a visualization of the meaning of the word, while also +preserving its readability. We present a method to create word-as-image +illustrations automatically. This task is highly challenging as it requires +semantic understanding of the word and a creative idea of where and how to +depict these semantics in a visually pleasing and legible manner. We rely on +the remarkable ability of recent large pretrained language-vision models to +distill textual concepts visually. We target simple, concise, black-and-white +designs that convey the semantics clearly. We deliberately do not change the +color or texture of the letters and do not use embellishments. Our method +optimizes the outline of each letter to convey the desired concept, guided by +a pretrained Stable Diffusion model. We incorporate additional loss terms +to ensure the legibility of the text and the preservation of the style of the +font. We show high quality and engaging results on numerous examples +and compare to alternative techniques. + + +## Description +Official implementation of Word-As-Image for Semantic Typography paper. +
+ +## Setup + +1. Clone the repo: +```bash +git clone https://github.com/WordAsImage/Word-As-Image.git +cd Word-As-Image +``` +2. Create a new conda environment and install the libraries: +```bash +conda create --name word python=3.8.15 +conda activate word +pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113 +conda install -y numpy scikit-image +conda install -y -c anaconda cmake +conda install -y -c conda-forge ffmpeg +pip install svgwrite svgpathtools cssutils numba torch-tools scikit-fmm easydict visdom freetype-py shapely +pip install opencv-python==4.5.4.60 +pip install kornia==0.6.8 +pip install wandb +pip install shapely +``` + +3. Install diffusers: +```bash +pip install diffusers==0.8 +pip install transformers scipy ftfy accelerate +``` +4. Install diffvg: +```bash +git clone https://github.com/BachiLi/diffvg.git +cd diffvg +git submodule update --init --recursive +python setup.py install +``` + +5. Paste your HuggingFace [access token](https://huggingface.co/settings/tokens) for StableDiffusion in the TOKEN file. +## Run Experiments +```bash +conda activate word +cd Word-As-Image + +# Please modify the parameters accordingly in the file and run: +bash run_word_as_image.sh + +# Or run : +python code/main.py --experiment --semantic_concept --optimized_letter --seed --font --use_wandb <0/1> --wandb_user +``` +* ```--semantic_concept``` : the semantic concept to insert +* ```--optimized_letter``` : one letter in the word to optimize +* ```--font``` : font name, the .ttf file should be located in code/data/fonts/ + +Optional arguments: +* ```--word``` : The text to work on, default: the semantic concept +* ```--config``` : Path to config file, default: code/config/base.yaml +* ```--experiment``` : You can specify any experiment in the config file, default: conformal_0.5_dist_pixel_100_kernel201 +* ```--log_dir``` : Default: output folder +* ```--prompt_suffix``` : Default: "minimal flat 2d vector. 
lineal color. trending on artstation" + +### Examples +```bash +python code/main.py --semantic_concept "BUNNY" --optimized_letter "Y" --font "KaushanScript-Regular" --seed 0 +``` +
+
+ +
+ + +```bash +python code/main.py --semantic_concept "LEAVES" --word "NATURE" --optimized_letter "T" --font "HobeauxRococeaux-Sherman" --seed 0 +``` +
+
+ +
* Pay attention, as the arguments are case-sensitive, but it can handle both upper and lowercase letters depending on the input letters. + + +## Tips +If the outcome does not meet your quality expectations, you could try the following options: + +1. Adjusting the weight Ξ± of the L_acap loss, which preserves the letter's structure after deformation. +2. Modifying the Οƒ parameter of the low-pass filter used in the L_tone loss, which limits the degree of deviation from the original letter. +3. Changing the number of control points, as this can influence the outputs. +4. Experimenting with different seeds, as each may produce slightly different results. +5. Changing the font type, as this can also result in various outputs. + + + -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +## Acknowledgement +Our implementation is based on Stable Diffusion text-to-image model from Hugging Face's [Diffusers](https://github.com/huggingface/diffusers) library, combined with [Diffvg](https://github.com/BachiLi/diffvg). The framework is built on [Live](https://github.com/Picsart-AI-Research/LIVE-Layerwise-Image-Vectorization). + +## Citation +If you use this code for your research, please cite the following work: +``` +@article{IluzVinker2023, + author = {Iluz, Shir and Vinker, Yael and Hertz, Amir and Berio, Daniel and Cohen-Or, Daniel and Shamir, Ariel}, + title = {Word-As-Image for Semantic Typography}, + year = {2023}, + issue_date = {August 2023}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + volume = {42}, + number = {4}, + issn = {0730-0301}, + url = {https://doi.org/10.1145/3592123}, + doi = {10.1145/3592123}, + journal = {ACM Trans. 
Graph.}, + month = {jul}, + articleno = {151}, + numpages = {11}, + keywords = {semantic typography, SVG, stable diffusion, fonts} +} +``` + +## Licence +This work is licensed under a [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](http://creativecommons.org/licenses/by-nc-sa/4.0/). diff --git a/TOKEN b/TOKEN new file mode 100644 index 0000000000000000000000000000000000000000..66922850b4b5492d5ecd591fa62a6ba9a8cde45a --- /dev/null +++ b/TOKEN @@ -0,0 +1 @@ +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 \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..224f56534fb971e3580e7344c975ea01a4dd346a --- /dev/null +++ b/app.py @@ -0,0 +1,29 @@ + +import gradio as gr + +# Define the function to run the command +def run_command(concept, word, letter): + # Add your logic here for generating the image and saving the filename + # Example placeholders: + generated_image_path = "generated_image.png" # Path to the generated image + saved_filename = f"{concept}_{word}_{letter}.png" # Example saved filename + return generated_image_path, saved_filename + +# Create the Gradio interface +ui = gr.Interface( + fn=run_command, + inputs=[ + gr.Textbox(label="Prompt Semantic Concept", placeholder="Enter a semantic concept, e.g., FLOWER"), + gr.Textbox(label="Prompt Word", placeholder="Enter a word, e.g., FLOWER"), + gr.Textbox(label="Prompt Letter", placeholder="Enter a letter, e.g., O") + ], + outputs=[ + gr.Image(label="Generated Image"), + gr.Textbox(label="Saved Filename") + ], + title="Stable Diffusion Texture Generator", + description="Enter a semantic concept, word, and letter to generate a texture image." 
+) + +# Launch the UI +ui.launch(debug=True) diff --git a/code/__pycache__/bezier.cpython-38.pyc b/code/__pycache__/bezier.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f00a4b7736793e8925149166edf1a8ae0e8d0443 Binary files /dev/null and b/code/__pycache__/bezier.cpython-38.pyc differ diff --git a/code/__pycache__/config.cpython-38.pyc b/code/__pycache__/config.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3f41cdb51bfdea23361fa91f45c16865f355e17 Binary files /dev/null and b/code/__pycache__/config.cpython-38.pyc differ diff --git a/code/__pycache__/losses.cpython-38.pyc b/code/__pycache__/losses.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c7a272312222edfcf054a5f2800901f0eb48ce2 Binary files /dev/null and b/code/__pycache__/losses.cpython-38.pyc differ diff --git a/code/__pycache__/save_svg.cpython-38.pyc b/code/__pycache__/save_svg.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0fab4035f98520e7aa746f236d16b751c21a05c Binary files /dev/null and b/code/__pycache__/save_svg.cpython-38.pyc differ diff --git a/code/__pycache__/ttf.cpython-38.pyc b/code/__pycache__/ttf.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b5fbae2c0ad4476d98ecffb246bb254bff2e170 Binary files /dev/null and b/code/__pycache__/ttf.cpython-38.pyc differ diff --git a/code/__pycache__/utils.cpython-38.pyc b/code/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98c5a06d3a21ee6c06f46db844a7255557f4f9b7 Binary files /dev/null and b/code/__pycache__/utils.cpython-38.pyc differ diff --git a/code/bezier.py b/code/bezier.py new file mode 100644 index 0000000000000000000000000000000000000000..506160936a36dfab196cb3ac0f8b652471528a5b --- /dev/null +++ b/code/bezier.py @@ -0,0 +1,122 @@ +import numpy as np +import matplotlib.pyplot as plt +from scipy.special import 
binom +from numpy.linalg import norm + +def num_bezier(n_ctrl, degree=3): + if type(n_ctrl) == np.ndarray: + n_ctrl = len(n_ctrl) + return int((n_ctrl - 1) / degree) + +def bernstein(n, i): + bi = binom(n, i) + return lambda t, bi=bi, n=n, i=i: bi * t**i * (1 - t)**(n - i) + +def bezier(P, t, d=0): + '''Bezier curve of degree len(P)-1. d is the derivative order (0 gives positions)''' + n = P.shape[0] - 1 + if d > 0: + Q = np.diff(P, axis=0)*n + return bezier(Q, t, d-1) + B = np.vstack([bernstein(n, i)(t) for i, p in enumerate(P)]) + return (P.T @ B).T + +def cubic_bezier(P, t): + return (1.0-t)**3*P[0] + 3*(1.0-t)**2*t*P[1] + 3*(1.0-t)*t**2*P[2] + t**3*P[3] + +def bezier_piecewise(Cp, subd=100, degree=3, d=0): + ''' sample a piecewise Bezier curve given a sequence of control points''' + num = num_bezier(Cp.shape[0], degree) + X = [] + for i in range(num): + P = Cp[i*degree:i*degree+degree+1, :] + t = np.linspace(0, 1., subd)[:-1] + Y = bezier(P, t, d) + X += [Y] + X.append(Cp[-1]) + X = np.vstack(X) + return X + +def compute_beziers(beziers, subd=100, degree=3): + chain = beziers_to_chain(beziers) + return bezier_piecewise(chain, subd, degree) + +def plot_control_polygon(Cp, degree=3, lw=0.5, linecolor=np.ones(3)*0.1): + n_bezier = num_bezier(len(Cp), degree) + for i in range(n_bezier): + cp = Cp[i*degree:i*degree+degree+1, :] + if degree==3: + plt.plot(cp[0:2,0], cp[0:2, 1], ':', color=linecolor, linewidth=lw) + plt.plot(cp[2:,0], cp[2:,1], ':', color=linecolor, linewidth=lw) + plt.plot(cp[:,0], cp[:,1], 'o', color=[0, 0.5, 1.], markersize=4) + else: + plt.plot(cp[:,0], cp[:,1], ':', color=linecolor, linewidth=lw) + plt.plot(cp[:,0], cp[:,1], 'o', color=[0, 0.5, 1.]) + + +def chain_to_beziers(chain, degree=3): + ''' Convert Bezier chain to list of curve segments (4 control points each)''' + num = num_bezier(chain.shape[0], degree) + beziers = [] + for i in range(num): + beziers.append(chain[i*degree:i*degree+degree+1,:]) + return beziers + + +def 
beziers_to_chain(beziers): + ''' Convert list of Bezier curve segments to a piecewise bezier chain (shares vertices)''' + n = len(beziers) + chain = [] + for i in range(n): + chain.append(list(beziers[i][:-1])) + chain.append([beziers[-1][-1]]) + return np.array(sum(chain, [])) + + +def split_cubic(bez, t): + p1, p2, p3, p4 = bez + + p12 = (p2 - p1) * t + p1 + p23 = (p3 - p2) * t + p2 + p34 = (p4 - p3) * t + p3 + + p123 = (p23 - p12) * t + p12 + p234 = (p34 - p23) * t + p23 + + p1234 = (p234 - p123) * t + p123 + + return np.array([p1, p12, p123, p1234]), np.array([p1234, p234, p34, p4]) + + +def approx_arc_length(bez): + c0, c1, c2, c3 = bez + v0 = norm(c1-c0)*0.15 + v1 = norm(-0.558983582205757*c0 + 0.325650248872424*c1 + 0.208983582205757*c2 + 0.024349751127576*c3) + v2 = norm(c3-c0+c2-c1)*0.26666666666666666 + v3 = norm(-0.024349751127576*c0 - 0.208983582205757*c1 - 0.325650248872424*c2 + 0.558983582205757*c3) + v4 = norm(c3-c2)*.15 + return v0 + v1 + v2 + v3 + v4 + + +def subdivide_bezier(bez, thresh): + stack = [bez] + res = [] + while stack: + bez = stack.pop() + l = approx_arc_length(bez) + if l < thresh: + res.append(bez) + else: + b1, b2 = split_cubic(bez, 0.5) + stack += [b2, b1] + return res + +def subdivide_bezier_chain(C, thresh): + beziers = chain_to_beziers(C) + res = [] + for bez in beziers: + res += subdivide_bezier(bez, thresh) + return beziers_to_chain(res) + + + diff --git a/code/config.py b/code/config.py new file mode 100644 index 0000000000000000000000000000000000000000..241c34cff751a7511e38642b7f69369a78f883a7 --- /dev/null +++ b/code/config.py @@ -0,0 +1,104 @@ +import argparse +import os.path as osp +import yaml +import random +from easydict import EasyDict as edict +import numpy.random as npr +import torch +from utils import ( + edict_2_dict, + check_and_create_dir, + update) +import wandb +import warnings +warnings.filterwarnings("ignore") + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--config", 
type=str, default="code/config/base.yaml") + parser.add_argument("--experiment", type=str, default="conformal_0.5_dist_pixel_100_kernel201") + parser.add_argument("--seed", type=int, default=0) + parser.add_argument('--log_dir', metavar='DIR', default="output") + parser.add_argument('--font', type=str, default="none", help="font name") + parser.add_argument('--semantic_concept', type=str, help="the semantic concept to insert") + parser.add_argument('--word', type=str, default="none", help="the text to work on") + parser.add_argument('--prompt_suffix', type=str, default="minimal flat 2d vector. lineal color." + " trending on artstation") + parser.add_argument('--optimized_letter', type=str, default="none", help="the letter in the word to optimize") + parser.add_argument('--batch_size', type=int, default=1) + parser.add_argument('--use_wandb', type=int, default=0) + parser.add_argument('--wandb_user', type=str, default="none") + + cfg = edict() + args = parser.parse_args() + with open('TOKEN', 'r') as f: + setattr(args, 'token', f.read().replace('\n', '')) + cfg.config = args.config + cfg.experiment = args.experiment + cfg.seed = args.seed + cfg.font = args.font + cfg.semantic_concept = args.semantic_concept + cfg.word = cfg.semantic_concept if args.word == "none" else args.word + if " " in cfg.word: + raise ValueError(f'no spaces are allowed') + cfg.caption = f"a {args.semantic_concept}. 
{args.prompt_suffix}" + cfg.log_dir = f"{args.log_dir}/{args.experiment}_{cfg.word}" + if args.optimized_letter in cfg.word: + cfg.optimized_letter = args.optimized_letter + else: + raise ValueError(f'letter should be in word') + cfg.batch_size = args.batch_size + cfg.token = args.token + cfg.use_wandb = args.use_wandb + cfg.wandb_user = args.wandb_user + cfg.letter = f"{args.font}_{args.optimized_letter}_scaled" + cfg.target = f"code/data/init/{cfg.letter}" + + return cfg + + +def set_config(): + + cfg_arg = parse_args() + with open(cfg_arg.config, 'r') as f: + cfg_full = yaml.load(f, Loader=yaml.FullLoader) + + # recursively traverse parent_config pointers in the config dicts + cfg_key = cfg_arg.experiment + cfgs = [cfg_arg] + while cfg_key: + cfgs.append(cfg_full[cfg_key]) + cfg_key = cfgs[-1].get('parent_config', 'baseline') + + # allowing children configs to override their parents + cfg = edict() + for options in reversed(cfgs): + update(cfg, options) + del cfgs + + # set experiment dir + signature = f"{cfg.letter}_concept_{cfg.semantic_concept}_seed_{cfg.seed}" + cfg.experiment_dir = \ + osp.join(cfg.log_dir, cfg.font, signature) + configfile = osp.join(cfg.experiment_dir, 'config.yaml') + print('Config:', cfg) + + # create experiment dir and save config + check_and_create_dir(configfile) + with open(osp.join(configfile), 'w') as f: + yaml.dump(edict_2_dict(cfg), f) + + if cfg.use_wandb: + wandb.init(project="Word-As-Image", entity=cfg.wandb_user, + config=cfg, name=f"{signature}", id=wandb.util.generate_id()) + + if cfg.seed is not None: + random.seed(cfg.seed) + npr.seed(cfg.seed) + torch.manual_seed(cfg.seed) + torch.backends.cudnn.benchmark = False + else: + assert False + + return cfg diff --git a/code/config/base.yaml b/code/config/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d4a619eaddd23670be053a8f4c857eae3aeba57b --- /dev/null +++ b/code/config/base.yaml @@ -0,0 +1,46 @@ +baseline: + parent_config: '' + save: + init: 
true + image: true + video: true + video_frame_freq: 1 + trainable: + point: true + lr_base: + point: 1 + lr: + lr_init: 0.002 + lr_final: 0.0008 + lr_delay_mult: 0.1 + lr_delay_steps: 100 + num_iter: 2 + render_size: 600 + cut_size: 512 + level_of_cc: 0 # 0 - original number of cc / 1 - recommended / 2 - more control points + seed: 0 + diffusion: + model: "runwayml/stable-diffusion-v1-5" + timesteps: 1000 + guidance_scale: 100 + loss: + use_sds_loss: true + tone: + use_tone_loss: false + conformal: + use_conformal_loss: false + +conformal_0.5_dist_pixel_100_kernel201: + parent_config: baseline + level_of_cc: 1 + loss: + tone: + use_tone_loss: true + dist_loss_weight: 100 + pixel_dist_kernel_blur: 201 + pixel_dist_sigma: 30 + conformal: + use_conformal_loss: true + angeles_w: 0.5 + + diff --git a/code/data/fonts/Bell MT.ttf b/code/data/fonts/Bell MT.ttf new file mode 100644 index 0000000000000000000000000000000000000000..3f426758c482747f1ea1573eb1df315b37e49618 Binary files /dev/null and b/code/data/fonts/Bell MT.ttf differ diff --git a/code/data/fonts/DeliusUnicase-Regular.ttf b/code/data/fonts/DeliusUnicase-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..96ef9f495123eb1bf220d0f3eccd7277f8195054 Binary files /dev/null and b/code/data/fonts/DeliusUnicase-Regular.ttf differ diff --git a/code/data/fonts/HobeauxRococeaux-Sherman.ttf b/code/data/fonts/HobeauxRococeaux-Sherman.ttf new file mode 100644 index 0000000000000000000000000000000000000000..0d3c958fb3e11b099028cb0f3d7b32eff11ed3f3 Binary files /dev/null and b/code/data/fonts/HobeauxRococeaux-Sherman.ttf differ diff --git a/code/data/fonts/IndieFlower-Regular.ttf b/code/data/fonts/IndieFlower-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..3774ef55d4dd8d0d272f602542bbbf444ebbbb23 Binary files /dev/null and b/code/data/fonts/IndieFlower-Regular.ttf differ diff --git a/code/data/fonts/JosefinSans-Light.ttf b/code/data/fonts/JosefinSans-Light.ttf new 
file mode 100644 index 0000000000000000000000000000000000000000..33ae128dc1b23da5321e3c711c36e3c88b1a668e Binary files /dev/null and b/code/data/fonts/JosefinSans-Light.ttf differ diff --git a/code/data/fonts/KaushanScript-Regular.ttf b/code/data/fonts/KaushanScript-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..bcda31429ff79f39b82283912cd26628f0c257cc Binary files /dev/null and b/code/data/fonts/KaushanScript-Regular.ttf differ diff --git a/code/data/fonts/LuckiestGuy-Regular.ttf b/code/data/fonts/LuckiestGuy-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..02c71fafc49d4c13c844ec945ad9d4993d2eabc9 Binary files /dev/null and b/code/data/fonts/LuckiestGuy-Regular.ttf differ diff --git a/code/data/fonts/Noteworthy-Bold.ttf b/code/data/fonts/Noteworthy-Bold.ttf new file mode 100644 index 0000000000000000000000000000000000000000..2ad4e118fe288de23df75c8dd6c802f0461aab2c Binary files /dev/null and b/code/data/fonts/Noteworthy-Bold.ttf differ diff --git a/code/data/fonts/Quicksand.ttf b/code/data/fonts/Quicksand.ttf new file mode 100644 index 0000000000000000000000000000000000000000..0ec221996683fb1820d5515172f71243731d0e2b Binary files /dev/null and b/code/data/fonts/Quicksand.ttf differ diff --git a/code/data/fonts/Saira-Regular.ttf b/code/data/fonts/Saira-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..315c0f31af395af5de6c2cff2495687c2d913542 Binary files /dev/null and b/code/data/fonts/Saira-Regular.ttf differ diff --git a/code/data/init/HobeauxRococeaux-Sherman_A.svg b/code/data/init/HobeauxRococeaux-Sherman_A.svg new file mode 100644 index 0000000000000000000000000000000000000000..a90f3be9c0a9d86971e20558fb3a2a306525b156 --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_A.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_A_scaled.svg b/code/data/init/HobeauxRococeaux-Sherman_A_scaled.svg new file mode 100644 index 
0000000000000000000000000000000000000000..29569cdfc49ef7bef9e23a8c56e1f452ddc24818 --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_A_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_E.svg b/code/data/init/HobeauxRococeaux-Sherman_E.svg new file mode 100644 index 0000000000000000000000000000000000000000..e17f8137d0a66e7b90b33880dfdb8efa1351efc8 --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_E.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_E_scaled.svg b/code/data/init/HobeauxRococeaux-Sherman_E_scaled.svg new file mode 100644 index 0000000000000000000000000000000000000000..cd641d1f064a728a27c2023564c2a1ce0b764b7c --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_E_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_N.svg b/code/data/init/HobeauxRococeaux-Sherman_N.svg new file mode 100644 index 0000000000000000000000000000000000000000..48aeef943cca1d5eae717627d5e506156242059f --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_N.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_NATURE.svg b/code/data/init/HobeauxRococeaux-Sherman_NATURE.svg new file mode 100644 index 0000000000000000000000000000000000000000..ffadf95688f0df9f10c7550bc1b2d5229f9785ea --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_NATURE.svg @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_NATURE_scaled.svg b/code/data/init/HobeauxRococeaux-Sherman_NATURE_scaled.svg new file mode 100644 index 0000000000000000000000000000000000000000..5bc538d9e5f21798c51f8e013bdfd3961cd6f568 --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_NATURE_scaled.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_N_scaled.svg b/code/data/init/HobeauxRococeaux-Sherman_N_scaled.svg new file mode 100644 index 
0000000000000000000000000000000000000000..ddd652b128fa80d9f01971a10c0445d79852fae4 --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_N_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_R.svg b/code/data/init/HobeauxRococeaux-Sherman_R.svg new file mode 100644 index 0000000000000000000000000000000000000000..e2677337c1d6609cde74939cf18a2812f6c8290f --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_R.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_R_scaled.svg b/code/data/init/HobeauxRococeaux-Sherman_R_scaled.svg new file mode 100644 index 0000000000000000000000000000000000000000..a027f2518c0e99f7df57ad3790cac3d85490becc --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_R_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_T.svg b/code/data/init/HobeauxRococeaux-Sherman_T.svg new file mode 100644 index 0000000000000000000000000000000000000000..64b4e52192dd1843b5e7bab8cd2bcb659ac1934d --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_T.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_T_scaled.svg b/code/data/init/HobeauxRococeaux-Sherman_T_scaled.svg new file mode 100644 index 0000000000000000000000000000000000000000..37e0e384a78bb26b26214e8350b95e7595fd8f16 --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_T_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_U.svg b/code/data/init/HobeauxRococeaux-Sherman_U.svg new file mode 100644 index 0000000000000000000000000000000000000000..693ddead049cbac8a39f8387791dbf513740aadf --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_U.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/HobeauxRococeaux-Sherman_U_scaled.svg b/code/data/init/HobeauxRococeaux-Sherman_U_scaled.svg new file mode 100644 index 
0000000000000000000000000000000000000000..6c4dc34d916f01406ba1b0ba3614351c34fc5300 --- /dev/null +++ b/code/data/init/HobeauxRococeaux-Sherman_U_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/data/init/KaushanScript-Regular_B.svg b/code/data/init/KaushanScript-Regular_B.svg new file mode 100644 index 0000000000000000000000000000000000000000..875ff422fecc683359c25f4c350dadecf5290cdd --- /dev/null +++ b/code/data/init/KaushanScript-Regular_B.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/KaushanScript-Regular_BUNNY.svg b/code/data/init/KaushanScript-Regular_BUNNY.svg new file mode 100644 index 0000000000000000000000000000000000000000..60a5110d7facd4467a522e3188ab9b559ccb05b6 --- /dev/null +++ b/code/data/init/KaushanScript-Regular_BUNNY.svg @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/code/data/init/KaushanScript-Regular_BUNNY_scaled.svg b/code/data/init/KaushanScript-Regular_BUNNY_scaled.svg new file mode 100644 index 0000000000000000000000000000000000000000..96bad1f4ec7d1057bbb2c3d9d43d08bb53c45e6a --- /dev/null +++ b/code/data/init/KaushanScript-Regular_BUNNY_scaled.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/code/data/init/KaushanScript-Regular_B_scaled.svg b/code/data/init/KaushanScript-Regular_B_scaled.svg new file mode 100644 index 0000000000000000000000000000000000000000..cb22b764cc796829281ed964770a15113d12bc8a --- /dev/null +++ b/code/data/init/KaushanScript-Regular_B_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/data/init/KaushanScript-Regular_N.svg b/code/data/init/KaushanScript-Regular_N.svg new file mode 100644 index 0000000000000000000000000000000000000000..0bc2e8f8ee5f524052c6e1d5e2a64b609b1dc953 --- /dev/null +++ b/code/data/init/KaushanScript-Regular_N.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/KaushanScript-Regular_N_scaled.svg b/code/data/init/KaushanScript-Regular_N_scaled.svg new file mode 100644 index 
0000000000000000000000000000000000000000..5b584bc9e50d22e6ecc37c3eeb8200940ea1b2b0 --- /dev/null +++ b/code/data/init/KaushanScript-Regular_N_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/data/init/KaushanScript-Regular_U.svg b/code/data/init/KaushanScript-Regular_U.svg new file mode 100644 index 0000000000000000000000000000000000000000..64953dea3209f23d74139ca683da043ecbfdf5df --- /dev/null +++ b/code/data/init/KaushanScript-Regular_U.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/KaushanScript-Regular_U_scaled.svg b/code/data/init/KaushanScript-Regular_U_scaled.svg new file mode 100644 index 0000000000000000000000000000000000000000..0abb43e682828a9c7fe4dd10014efcaf3f90c007 --- /dev/null +++ b/code/data/init/KaushanScript-Regular_U_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/data/init/KaushanScript-Regular_Y.svg b/code/data/init/KaushanScript-Regular_Y.svg new file mode 100644 index 0000000000000000000000000000000000000000..7684165cd7beb8167a6c4305254b1dd8c7829d0c --- /dev/null +++ b/code/data/init/KaushanScript-Regular_Y.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/code/data/init/KaushanScript-Regular_Y_scaled.svg b/code/data/init/KaushanScript-Regular_Y_scaled.svg new file mode 100644 index 0000000000000000000000000000000000000000..24cf735e4f71ff914afd3799fba8f03523c11129 --- /dev/null +++ b/code/data/init/KaushanScript-Regular_Y_scaled.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/code/losses.py b/code/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..e881b6ac927e47f599338de42683bd26e7110136 --- /dev/null +++ b/code/losses.py @@ -0,0 +1,178 @@ +import torch.nn as nn +import torchvision +from scipy.spatial import Delaunay +import torch +import numpy as np +from torch.nn import functional as nnf +from easydict import EasyDict +from shapely.geometry import Point +from shapely.geometry.polygon import Polygon + +from diffusers import StableDiffusionPipeline + +class SDSLoss(nn.Module): + def 
__init__(self, cfg, device): + super(SDSLoss, self).__init__() + self.cfg = cfg + self.device = device + self.pipe = StableDiffusionPipeline.from_pretrained(cfg.diffusion.model, + torch_dtype=torch.float16, use_auth_token=cfg.token) + self.pipe = self.pipe.to(self.device) + # default scheduler: PNDMScheduler(beta_start=0.00085, beta_end=0.012, + # beta_schedule="scaled_linear", num_train_timesteps=1000) + self.alphas = self.pipe.scheduler.alphas_cumprod.to(self.device) + self.sigmas = (1 - self.pipe.scheduler.alphas_cumprod).to(self.device) + + self.text_embeddings = None + self.embed_text() + + def embed_text(self): + # tokenizer and embed text + text_input = self.pipe.tokenizer(self.cfg.caption, padding="max_length", + max_length=self.pipe.tokenizer.model_max_length, + truncation=True, return_tensors="pt") + uncond_input = self.pipe.tokenizer([""], padding="max_length", + max_length=text_input.input_ids.shape[-1], + return_tensors="pt") + with torch.no_grad(): + text_embeddings = self.pipe.text_encoder(text_input.input_ids.to(self.device))[0] + uncond_embeddings = self.pipe.text_encoder(uncond_input.input_ids.to(self.device))[0] + self.text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + self.text_embeddings = self.text_embeddings.repeat_interleave(self.cfg.batch_size, 0) + del self.pipe.tokenizer + del self.pipe.text_encoder + + + def forward(self, x_aug): + sds_loss = 0 + + # encode rendered image + x = x_aug * 2. - 1. 
+ with torch.cuda.amp.autocast(): + init_latent_z = (self.pipe.vae.encode(x).latent_dist.sample()) + latent_z = 0.18215 * init_latent_z # scaling_factor * init_latents + + with torch.inference_mode(): + # sample timesteps + timestep = torch.randint( + low=50, + high=min(950, self.cfg.diffusion.timesteps) - 1, # avoid highest timestep | diffusion.timesteps=1000 + size=(latent_z.shape[0],), + device=self.device, dtype=torch.long) + + # add noise + eps = torch.randn_like(latent_z) + # zt = alpha_t * latent_z + sigma_t * eps + noised_latent_zt = self.pipe.scheduler.add_noise(latent_z, eps, timestep) + + # denoise + z_in = torch.cat([noised_latent_zt] * 2) # expand latents for classifier free guidance + timestep_in = torch.cat([timestep] * 2) + with torch.autocast(device_type="cuda", dtype=torch.float16): + eps_t_uncond, eps_t = self.pipe.unet(z_in, timestep, encoder_hidden_states=self.text_embeddings).sample.float().chunk(2) + + eps_t = eps_t_uncond + self.cfg.diffusion.guidance_scale * (eps_t - eps_t_uncond) + + # w = alphas[timestep]^0.5 * (1 - alphas[timestep]) = alphas[timestep]^0.5 * sigmas[timestep] + grad_z = self.alphas[timestep]**0.5 * self.sigmas[timestep] * (eps_t - eps) + assert torch.isfinite(grad_z).all() + grad_z = torch.nan_to_num(grad_z.detach().float(), 0.0, 0.0, 0.0) + + sds_loss = grad_z.clone() * latent_z + del grad_z + + sds_loss = sds_loss.sum(1).mean() + return sds_loss + + +class ToneLoss(nn.Module): + def __init__(self, cfg): + super(ToneLoss, self).__init__() + self.dist_loss_weight = cfg.loss.tone.dist_loss_weight + self.im_init = None + self.cfg = cfg + self.mse_loss = nn.MSELoss() + self.blurrer = torchvision.transforms.GaussianBlur(kernel_size=(cfg.loss.tone.pixel_dist_kernel_blur, + cfg.loss.tone.pixel_dist_kernel_blur), sigma=(cfg.loss.tone.pixel_dist_sigma)) + + def set_image_init(self, im_init): + self.im_init = im_init.permute(2, 0, 1).unsqueeze(0) + self.init_blurred = self.blurrer(self.im_init) + + + def get_scheduler(self, 
step=None): + if step is not None: + return self.dist_loss_weight * np.exp(-(1/5)*((step-300)/(20)) ** 2) + else: + return self.dist_loss_weight + + def forward(self, cur_raster, step=None): + blurred_cur = self.blurrer(cur_raster) + return self.mse_loss(self.init_blurred.detach(), blurred_cur) * self.get_scheduler(step) + + +class ConformalLoss: + def __init__(self, parameters: EasyDict, device: torch.device, target_letter: str, shape_groups): + self.parameters = parameters + self.target_letter = target_letter + self.shape_groups = shape_groups + self.faces = self.init_faces(device) + self.faces_roll_a = [torch.roll(self.faces[i], 1, 1) for i in range(len(self.faces))] + + with torch.no_grad(): + self.angles = [] + self.reset() + + + def get_angles(self, points: torch.Tensor) -> torch.Tensor: + angles_ = [] + for i in range(len(self.faces)): + triangles = points[self.faces[i]] + triangles_roll_a = points[self.faces_roll_a[i]] + edges = triangles_roll_a - triangles + length = edges.norm(dim=-1) + edges = edges / (length + 1e-1)[:, :, None] + edges_roll = torch.roll(edges, 1, 1) + cosine = torch.einsum('ned,ned->ne', edges, edges_roll) + angles = torch.arccos(cosine) + angles_.append(angles) + return angles_ + + def get_letter_inds(self, letter_to_insert): + for group, l in zip(self.shape_groups, self.target_letter): + if l == letter_to_insert: + letter_inds = group.shape_ids + return letter_inds[0], letter_inds[-1], len(letter_inds) + + def reset(self): + points = torch.cat([point.clone().detach() for point in self.parameters.point]) + self.angles = self.get_angles(points) + + def init_faces(self, device: torch.device) -> torch.tensor: + faces_ = [] + for j, c in enumerate(self.target_letter): + points_np = [self.parameters.point[i].clone().detach().cpu().numpy() for i in range(len(self.parameters.point))] + start_ind, end_ind, shapes_per_letter = self.get_letter_inds(c) + print(c, start_ind, end_ind) + holes = [] + if shapes_per_letter > 1: + holes = 
points_np[start_ind+1:end_ind] + poly = Polygon(points_np[start_ind], holes=holes) + poly = poly.buffer(0) + points_np = np.concatenate(points_np) + faces = Delaunay(points_np).simplices + is_intersect = np.array([poly.contains(Point(points_np[face].mean(0))) for face in faces], dtype=bool) + faces_.append(torch.from_numpy(faces[is_intersect]).to(device, dtype=torch.int64)) + return faces_ + + def __call__(self) -> torch.Tensor: + loss_angles = 0 + points = torch.cat(self.parameters.point) + angles = self.get_angles(points) + for i in range(len(self.faces)): + loss_angles += (nnf.mse_loss(angles[i], self.angles[i])) + return loss_angles + + + + diff --git a/code/main.py b/code/main.py new file mode 100644 index 0000000000000000000000000000000000000000..be77f4aebcc49a6159a7c01f9d7c68eecda9abfa --- /dev/null +++ b/code/main.py @@ -0,0 +1,185 @@ +from typing import Mapping +import os +from tqdm import tqdm +from easydict import EasyDict as edict +import matplotlib.pyplot as plt +import torch +from torch.optim.lr_scheduler import LambdaLR +import pydiffvg +import save_svg +from losses import SDSLoss, ToneLoss, ConformalLoss +from config import set_config +from utils import ( + check_and_create_dir, + get_data_augs, + save_image, + preprocess, + learning_rate_decay, + combine_word, + create_video) +import wandb +import warnings +warnings.filterwarnings("ignore") + +pydiffvg.set_print_timing(False) +gamma = 1.0 + + +def init_shapes(svg_path, trainable: Mapping[str, bool]): + + svg = f'{svg_path}.svg' + canvas_width, canvas_height, shapes_init, shape_groups_init = pydiffvg.svg_to_scene(svg) + + parameters = edict() + + # path points + if trainable.point: + parameters.point = [] + for path in shapes_init: + path.points.requires_grad = True + parameters.point.append(path.points) + + return shapes_init, shape_groups_init, parameters + + +if __name__ == "__main__": + + cfg = set_config() + + dir_path = "/content/Word-As-Image/output_svgs" + os.makedirs(dir_path, 
exist_ok=True) + + # use GPU if available + pydiffvg.set_use_gpu(torch.cuda.is_available()) + device = pydiffvg.get_device() + + print("preprocessing") + preprocess(cfg.font, cfg.word, cfg.optimized_letter, cfg.level_of_cc) + + if cfg.loss.use_sds_loss: + sds_loss = SDSLoss(cfg, device) + + h, w = cfg.render_size, cfg.render_size + + data_augs = get_data_augs(cfg.cut_size) + + render = pydiffvg.RenderFunction.apply + + # initialize shape + print('initializing shape') + shapes, shape_groups, parameters = init_shapes(svg_path=cfg.target, trainable=cfg.trainable) + + scene_args = pydiffvg.RenderFunction.serialize_scene(w, h, shapes, shape_groups) + img_init = render(w, h, 2, 2, 0, None, *scene_args) + img_init = img_init[:, :, 3:4] * img_init[:, :, :3] + \ + torch.ones(img_init.shape[0], img_init.shape[1], 3, device=device) * (1 - img_init[:, :, 3:4]) + img_init = img_init[:, :, :3] + if cfg.use_wandb: + plt.imshow(img_init.detach().cpu()) + wandb.log({"init": wandb.Image(plt)}, step=0) + plt.close() + + if cfg.loss.tone.use_tone_loss: + tone_loss = ToneLoss(cfg) + tone_loss.set_image_init(img_init) + + if cfg.save.init: + print('saving init') + filename = os.path.join( + cfg.experiment_dir, "svg-init", "init.svg") + check_and_create_dir(filename) + save_svg.save_svg(filename, w, h, shapes, shape_groups) + + num_iter = cfg.num_iter + pg = [{'params': parameters["point"], 'lr': cfg.lr_base["point"]}] + optim = torch.optim.Adam(pg, betas=(0.9, 0.9), eps=1e-6) + + if cfg.loss.conformal.use_conformal_loss: + conformal_loss = ConformalLoss(parameters, device, cfg.optimized_letter, shape_groups) + + lr_lambda = lambda step: learning_rate_decay(step, cfg.lr.lr_init, cfg.lr.lr_final, num_iter, + lr_delay_steps=cfg.lr.lr_delay_steps, + lr_delay_mult=cfg.lr.lr_delay_mult) / cfg.lr.lr_init + + scheduler = LambdaLR(optim, lr_lambda=lr_lambda, last_epoch=-1) # lr.base * lrlambda_f + + print("start training") + # training loop + t_range = tqdm(range(num_iter)) + for step in 
t_range: + if cfg.use_wandb: + wandb.log({"learning_rate": optim.param_groups[0]['lr']}, step=step) + optim.zero_grad() + + # render image + scene_args = pydiffvg.RenderFunction.serialize_scene(w, h, shapes, shape_groups) + img = render(w, h, 2, 2, step, None, *scene_args) + + # compose image with white background + img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + + if cfg.save.video and (step % cfg.save.video_frame_freq == 0 or step == num_iter - 1): + save_image(img, os.path.join(cfg.experiment_dir, "video-png", f"iter{step:04d}.png"), gamma) + filename = os.path.join( + cfg.experiment_dir, "video-svg", f"iter{step:04d}.svg") + check_and_create_dir(filename) + save_svg.save_svg( + filename, w, h, shapes, shape_groups) + if cfg.use_wandb: + plt.imshow(img.detach().cpu()) + wandb.log({"img": wandb.Image(plt)}, step=step) + plt.close() + + x = img.unsqueeze(0).permute(0, 3, 1, 2) # HWC -> NCHW + x = x.repeat(cfg.batch_size, 1, 1, 1) + x_aug = data_augs.forward(x) + + # compute diffusion loss per pixel + loss = sds_loss(x_aug) + if cfg.use_wandb: + wandb.log({"sds_loss": loss.item()}, step=step) + + if cfg.loss.tone.use_tone_loss: + tone_loss_res = tone_loss(x, step) + if cfg.use_wandb: + wandb.log({"dist_loss": tone_loss_res}, step=step) + loss = loss + tone_loss_res + + if cfg.loss.conformal.use_conformal_loss: + loss_angles = conformal_loss() + loss_angles = cfg.loss.conformal.angeles_w * loss_angles + if cfg.use_wandb: + wandb.log({"loss_angles": loss_angles}, step=step) + loss = loss + loss_angles + + t_range.set_postfix({'loss': loss.item()}) + loss.backward() + optim.step() + scheduler.step() + + filename = os.path.join( + cfg.experiment_dir, "output-svg", "output.svg") + check_and_create_dir(filename) + save_svg.save_svg( + filename, w, h, shapes, shape_groups) + + combine_word(cfg.word, cfg.optimized_letter, cfg.font, cfg.experiment_dir) + + if cfg.save.image: + 
filename = os.path.join( + cfg.experiment_dir, "output-png", "output.png") + check_and_create_dir(filename) + imshow = img.detach().cpu() + pydiffvg.imwrite(imshow, filename, gamma=gamma) + if cfg.use_wandb: + plt.imshow(img.detach().cpu()) + wandb.log({"img": wandb.Image(plt)}, step=step) + plt.close() + + if cfg.save.video: + print("saving video") + create_video(cfg.num_iter, cfg.experiment_dir, cfg.save.video_frame_freq) + + if cfg.use_wandb: + wandb.finish() diff --git a/code/save_svg.py b/code/save_svg.py new file mode 100644 index 0000000000000000000000000000000000000000..6479e18064f354be3daa14554aef525aa8610dca --- /dev/null +++ b/code/save_svg.py @@ -0,0 +1,155 @@ +import torch +import pydiffvg +import xml.etree.ElementTree as etree +from xml.dom import minidom +def prettify(elem): + """Return a pretty-printed XML string for the Element. + """ + rough_string = etree.tostring(elem, 'utf-8') + reparsed = minidom.parseString(rough_string) + return reparsed.toprettyxml(indent=" ") +def save_svg(filename, width, height, shapes, shape_groups, use_gamma = False, background=None): + root = etree.Element('svg') + root.set('version', '1.1') + root.set('xmlns', 'http://www.w3.org/2000/svg') + root.set('width', str(width)) + root.set('height', str(height)) + if background is not None: + print(f"setting background to {background}") + root.set('style', str(background)) + defs = etree.SubElement(root, 'defs') + g = etree.SubElement(root, 'g') + if use_gamma: + f = etree.SubElement(defs, 'filter') + f.set('id', 'gamma') + f.set('x', '0') + f.set('y', '0') + f.set('width', '100%') + f.set('height', '100%') + gamma = etree.SubElement(f, 'feComponentTransfer') + gamma.set('color-interpolation-filters', 'sRGB') + feFuncR = etree.SubElement(gamma, 'feFuncR') + feFuncR.set('type', 'gamma') + feFuncR.set('amplitude', str(1)) + feFuncR.set('exponent', str(1/2.2)) + feFuncG = etree.SubElement(gamma, 'feFuncG') + feFuncG.set('type', 'gamma') + feFuncG.set('amplitude', str(1)) + 
feFuncG.set('exponent', str(1/2.2)) + feFuncB = etree.SubElement(gamma, 'feFuncB') + feFuncB.set('type', 'gamma') + feFuncB.set('amplitude', str(1)) + feFuncB.set('exponent', str(1/2.2)) + feFuncA = etree.SubElement(gamma, 'feFuncA') + feFuncA.set('type', 'gamma') + feFuncA.set('amplitude', str(1)) + feFuncA.set('exponent', str(1/2.2)) + g.set('style', 'filter:url(#gamma)') + # Store color + for i, shape_group in enumerate(shape_groups): + def add_color(shape_color, name): + if isinstance(shape_color, pydiffvg.LinearGradient): + lg = shape_color + color = etree.SubElement(defs, 'linearGradient') + color.set('id', name) + color.set('x1', str(lg.begin[0].item()/width)) + color.set('y1', str(lg.begin[1].item()/height)) + color.set('x2', str(lg.end[0].item()/width)) + color.set('y2', str(lg.end[1].item()/height)) + offsets = lg.offsets.data.cpu().numpy() + stop_colors = lg.stop_colors.data.cpu().numpy() + for j in range(offsets.shape[0]): + stop = etree.SubElement(color, 'stop') + stop.set('offset', str(offsets[j])) + c = lg.stop_colors[j, :] + stop.set('stop-color', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + stop.set('stop-opacity', '{}'.format(c[3])) + if isinstance(shape_color, pydiffvg.RadialGradient): + lg = shape_color + color = etree.SubElement(defs, 'radialGradient') + color.set('id', name) + color.set('cx', str(lg.center[0].item()/width)) + color.set('cy', str(lg.center[1].item()/height)) + # this only support width=height + color.set('r', str(lg.radius[0].item()/width)) + offsets = lg.offsets.data.cpu().numpy() + stop_colors = lg.stop_colors.data.cpu().numpy() + for j in range(offsets.shape[0]): + stop = etree.SubElement(color, 'stop') + stop.set('offset', str(offsets[j])) + c = lg.stop_colors[j, :] + stop.set('stop-color', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + stop.set('stop-opacity', '{}'.format(c[3])) + if shape_group.fill_color is not None: + 
add_color(shape_group.fill_color, 'shape_{}_fill'.format(i)) + if shape_group.stroke_color is not None: + add_color(shape_group.stroke_color, 'shape_{}_stroke'.format(i)) + for i, shape_group in enumerate(shape_groups): + # shape = shapes[shape_group.shape_ids[0]] + for j,id in enumerate(shape_group.shape_ids): + shape = shapes[id] + if isinstance(shape, pydiffvg.Path): + if j == 0: + shape_node = etree.SubElement(g, 'path') + path_str = '' + # shape_node = etree.SubElement(g, 'path') + num_segments = shape.num_control_points.shape[0] + num_control_points = shape.num_control_points.data.cpu().numpy() + points = shape.points.data.cpu().numpy() + num_points = shape.points.shape[0] + path_str += 'M {} {}'.format(points[0, 0], points[0, 1]) + point_id = 1 + for j in range(0, num_segments): + if num_control_points[j] == 0: + p = point_id % num_points + path_str += ' L {} {}'.format(\ + points[p, 0], points[p, 1]) + point_id += 1 + elif num_control_points[j] == 1: + p1 = (point_id + 1) % num_points + path_str += ' Q {} {} {} {}'.format(\ + points[point_id, 0], points[point_id, 1], + points[p1, 0], points[p1, 1]) + point_id += 2 + elif num_control_points[j] == 2: + p2 = (point_id + 2) % num_points + path_str += ' C {} {} {} {} {} {}'.format(\ + points[point_id, 0], points[point_id, 1], + points[point_id + 1, 0], points[point_id + 1, 1], + points[p2, 0], points[p2, 1]) + point_id += 3 + else: + assert(False) + # shape_node.set('stroke-width', str(2 * shape.stroke_width.data.cpu().item())) + shape_node.set('stroke-width', str(0)) # no strokes + if shape_group.fill_color is not None: + if isinstance(shape_group.fill_color, pydiffvg.LinearGradient): + shape_node.set('fill', 'url(#shape_{}_fill)'.format(i)) + elif isinstance(shape_group.fill_color, pydiffvg.RadialGradient): + shape_node.set('fill', 'url(#shape_{}_fill)'.format(i)) + else: + c = shape_group.fill_color.data.cpu().numpy() + shape_node.set('fill', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), 
int(255 * c[2]))) + shape_node.set('opacity', str(c[3])) + else: + shape_node.set('fill', 'none') + if shape_group.stroke_color is not None: + if isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): + shape_node.set('stroke', 'url(#shape_{}_stroke)'.format(i)) + elif isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): + shape_node.set('stroke', 'url(#shape_{}_stroke)'.format(i)) + else: + c = shape_group.stroke_color.data.cpu().numpy() + shape_node.set('stroke', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + shape_node.set('stroke-opacity', str(c[3])) + shape_node.set('stroke-linecap', 'round') + shape_node.set('stroke-linejoin', 'round') + + shape_node.set('d', path_str) + + with open(filename, "w") as f: + f.write(prettify(root)) diff --git a/code/ttf.py b/code/ttf.py new file mode 100644 index 0000000000000000000000000000000000000000..97988ab78c6847de454caa51d8c705ded152259c --- /dev/null +++ b/code/ttf.py @@ -0,0 +1,265 @@ +from importlib import reload +import os +import numpy as np +import bezier +import freetype as ft +import pydiffvg +import torch +import save_svg + +device = torch.device("cuda" if ( + torch.cuda.is_available() and torch.cuda.device_count() > 0) else "cpu") + +reload(bezier) + +def fix_single_svg(svg_path, all_word=False): + target_h_letter = 360 + target_canvas_width, target_canvas_height = 600, 600 + + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_path) + + letter_h = canvas_height + letter_w = canvas_width + + if all_word: + if letter_w > letter_h: + scale_canvas_w = target_h_letter / letter_w + hsize = int(letter_h * scale_canvas_w) + scale_canvas_h = hsize / letter_h + else: + scale_canvas_h = target_h_letter / letter_h + wsize = int(letter_w * scale_canvas_h) + scale_canvas_w = wsize / letter_w + else: + scale_canvas_h = target_h_letter / letter_h + wsize = int(letter_w * scale_canvas_h) + scale_canvas_w = wsize / letter_w + + for num, p in 
enumerate(shapes): + p.points[:, 0] = p.points[:, 0] * scale_canvas_w + p.points[:, 1] = p.points[:, 1] * scale_canvas_h + target_h_letter + + w_min, w_max = min([torch.min(p.points[:, 0]) for p in shapes]), max([torch.max(p.points[:, 0]) for p in shapes]) + h_min, h_max = min([torch.min(p.points[:, 1]) for p in shapes]), max([torch.max(p.points[:, 1]) for p in shapes]) + + for num, p in enumerate(shapes): + p.points[:, 0] = p.points[:, 0] + target_canvas_width/2 - int(w_min + (w_max - w_min) / 2) + p.points[:, 1] = p.points[:, 1] + target_canvas_height/2 - int(h_min + (h_max - h_min) / 2) + + output_path = f"{svg_path[:-4]}_scaled.svg" + save_svg.save_svg(output_path, target_canvas_width, target_canvas_height, shapes, shape_groups) + + +def normalize_letter_size(dest_path, font, txt): + fontname = os.path.splitext(os.path.basename(font))[0] + for i, c in enumerate(txt): + fname = f"{dest_path}/{fontname}_{c}.svg" + fname = fname.replace(" ", "_") + fix_single_svg(fname) + + fname = f"{dest_path}/{fontname}_{txt}.svg" + fname = fname.replace(" ", "_") + fix_single_svg(fname, all_word=True) + + +def glyph_to_cubics(face, x=0): + ''' Convert current font face glyph to cubic beziers''' + + def linear_to_cubic(Q): + a, b = Q + return [a + (b - a) * t for t in np.linspace(0, 1, 4)] + + def quadratic_to_cubic(Q): + return [Q[0], + Q[0] + (2 / 3) * (Q[1] - Q[0]), + Q[2] + (2 / 3) * (Q[1] - Q[2]), + Q[2]] + + beziers = [] + pt = lambda p: np.array([p.x + x, -p.y]) # Flipping here since freetype has y-up + last = lambda: beziers[-1][-1] + + def move_to(a, beziers): + beziers.append([pt(a)]) + + def line_to(a, beziers): + Q = linear_to_cubic([last(), pt(a)]) + beziers[-1] += Q[1:] + + def conic_to(a, b, beziers): + Q = quadratic_to_cubic([last(), pt(a), pt(b)]) + beziers[-1] += Q[1:] + + def cubic_to(a, b, c, beziers): + beziers[-1] += [pt(a), pt(b), pt(c)] + + face.glyph.outline.decompose(beziers, move_to=move_to, line_to=line_to, conic_to=conic_to, cubic_to=cubic_to) + 
beziers = [np.array(C).astype(float) for C in beziers] + return beziers + + +def font_string_to_beziers(font, txt, size=30, spacing=1.0, merge=True, target_control=None): + ''' Load a font and convert the outlines for a given string to cubic bezier curves, + if merge is True, simply return a list of all bezier curves, + otherwise return a list of lists with the bezier curves for each glyph''' + + face = ft.Face(font) + face.set_char_size(64 * size) + slot = face.glyph + + x = 0 + beziers = [] + previous = 0 + for c in txt: + face.load_char(c, ft.FT_LOAD_DEFAULT | ft.FT_LOAD_NO_BITMAP) + bez = glyph_to_cubics(face, x) + + # Check number of control points if desired + if target_control is not None: + if c in target_control.keys(): + nctrl = np.sum([len(C) for C in bez]) + while nctrl < target_control[c]: + longest = np.max( + sum([[bezier.approx_arc_length(b) for b in bezier.chain_to_beziers(C)] for C in bez], [])) + thresh = longest * 0.5 + bez = [bezier.subdivide_bezier_chain(C, thresh) for C in bez] + nctrl = np.sum([len(C) for C in bez]) + print(nctrl) + + if merge: + beziers += bez + else: + beziers.append(bez) + + kerning = face.get_kerning(previous, c) + x += (slot.advance.x + kerning.x) * spacing + previous = c + + return beziers + + +def bezier_chain_to_commands(C, closed=True): + curves = bezier.chain_to_beziers(C) + cmds = 'M %f %f ' % (C[0][0], C[0][1]) + n = len(curves) + for i, bez in enumerate(curves): + if i == n - 1 and closed: + cmds += 'C %f %f %f %f %f %fz ' % (*bez[1], *bez[2], *bez[3]) + else: + cmds += 'C %f %f %f %f %f %f ' % (*bez[1], *bez[2], *bez[3]) + return cmds + + +def count_cp(file_name, font_name): + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(file_name) + p_counter = 0 + for path in shapes: + p_counter += path.points.shape[0] + print(f"TOTAL CP: [{p_counter}]") + return p_counter + + +def write_letter_svg(c, header, fontname, beziers, subdivision_thresh, dest_path): + cmds = '' + svg = header + + path = 
'\n' + svg += path + '\n' + + fname = f"{dest_path}/{fontname}_{c}.svg" + fname = fname.replace(" ", "_") + f = open(fname, 'w') + f.write(svg) + f.close() + return fname, path + + +def font_string_to_svgs(dest_path, font, txt, size=30, spacing=1.0, target_control=None, subdivision_thresh=None): + + fontname = os.path.splitext(os.path.basename(font))[0] + glyph_beziers = font_string_to_beziers(font, txt, size, spacing, merge=False, target_control=target_control) + if not os.path.isdir(dest_path): + os.mkdir(dest_path) + # Compute boundig box + points = np.vstack(sum(glyph_beziers, [])) + lt = np.min(points, axis=0) + rb = np.max(points, axis=0) + size = rb - lt + + sizestr = 'width="%.1f" height="%.1f"' % (size[0], size[1]) + boxstr = ' viewBox="%.1f %.1f %.1f %.1f"' % (lt[0], lt[1], size[0], size[1]) + header = ''' +\n' + + # Save global svg + svg_all += '\n' + fname = f"{dest_path}/{fontname}_{txt}.svg" + fname = fname.replace(" ", "_") + f = open(fname, 'w') + f.write(svg_all) + f.close() + + +if __name__ == '__main__': + + fonts = ["KaushanScript-Regular"] + level_of_cc = 1 + + if level_of_cc == 0: + target_cp = None + + else: + target_cp = {"A": 120, "B": 120, "C": 100, "D": 100, + "E": 120, "F": 120, "G": 120, "H": 120, + "I": 35, "J": 80, "K": 100, "L": 80, + "M": 100, "N": 100, "O": 100, "P": 120, + "Q": 120, "R": 130, "S": 110, "T": 90, + "U": 100, "V": 100, "W": 100, "X": 130, + "Y": 120, "Z": 120, + "a": 120, "b": 120, "c": 100, "d": 100, + "e": 120, "f": 120, "g": 120, "h": 120, + "i": 35, "j": 80, "k": 100, "l": 80, + "m": 100, "n": 100, "o": 100, "p": 120, + "q": 120, "r": 130, "s": 110, "t": 90, + "u": 100, "v": 100, "w": 100, "x": 130, + "y": 120, "z": 120 + } + + target_cp = {k: v * level_of_cc for k, v in target_cp.items()} + + for f in fonts: + print(f"======= {f} =======") + font_path = f"data/fonts/{f}.ttf" + output_path = f"data/init" + txt = "BUNNY" + subdivision_thresh = None + font_string_to_svgs(output_path, font_path, txt, 
target_control=target_cp, + subdivision_thresh=subdivision_thresh) + normalize_letter_size(output_path, font_path, txt) + + print("DONE") + + + + diff --git a/code/utils.py b/code/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e09b9c54f4b72de4d0227dd12feacd200010a817 --- /dev/null +++ b/code/utils.py @@ -0,0 +1,225 @@ +import collections.abc +import os +import os.path as osp +from torch import nn +import kornia.augmentation as K +import pydiffvg +import save_svg +import cv2 +from ttf import font_string_to_svgs, normalize_letter_size +import torch +import numpy as np + + +def edict_2_dict(x): + if isinstance(x, dict): + xnew = {} + for k in x: + xnew[k] = edict_2_dict(x[k]) + return xnew + elif isinstance(x, list): + xnew = [] + for i in range(len(x)): + xnew.append( edict_2_dict(x[i])) + return xnew + else: + return x + + +def check_and_create_dir(path): + pathdir = osp.split(path)[0] + if osp.isdir(pathdir): + pass + else: + os.makedirs(pathdir) + + +def update(d, u): + """https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth""" + for k, v in u.items(): + if isinstance(v, collections.abc.Mapping): + d[k] = update(d.get(k, {}), v) + else: + d[k] = v + return d + + +def preprocess(font, word, letter, level_of_cc=1): + + if level_of_cc == 0: + target_cp = None + else: + target_cp = {"A": 120, "B": 120, "C": 100, "D": 100, + "E": 120, "F": 120, "G": 120, "H": 120, + "I": 35, "J": 80, "K": 100, "L": 80, + "M": 100, "N": 100, "O": 100, "P": 120, + "Q": 120, "R": 130, "S": 110, "T": 90, + "U": 100, "V": 100, "W": 100, "X": 130, + "Y": 120, "Z": 120, + "a": 120, "b": 120, "c": 100, "d": 100, + "e": 120, "f": 120, "g": 120, "h": 120, + "i": 35, "j": 80, "k": 100, "l": 80, + "m": 100, "n": 100, "o": 100, "p": 120, + "q": 120, "r": 130, "s": 110, "t": 90, + "u": 100, "v": 100, "w": 100, "x": 130, + "y": 120, "z": 120 + } + target_cp = {k: v * level_of_cc for k, v in target_cp.items()} + + 
print(f"======= {font} =======") + font_path = f"code/data/fonts/{font}.ttf" + init_path = f"code/data/init" + subdivision_thresh = None + font_string_to_svgs(init_path, font_path, word, target_control=target_cp, + subdivision_thresh=subdivision_thresh) + normalize_letter_size(init_path, font_path, word) + + # optimaize two adjacent letters + if len(letter) > 1: + subdivision_thresh = None + font_string_to_svgs(init_path, font_path, letter, target_control=target_cp, + subdivision_thresh=subdivision_thresh) + normalize_letter_size(init_path, font_path, letter) + + print("Done preprocess") + + +def get_data_augs(cut_size): + augmentations = [] + augmentations.append(K.RandomPerspective(distortion_scale=0.5, p=0.7)) + augmentations.append(K.RandomCrop(size=(cut_size, cut_size), pad_if_needed=True, padding_mode='reflect', p=1.0)) + return nn.Sequential(*augmentations) + + +'''pytorch adaptation of https://github.com/google/mipnerf''' +def learning_rate_decay(step, + lr_init, + lr_final, + max_steps, + lr_delay_steps=0, + lr_delay_mult=1): + """Continuous learning rate decay function. + The returned rate is lr_init when step=0 and lr_final when step=max_steps, and + is log-linearly interpolated elsewhere (equivalent to exponential decay). + If lr_delay_steps>0 then the learning rate will be scaled by some smooth + function of lr_delay_mult, such that the initial learning rate is + lr_init*lr_delay_mult at the beginning of optimization but will be eased back + to the normal learning rate when steps>lr_delay_steps. + Args: + step: int, the current optimization step. + lr_init: float, the initial learning rate. + lr_final: float, the final learning rate. + max_steps: int, the number of steps during optimization. + lr_delay_steps: int, the number of steps to delay the full learning rate. + lr_delay_mult: float, the multiplier on the rate when delaying it. + Returns: + lr: the learning for current step 'step'. 
+ """ + if lr_delay_steps > 0: + # A kind of reverse cosine decay. + delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin( + 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)) + else: + delay_rate = 1. + t = np.clip(step / max_steps, 0, 1) + log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) + return delay_rate * log_lerp + + + +def save_image(img, filename, gamma=1): + check_and_create_dir(filename) + imshow = img.detach().cpu() + pydiffvg.imwrite(imshow, filename, gamma=gamma) + + +def get_letter_ids(letter, word, shape_groups): + for group, l in zip(shape_groups, word): + if l == letter: + return group.shape_ids + + +def combine_word(word, letter, font, experiment_dir): + word_svg_scaled = f"./code/data/init/{font}_{word}_scaled.svg" + canvas_width_word, canvas_height_word, shapes_word, shape_groups_word = pydiffvg.svg_to_scene(word_svg_scaled) + letter_ids = [] + for l in letter: + letter_ids += get_letter_ids(l, word, shape_groups_word) + + w_min, w_max = min([torch.min(shapes_word[ids].points[:, 0]) for ids in letter_ids]), max( + [torch.max(shapes_word[ids].points[:, 0]) for ids in letter_ids]) + h_min, h_max = min([torch.min(shapes_word[ids].points[:, 1]) for ids in letter_ids]), max( + [torch.max(shapes_word[ids].points[:, 1]) for ids in letter_ids]) + + c_w = (-w_min + w_max) / 2 + c_h = (-h_min + h_max) / 2 + + svg_result = os.path.join(experiment_dir, "output-svg", "output.svg") + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_result) + + out_w_min, out_w_max = min([torch.min(p.points[:, 0]) for p in shapes]), max( + [torch.max(p.points[:, 0]) for p in shapes]) + out_h_min, out_h_max = min([torch.min(p.points[:, 1]) for p in shapes]), max( + [torch.max(p.points[:, 1]) for p in shapes]) + + out_c_w = (-out_w_min + out_w_max) / 2 + out_c_h = (-out_h_min + out_h_max) / 2 + + scale_canvas_w = (w_max - w_min) / (out_w_max - out_w_min) + scale_canvas_h = (h_max - h_min) / (out_h_max - out_h_min) + + if 
scale_canvas_h > scale_canvas_w: + wsize = int((out_w_max - out_w_min) * scale_canvas_h) + scale_canvas_w = wsize / (out_w_max - out_w_min) + shift_w = -out_c_w * scale_canvas_w + c_w + else: + hsize = int((out_h_max - out_h_min) * scale_canvas_w) + scale_canvas_h = hsize / (out_h_max - out_h_min) + shift_h = -out_c_h * scale_canvas_h + c_h + + for num, p in enumerate(shapes): + p.points[:, 0] = p.points[:, 0] * scale_canvas_w + p.points[:, 1] = p.points[:, 1] * scale_canvas_h + if scale_canvas_h > scale_canvas_w: + p.points[:, 0] = p.points[:, 0] - out_w_min * scale_canvas_w + w_min + shift_w + p.points[:, 1] = p.points[:, 1] - out_h_min * scale_canvas_h + h_min + else: + p.points[:, 0] = p.points[:, 0] - out_w_min * scale_canvas_w + w_min + p.points[:, 1] = p.points[:, 1] - out_h_min * scale_canvas_h + h_min + shift_h + + for j, s in enumerate(letter_ids): + shapes_word[s] = shapes[j] + + # save_svg.save_svg( + # f"{experiment_dir}/{font}_{word}_{letter}.svg", canvas_width, canvas_height, shapes_word, + # shape_groups_word) + save_svg.save_svg( + f"/content/Word-As-Image/output_svgs/{word}.svg", canvas_width, canvas_height, shapes_word, + shape_groups_word) + + + render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(canvas_width, canvas_height, shapes_word, shape_groups_word) + img = render(canvas_width, canvas_height, 2, 2, 0, None, *scene_args) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device="cuda:0") * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + save_image(img, f"{experiment_dir}/{font}_{word}_{letter}.png") + + +def create_video(num_iter, experiment_dir, video_frame_freq): + img_array = [] + for ii in range(0, num_iter): + if ii % video_frame_freq == 0 or ii == num_iter - 1: + filename = os.path.join( + experiment_dir, "video-png", f"iter{ii:04d}.png") + img = cv2.imread(filename) + img_array.append(img) + + video_name = os.path.join( + experiment_dir, "video.mp4") + 
check_and_create_dir(video_name) + out = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'mp4v'), 30.0, (600, 600)) + for iii in range(len(img_array)): + out.write(img_array[iii]) + out.release() diff --git a/coming_soon.png b/coming_soon.png new file mode 100644 index 0000000000000000000000000000000000000000..3ccd5663f61360556506dc3e7e01c32672e25be2 Binary files /dev/null and b/coming_soon.png differ diff --git a/diffvg/.gitignore b/diffvg/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..bb2ce513e436401757b39532494414105ae0469b --- /dev/null +++ b/diffvg/.gitignore @@ -0,0 +1,10 @@ +build +apps/results +apps/files +apps/__pycache__ +compile_commands.json +.vimrc +diffvg.egg-info +dist +__pycache__ +.DS_Store diff --git a/diffvg/.gitmodules b/diffvg/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..45e0456ef0aa82aa4a0379a74ed665710ef3510b --- /dev/null +++ b/diffvg/.gitmodules @@ -0,0 +1,6 @@ +[submodule "pybind11"] + path = pybind11 + url = https://github.com/pybind/pybind11.git +[submodule "thrust"] + path = thrust + url = https://github.com/thrust/thrust.git diff --git a/diffvg/CMakeLists.txt b/diffvg/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..233e4be19e6dd4f2a209c5fa78867feb7f4005b7 --- /dev/null +++ b/diffvg/CMakeLists.txt @@ -0,0 +1,140 @@ +cmake_minimum_required(VERSION 3.12) + +project(diffvg VERSION 0.0.1 DESCRIPTION "Differentiable Vector Graphics") + +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/") +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +if(WIN32) + find_package(Python 3.6 COMPONENTS Development REQUIRED) +else() + find_package(Python 3.7 COMPONENTS Development REQUIRED) +endif() +add_subdirectory(pybind11) + +option(DIFFVG_CUDA "Build diffvg with GPU code path?" 
ON) + +if(DIFFVG_CUDA) + message(STATUS "Build with CUDA support") + find_package(CUDA 10 REQUIRED) + set(CMAKE_CUDA_STANDARD 11) + if(NOT WIN32) + # Hack: for some reason the line above doesn't work on some Linux systems. + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11") + #set(CUDA_NVCC_FLAGS_DEBUG "-g -G") + endif() +else() + message(STATUS "Build without CUDA support") + find_package(Thrust REQUIRED) +endif() + +# include_directories(${CMAKE_SOURCE_DIR}/pybind11/include) +include_directories(${PYTHON_INCLUDE_PATH}) +find_package(PythonLibs REQUIRED) +include_directories(${PYTHON_INCLUDE_PATH}) +include_directories(${PYTHON_INCLUDE_DIRS}) +include_directories(pybind11/include) +if(DIFFVG_CUDA) + link_directories(${CUDA_LIBRARIES}) +else() + include_directories(${THRUST_INCLUDE_DIR}) +endif() + +if(NOT MSVC) + # These compile definitions are not meaningful for MSVC + add_compile_options(-Wall -g -O3 -fvisibility=hidden -Wno-unknown-pragmas) +else() + add_compile_options(/Wall /Zi) + add_link_options(/DEBUG) +endif() + +if(NOT DIFFVG_CUDA) + add_compile_options("-DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_CPP") +endif() + +set(SRCS atomic.h + color.h + cdf.h + cuda_utils.h + diffvg.h + edge_query.h + filter.h + matrix.h + parallel.h + pcg.h + ptr.h + sample_boundary.h + scene.h + shape.h + solve.h + vector.h + within_distance.h + winding_number.h + atomic.cpp + color.cpp + diffvg.cpp + parallel.cpp + scene.cpp + shape.cpp) + +if(DIFFVG_CUDA) + add_compile_definitions(COMPILE_WITH_CUDA) + set_source_files_properties( + diffvg.cpp + scene.cpp + PROPERTIES CUDA_SOURCE_PROPERTY_FORMAT OBJ) + + cuda_add_library(diffvg MODULE ${SRCS}) +else() + add_library(diffvg MODULE ${SRCS}) +endif() + +if(APPLE) + # The "-undefined dynamic_lookup" is a hack for systems with + # multiple Python installed. If we link a particular Python version + # here, and we import it with a different Python version later. + # likely a segmentation fault. 
+ # The solution for Linux Mac OS machines, as mentioned in + # https://github.com/pybind/pybind11/blob/master/tools/pybind11Tools.cmake + # is to not link against Python library at all and resolve the symbols + # at compile time. + set(DYNAMIC_LOOKUP "-undefined dynamic_lookup") +endif() + +target_link_libraries(diffvg ${DYNAMIC_LOOKUP}) + +if(WIN32) + # See: https://pybind11.readthedocs.io/en/master/compiling.html#advanced-interface-library-target + target_link_libraries(diffvg pybind11::module) + set_target_properties(diffvg PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" + SUFFIX "${PYTHON_MODULE_EXTENSION}") +endif() + +set_target_properties(diffvg PROPERTIES SKIP_BUILD_RPATH FALSE) +set_target_properties(diffvg PROPERTIES BUILD_WITH_INSTALL_RPATH TRUE) +if(UNIX AND NOT APPLE) + set_target_properties(diffvg PROPERTIES INSTALL_RPATH "$ORIGIN") +elseif(APPLE) + set_target_properties(diffvg PROPERTIES INSTALL_RPATH "@loader_path") +endif() + +set_property(TARGET diffvg PROPERTY CXX_STANDARD 11) +set_target_properties(diffvg PROPERTIES PREFIX "") +# Still enable assertion in release mode +string( REPLACE "/DNDEBUG" "" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") +string( REPLACE "-DNDEBUG" "" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") +string( REPLACE "/DNDEBUG" "" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") +string( REPLACE "-DNDEBUG" "" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") +string( REPLACE "/DNDEBUG" "" CMAKE_C_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") +string( REPLACE "-DNDEBUG" "" CMAKE_C_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") +string( REPLACE "/DNDEBUG" "" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") +string( REPLACE "-DNDEBUG" "" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") + +if(NOT WIN32) + find_package(TensorFlow) + if(TensorFlow_FOUND) + add_subdirectory(pydiffvg_tensorflow/custom_ops) + else() + message(INFO " Building without 
TensorFlow support (not found)") + endif() +endif() diff --git a/diffvg/LICENSE b/diffvg/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/diffvg/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/diffvg/README.md b/diffvg/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6c01273e21d6d6718236e5a2265e278f89286b1a --- /dev/null +++ b/diffvg/README.md @@ -0,0 +1,153 @@ +# diffvg +Differentiable Rasterizer for Vector Graphics +https://people.csail.mit.edu/tzumao/diffvg + +diffvg is a differentiable rasterizer for 2D vector graphics. See the webpage for more info. 
+ +![teaser](https://user-images.githubusercontent.com/951021/92184822-2a0bc500-ee20-11ea-81a6-f26af2d120f4.jpg) + +![circle](https://user-images.githubusercontent.com/951021/63556018-0b2ddf80-c4f8-11e9-849c-b4ecfcb9a865.gif) +![ellipse](https://user-images.githubusercontent.com/951021/63556021-0ec16680-c4f8-11e9-8fc6-8b34de45b8be.gif) +![rect](https://user-images.githubusercontent.com/951021/63556028-12ed8400-c4f8-11e9-8072-81702c9193e1.gif) +![polygon](https://user-images.githubusercontent.com/951021/63980999-1e99f700-ca72-11e9-9786-1cba14d2d862.gif) +![curve](https://user-images.githubusercontent.com/951021/64042667-3d9e9480-cb17-11e9-88d8-2f7b9da8b8ab.gif) +![path](https://user-images.githubusercontent.com/951021/64070625-7a52b480-cc19-11e9-9380-eac02f56f693.gif) +![gradient](https://user-images.githubusercontent.com/951021/64898668-da475300-d63c-11e9-917a-825b94be0710.gif) +![circle_outline](https://user-images.githubusercontent.com/951021/65125594-84f7a280-d9aa-11e9-8bc4-669fd2eff2f4.gif) +![ellipse_transform](https://user-images.githubusercontent.com/951021/67149013-06b54700-f25b-11e9-91eb-a61171c6d4a4.gif) + +# Install +``` +git submodule update --init --recursive +conda install -y pytorch torchvision -c pytorch +conda install -y numpy +conda install -y scikit-image +conda install -y -c anaconda cmake +conda install -y -c conda-forge ffmpeg +pip install svgwrite +pip install svgpathtools +pip install cssutils +pip install numba +pip install torch-tools +pip install visdom +python setup.py install +``` +# Install using poetry + +## prerequisite +install python 3.7, poetry and ffmpeg + +``` +# install poetry (mac, linux) +curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python - + +# install ffmpeg + +(macos) +brew install ffmpeg + +(linux) +sudo apt install ffmpeg + +or use conda +conda install -y -c conda-forge ffmpeg +``` + +## Install python packages + +``` +# install all python dependencies +poetry install + +# 
install pydiffvg +poetry run python setup.py install +``` + +Now to run the apps, just add `poetry run` before each of the commands below, e.g. + +``` +poetry run python single_circle.py +``` + +# Building in debug mode + +``` +python setup.py build --debug install +``` + +# Run +``` +cd apps +``` + +Optimizing a single circle to a target. +``` +python single_circle.py +``` + +Finite difference comparison. +``` +finite_difference_comp.py [-h] [--size_scale SIZE_SCALE] + [--clamping_factor CLAMPING_FACTOR] + [--use_prefiltering USE_PREFILTERING] + svg_file +``` +e.g., +``` +python finite_difference_comp.py imgs/tiger.svg +``` + +Interactive editor +``` +python svg_brush.py +``` + +Painterly rendering +``` +painterly_rendering.py [-h] [--num_paths NUM_PATHS] + [--max_width MAX_WIDTH] [--use_lpips_loss] + [--num_iter NUM_ITER] [--use_blob] + target +``` +e.g., +``` +python painterly_rendering.py imgs/fallingwater.jpg --num_paths 2048 --max_width 4.0 --use_lpips_loss +``` + +Image vectorization +``` +python refine_svg.py [-h] [--use_lpips_loss] [--num_iter NUM_ITER] svg target +``` +e.g., +``` +python refine_svg.py imgs/flower.svg imgs/flower.jpg +``` + +Seam carving +``` +python seam_carving.py [-h] [--svg SVG] [--optim_steps OPTIM_STEPS] +``` +e.g., +``` +python seam_carving.py imgs/hokusai.svg +``` + +Vector variational autoencoder & vector GAN: + +For the GAN models, see `apps/generative_models/train_gan.py`. Generate samples from a pretrained using `apps/generative_models/eval_gan.py`. + +For the VAE models, see `apps/generative_models/mnist_vae.py`. + +If you use diffvg in your academic work, please cite + +``` +@article{Li:2020:DVG, + title = {Differentiable Vector Graphics Rasterization for Editing and Learning}, + author = {Li, Tzu-Mao and Luk\'{a}\v{c}, Michal and Gharbi Micha\"{e}l and Jonathan Ragan-Kelley}, + journal = {ACM Trans. Graph. (Proc. 
SIGGRAPH Asia)}, + volume = {39}, + number = {6}, + pages = {193:1--193:15}, + year = {2020} +} +``` diff --git a/diffvg/aabb.h b/diffvg/aabb.h new file mode 100644 index 0000000000000000000000000000000000000000..c35968e113188e1503e61c1eff3ec346161cf025 --- /dev/null +++ b/diffvg/aabb.h @@ -0,0 +1,67 @@ +#pragma once + +#include "diffvg.h" +#include "cuda_utils.h" +#include "vector.h" +#include "matrix.h" + +struct AABB { + DEVICE + inline AABB(const Vector2f &p_min = Vector2f{infinity(), infinity()}, + const Vector2f &p_max = Vector2f{-infinity(), -infinity()}) + : p_min(p_min), p_max(p_max) {} + Vector2f p_min, p_max; +}; + +DEVICE +inline +AABB merge(const AABB &box, const Vector2f &p) { + return AABB{Vector2f{min(p.x, box.p_min.x), min(p.y, box.p_min.y)}, + Vector2f{max(p.x, box.p_max.x), max(p.y, box.p_max.y)}}; +} + +DEVICE +inline +AABB merge(const AABB &box0, const AABB &box1) { + return AABB{Vector2f{min(box0.p_min.x, box1.p_min.x), min(box0.p_min.y, box1.p_min.y)}, + Vector2f{max(box0.p_max.x, box1.p_max.x), max(box0.p_max.y, box1.p_max.y)}}; +} + +DEVICE +inline +bool inside(const AABB &box, const Vector2f &p) { + return p.x >= box.p_min.x && p.x <= box.p_max.x && + p.y >= box.p_min.y && p.y <= box.p_max.y; +} + +DEVICE +inline +bool inside(const AABB &box, const Vector2f &p, float radius) { + return p.x >= box.p_min.x - radius && p.x <= box.p_max.x + radius && + p.y >= box.p_min.y - radius && p.y <= box.p_max.y + radius; +} + +DEVICE +inline +AABB enlarge(const AABB &box, float width) { + return AABB{Vector2f{box.p_min.x - width, box.p_min.y - width}, + Vector2f{box.p_max.x + width, box.p_max.y + width}}; +} + +DEVICE +inline +AABB transform(const Matrix3x3f &xform, const AABB &box) { + auto ret = AABB(); + ret = merge(ret, xform_pt(xform, Vector2f{box.p_min.x, box.p_min.y})); + ret = merge(ret, xform_pt(xform, Vector2f{box.p_min.x, box.p_max.y})); + ret = merge(ret, xform_pt(xform, Vector2f{box.p_max.x, box.p_min.y})); + ret = merge(ret, 
xform_pt(xform, Vector2f{box.p_max.x, box.p_max.y})); + return ret; +} + +DEVICE +inline +bool within_distance(const AABB &box, const Vector2f &pt, float r) { + return pt.x >= box.p_min.x - r && pt.x <= box.p_max.x + r && + pt.y >= box.p_min.y - r && pt.y <= box.p_max.y + r; +} diff --git a/diffvg/apps/.gitignore b/diffvg/apps/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..fe18dd39ffca9bae2840152db8f46e517124431e --- /dev/null +++ b/diffvg/apps/.gitignore @@ -0,0 +1,3 @@ +mnist +data/sketchrnn_cat.npz +data diff --git a/diffvg/apps/Makefile b/diffvg/apps/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ecc25e446f1983da750280ea36dadf6c917c6f6d --- /dev/null +++ b/diffvg/apps/Makefile @@ -0,0 +1,10 @@ +SEAM_IMAGES=seaside2 sunset2 hokusai cat ice_cream +SEAM_OUT=results/seam_carving +SEAM_RESULTS=$(addsuffix /out.mp4,$(addprefix $(SEAM_OUT)/,$(SEAM_IMAGES))) + +all: $(SEAM_RESULTS) + echo $(SEAM_RESULTS) + +$(SEAM_OUT)/%/out.mp4: imgs/seamcarving/%.svg + python seam_carving.py --svg $^ + diff --git a/diffvg/apps/curve_subdivision.py b/diffvg/apps/curve_subdivision.py new file mode 100644 index 0000000000000000000000000000000000000000..7f03df1ba0e087a007b57e586f80883887cc7bf6 --- /dev/null +++ b/diffvg/apps/curve_subdivision.py @@ -0,0 +1,85 @@ +import svgpathtools +import numpy as np +import math + +def split_cubic(c, t): + c0, c1 = svgpathtools.split_bezier(c, t) + return svgpathtools.CubicBezier(c0[0], c0[1], c0[2], c0[3]), svgpathtools.CubicBezier(c1[0], c1[1], c1[2], c1[3]) + +def cubic_to_quadratic(curve): + # Best L2 approximation + m = (-curve.start + 3 * curve.control1 + 3 * curve.control2 - curve.end) / 4.0 + return svgpathtools.QuadraticBezier(curve.start, m, curve.end) + +def convert_and_write_svg(cubic, filename): + cubic_path = svgpathtools.Path(cubic) + cubic_ctrl = svgpathtools.Path(svgpathtools.Line(cubic.start, cubic.control1), + svgpathtools.Line(cubic.control1, cubic.control2), + 
svgpathtools.Line(cubic.control2, cubic.end)) + cubic_color = (50, 50, 200) + cubic_ctrl_color = (150, 150, 150) + + r = 4.0 + + paths = [cubic_path, cubic_ctrl] + colors = [cubic_color, cubic_ctrl_color] + dots = [cubic_path[0].start, cubic_path[0].control1, cubic_path[0].control2, cubic_path[0].end] + ncols = ['green', 'green', 'green', 'green'] + nradii = [r, r, r, r] + stroke_widths = [3.0, 1.5] + + def add_quadratic(q): + paths.append(q) + q_ctrl = svgpathtools.Path(svgpathtools.Line(q.start, q.control), + svgpathtools.Line(q.control, q.end)) + paths.append(q_ctrl) + colors.append((200, 50, 50)) # q_color + colors.append((150, 150, 150)) # q_ctrl_color + dots.append(q.start) + dots.append(q.control) + dots.append(q.end) + ncols.append('purple') + ncols.append('purple') + ncols.append('purple') + nradii.append(r) + nradii.append(r) + nradii.append(r) + stroke_widths.append(3.0) + stroke_widths.append(1.5) + + prec = 1.0 + queue = [cubic] + num_quadratics = 0 + while len(queue) > 0: + c = queue[-1] + queue = queue[:-1] + + # Criteria for conversion + # http://caffeineowl.com/graphics/2d/vectorial/cubic2quad01.html + p = c.end - 3 * c.control2 + 3 * c.control1 - c.start + d = math.sqrt(p.real * p.real + p.imag * p.imag) * math.sqrt(3.0) / 36 + t = math.pow(1.0 / d, 1.0 / 3.0) + + if t < 1.0: + c0, c1 = split_cubic(c, 0.5) + queue.append(c0) + queue.append(c1) + else: + quadratic = cubic_to_quadratic(c) + print(quadratic) + add_quadratic(quadratic) + num_quadratics += 1 + print('num_quadratics:', num_quadratics) + + svgpathtools.wsvg(paths, + colors = colors, + stroke_widths = stroke_widths, + nodes = dots, + node_colors = ncols, + node_radii = nradii, + filename = filename) + +convert_and_write_svg(svgpathtools.CubicBezier(100+200j, 426+50j, 50+50j, 300+200j), + 'results/curve_subdivision/subdiv_curve0.svg') +convert_and_write_svg(svgpathtools.CubicBezier(100+200j, 427+50j, 50+50j, 300+200j), + 'results/curve_subdivision/subdiv_curve1.svg') diff --git 
a/diffvg/apps/finite_difference_comp.py b/diffvg/apps/finite_difference_comp.py new file mode 100644 index 0000000000000000000000000000000000000000..331f6d4bf94fbab535627055f027e6108fbfc98f --- /dev/null +++ b/diffvg/apps/finite_difference_comp.py @@ -0,0 +1,197 @@ +# python finite_difference_comp.py imgs/tiger.svg +# python finite_difference_comp.py --use_prefiltering True imgs/tiger.svg +# python finite_difference_comp.py imgs/boston.svg +# python finite_difference_comp.py --use_prefiltering True imgs/boston.svg +# python finite_difference_comp.py imgs/contour.svg +# python finite_difference_comp.py --use_prefiltering True imgs/contour.svg +# python finite_difference_comp.py --size_scale 0.5 --clamping_factor 0.05 imgs/hawaii.svg +# python finite_difference_comp.py --size_scale 0.5 --clamping_factor 0.05 --use_prefiltering True imgs/hawaii.svg +# python finite_difference_comp.py imgs/mcseem2.svg +# python finite_difference_comp.py --use_prefiltering True imgs/mcseem2.svg +# python finite_difference_comp.py imgs/reschart.svg +# python finite_difference_comp.py --use_prefiltering True imgs/reschart.svg + +import pydiffvg +import diffvg +from matplotlib import cm +import matplotlib.pyplot as plt +import argparse +import torch + +pydiffvg.set_print_timing(True) +#pydiffvg.set_use_gpu(False) + +def normalize(x, min_, max_): + range = max(abs(min_), abs(max_)) + return (x + range) / (2 * range) + +def main(args): + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg_file) + + w = int(canvas_width * args.size_scale) + h = int(canvas_height * args.size_scale) + + print(w, h) + curve_counts = 0 + for s in shapes: + if isinstance(s, pydiffvg.Circle): + curve_counts += 1 + elif isinstance(s, pydiffvg.Ellipse): + curve_counts += 1 + elif isinstance(s, pydiffvg.Path): + curve_counts += len(s.num_control_points) + elif isinstance(s, pydiffvg.Polygon): + curve_counts += len(s.points) - 1 + if s.is_closed: + curve_counts += 1 + elif 
isinstance(s, pydiffvg.Rect): + curve_counts += 1 + print('curve_counts:', curve_counts) + + pfilter = pydiffvg.PixelFilter(type = diffvg.FilterType.box, + radius = torch.tensor(0.5)) + + use_prefiltering = args.use_prefiltering + print('use_prefiltering:', use_prefiltering) + + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + + num_samples_x = args.num_spp + num_samples_y = args.num_spp + if (use_prefiltering): + num_samples_x = 1 + num_samples_y = 1 + + render = pydiffvg.RenderFunction.apply + img = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + pydiffvg.imwrite(img.cpu(), 'results/finite_difference_comp/img.png', gamma=1.0) + + epsilon = 0.1 + def perturb_scene(axis, epsilon): + for s in shapes: + if isinstance(s, pydiffvg.Circle): + s.center[axis] += epsilon + elif isinstance(s, pydiffvg.Ellipse): + s.center[axis] += epsilon + elif isinstance(s, pydiffvg.Path): + s.points[:, axis] += epsilon + elif isinstance(s, pydiffvg.Polygon): + s.points[:, axis] += epsilon + elif isinstance(s, pydiffvg.Rect): + s.p_min[axis] += epsilon + s.p_max[axis] += epsilon + for s in shape_groups: + if isinstance(s.fill_color, pydiffvg.LinearGradient): + s.fill_color.begin[axis] += epsilon + s.fill_color.end[axis] += epsilon + + perturb_scene(0, epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render = pydiffvg.RenderFunction.apply + img0 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + + perturb_scene(0, -2 * epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, 
+ filter = pfilter, + use_prefiltering = use_prefiltering) + img1 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + x_diff = (img0 - img1) / (2 * epsilon) + x_diff = x_diff.sum(axis = 2) + x_diff_max = x_diff.max() * args.clamping_factor + x_diff_min = x_diff.min() * args.clamping_factor + print(x_diff.max()) + print(x_diff.min()) + x_diff = cm.viridis(normalize(x_diff, x_diff_min, x_diff_max).cpu().numpy()) + pydiffvg.imwrite(x_diff, 'results/finite_difference_comp/finite_x_diff.png', gamma=1.0) + + perturb_scene(0, epsilon) + + perturb_scene(1, epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render = pydiffvg.RenderFunction.apply + img0 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + + perturb_scene(1, -2 * epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + img1 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + y_diff = (img0 - img1) / (2 * epsilon) + y_diff = y_diff.sum(axis = 2) + y_diff_max = y_diff.max() * args.clamping_factor + y_diff_min = y_diff.min() * args.clamping_factor + y_diff = cm.viridis(normalize(y_diff, y_diff_min, y_diff_max).cpu().numpy()) + pydiffvg.imwrite(y_diff, 'results/finite_difference_comp/finite_y_diff.png', gamma=1.0) + perturb_scene(1, epsilon) + + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render_grad = pydiffvg.RenderFunction.render_grad 
+ img_grad = render_grad(torch.ones(h, w, 4, device = pydiffvg.get_device()), + w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + print(img_grad[:, :, 0].max()) + print(img_grad[:, :, 0].min()) + x_diff = cm.viridis(normalize(img_grad[:, :, 0], x_diff_min, x_diff_max).cpu().numpy()) + y_diff = cm.viridis(normalize(img_grad[:, :, 1], y_diff_min, y_diff_max).cpu().numpy()) + pydiffvg.imwrite(x_diff, 'results/finite_difference_comp/ours_x_diff.png', gamma=1.0) + pydiffvg.imwrite(y_diff, 'results/finite_difference_comp/ours_y_diff.png', gamma=1.0) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("svg_file", help="source SVG path") + parser.add_argument("--size_scale", type=float, default=1.0) + parser.add_argument("--clamping_factor", type=float, default=0.1) + parser.add_argument("--num_spp", type=int, default=4) + parser.add_argument("--use_prefiltering", type=bool, default=False) + args = parser.parse_args() + main(args) diff --git a/diffvg/apps/gaussian_blur.py b/diffvg/apps/gaussian_blur.py new file mode 100644 index 0000000000000000000000000000000000000000..8d1480267597c2513236698d027302b9d0a02635 --- /dev/null +++ b/diffvg/apps/gaussian_blur.py @@ -0,0 +1,93 @@ +""" +""" +import os +import pydiffvg +import torch as th +import scipy.ndimage.filters as F + + +def render(canvas_width, canvas_height, shapes, shape_groups): + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + return img + + +def main(): + pydiffvg.set_device(th.device('cuda:1')) + + # Load SVG + svg = os.path.join("imgs", "peppers.svg") + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(svg) + + # 
Save initial state + ref = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(ref.cpu(), 'results/gaussian_blur/init.png', gamma=2.2) + + target = F.gaussian_filter(ref.cpu().numpy(), [10, 10, 0]) + target = th.from_numpy(target).to(ref.device) + pydiffvg.imwrite(target.cpu(), 'results/gaussian_blur/target.png', gamma=2.2) + + # Collect variables to optimize + points_vars = [] + width_vars = [] + for path in shapes: + path.points.requires_grad = True + points_vars.append(path.points) + path.stroke_width.requires_grad = True + width_vars.append(path.stroke_width) + color_vars = [] + for group in shape_groups: + # do not optimize alpha + group.fill_color[..., :3].requires_grad = True + color_vars.append(group.fill_color) + + # Optimize + points_optim = th.optim.Adam(points_vars, lr=1.0) + width_optim = th.optim.Adam(width_vars, lr=1.0) + color_optim = th.optim.Adam(color_vars, lr=0.01) + + for t in range(20): + print('\niteration:', t) + points_optim.zero_grad() + width_optim.zero_grad() + color_optim.zero_grad() + # Forward pass: render the image. + img = render(canvas_width, canvas_height, shapes, shape_groups) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/gaussian_blur/iter_{}.png'.format(t), gamma=2.2) + loss = (img - target)[..., :3].pow(2).mean() + + print('alpha:', img[..., 3].mean().item()) + print('render loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + + # Take a gradient descent step. + points_optim.step() + width_optim.step() + color_optim.step() + for group in shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + + # Final render + img = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(img.cpu(), 'results/gaussian_blur/final.png', gamma=2.2) + + # Convert the intermediate renderings to a video. 
+ from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", + "results/gaussian_blur/iter_%d.png", "-vb", "20M", + "results/gaussian_blur/out.mp4"]) + +if __name__ == "__main__": + main() diff --git a/diffvg/apps/generative_models/.gitignore b/diffvg/apps/generative_models/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..78fb1fcefe78e8a695ea479d1f738fac922cdea0 --- /dev/null +++ b/diffvg/apps/generative_models/.gitignore @@ -0,0 +1 @@ +.gdb_history diff --git a/diffvg/apps/generative_models/README.md b/diffvg/apps/generative_models/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6b987ee6290d752897558fe614d26cfb12d108c9 --- /dev/null +++ b/diffvg/apps/generative_models/README.md @@ -0,0 +1,5 @@ +# Usage + +For the GAN models, see `train_gan.py`. Generate samples from a pretrained using `eval_gan.py` + +For the VAE models, see `mnist_vae.py`. diff --git a/diffvg/apps/generative_models/__init__.py b/diffvg/apps/generative_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/diffvg/apps/generative_models/data.py b/diffvg/apps/generative_models/data.py new file mode 100644 index 0000000000000000000000000000000000000000..bdbac65baeaaf7945530cdaf78be7d34cf2fc11a --- /dev/null +++ b/diffvg/apps/generative_models/data.py @@ -0,0 +1,229 @@ +import os +import time +import torch as th +import numpy as np +import torchvision.datasets as dset +import torchvision.transforms as transforms +import imageio + +import ttools +import rendering + +BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) +DATA = os.path.join(BASE_DIR, "data") + +LOG = ttools.get_logger(__name__) + + +class QuickDrawImageDataset(th.utils.data.Dataset): + BASE_DATA_URL = \ + "https://console.cloud.google.com/storage/browser/_details/quickdraw_dataset/full/numpy_bitmap/cat.npy" + """ + Args: + spatial_limit(int): maximum 
spatial extent in pixels. + """ + def __init__(self, imsize, train=True): + super(QuickDrawImageDataset, self).__init__() + file = os.path.join(DATA, "cat.npy") + + self.imsize = imsize + + if not os.path.exists(file): + msg = "Dataset file %s does not exist, please download" + " it from %s" % (file, QuickDrawImageDataset.BASE_DATA_URL) + LOG.error(msg) + raise RuntimeError(msg) + + self.data = np.load(file, allow_pickle=True, encoding="latin1") + + def __len__(self): + return self.data.shape[0] + + def __getitem__(self, idx): + im = np.reshape(self.data[idx], (1, 1, 28, 28)) + im = th.from_numpy(im).float() / 255.0 + im = th.nn.functional.interpolate(im, size=(self.imsize, self.imsize)) + + # Bring it to [-1, 1] + im = th.clamp(im, 0, 1) + im -= 0.5 + im /= 0.5 + + return im.squeeze(0) + + +class QuickDrawDataset(th.utils.data.Dataset): + BASE_DATA_URL = \ + "https://storage.cloud.google.com/quickdraw_dataset/sketchrnn" + + """ + Args: + spatial_limit(int): maximum spatial extent in pixels. 
+ """ + def __init__(self, dataset, mode="train", + max_seq_length=250, + spatial_limit=1000): + super(QuickDrawDataset, self).__init__() + file = os.path.join(DATA, "sketchrnn_"+dataset) + remote = os.path.join(QuickDrawDataset.BASE_DATA_URL, dataset) + + self.max_seq_length = max_seq_length + self.spatial_limit = spatial_limit + + if mode not in ["train", "test", "valid"]: + return ValueError("Only allowed data mode are 'train' and 'test'," + " 'valid'.") + + if not os.path.exists(file): + msg = "Dataset file %s does not exist, please download" + " it from %s" % (file, remote) + LOG.error(msg) + raise RuntimeError(msg) + + data = np.load(file, allow_pickle=True, encoding="latin1")[mode] + data = self.purify(data) + data = self.normalize(data) + + # Length of longest sequence in the dataset + self.nmax = max([len(seq) for seq in data]) + self.sketches = data + + def __repr__(self): + return "Dataset with %d sequences of max length %d" % \ + (len(self.sketches), self.nmax) + + def __len__(self): + return len(self.sketches) + + def __getitem__(self, idx): + """Return the idx-th stroke in 5-D format, padded to length (Nmax+2). + + The first and last element of the sequence are fixed to "start-" and + "end-of-sequence" token. 
+ + dx, dy, + 3 numbers for one-hot encoding of state: + 1 0 0: pen touching paper till next point + 0 1 0: pen lifted from paper after current point + 0 0 1: drawing has ended, next points (including current will not be + drawn) + """ + sample_data = self.sketches[idx] + + # Allow two extra slots for start/end of sequence tokens + sample = np.zeros((self.nmax+2, 5), dtype=np.float32) + + n = sample_data.shape[0] + + # normalize dx, dy + deltas = sample_data[:, :2] + # Absolute coordinates + positions = deltas[..., :2].cumsum(0) + maxi = np.abs(positions).max() + 1e-8 + deltas = deltas / (1.1 * maxi) # leave some margin on edges + + # fill in dx, dy coordinates + sample[1:n+1, :2] = deltas + + # on paper indicator: 0 means touching paper in the 3d format, flip it + sample[1:n+1, 2] = 1 - sample_data[:, 2] + + # off-paper indicator, complement of previous flag + sample[1:n+1, 3] = 1 - sample[1:n+1, 2] + + # fill with end of sequence tokens for the remainder + sample[n+1:, 4] = 1 + + # Start of sequence token + sample[0] = [0, 0, 1, 0, 0] + + return sample + + def purify(self, strokes): + """removes to small or too long sequences + removes large gaps""" + data = [] + for seq in strokes: + if seq.shape[0] <= self.max_seq_length: + # and seq.shape[0] > 10: + + # Limit large spatial gaps + seq = np.minimum(seq, self.spatial_limit) + seq = np.maximum(seq, -self.spatial_limit) + seq = np.array(seq, dtype=np.float32) + data.append(seq) + return data + + def calculate_normalizing_scale_factor(self, strokes): + """Calculate the normalizing factor explained in appendix of + sketch-rnn.""" + data = [] + for i, stroke_i in enumerate(strokes): + for j, pt in enumerate(strokes[i]): + data.append(pt[0]) + data.append(pt[1]) + data = np.array(data) + return np.std(data) + + def normalize(self, strokes): + """Normalize entire dataset (delta_x, delta_y) by the scaling + factor.""" + data = [] + scale_factor = self.calculate_normalizing_scale_factor(strokes) + for seq in strokes: + 
seq[:, 0:2] /= scale_factor + data.append(seq) + return data + + +class FixedLengthQuickDrawDataset(QuickDrawDataset): + """A variant of the QuickDraw dataset where the strokes are represented as + a fixed-length sequence of triplets (dx, dy, opacity), where opacity = 0, 1. + """ + def __init__(self, *args, canvas_size=64, **kwargs): + super(FixedLengthQuickDrawDataset, self).__init__(*args, **kwargs) + self.canvas_size = canvas_size + + def __getitem__(self, idx): + sample = super(FixedLengthQuickDrawDataset, self).__getitem__(idx) + + # We construct a stroke opacity variable from the pen down state, dx, dy remain unchanged + strokes = sample[:, :3] + + im = np.zeros((1, 1)) + + # render image + # start = time.time() + im = rendering.opacityStroke2diffvg( + th.from_numpy(strokes).unsqueeze(0), canvas_size=self.canvas_size, + relative=True, debug=False) + im = im.squeeze(0).numpy() + # elapsed = (time.time() - start)*1000 + # print("item %d pipeline gt rendering took %.2fms" % (idx, elapsed)) + + return strokes, im + + +class MNISTDataset(th.utils.data.Dataset): + def __init__(self, imsize, train=True): + super(MNISTDataset, self).__init__() + self.mnist = dset.MNIST(root=os.path.join(DATA, "mnist"), + train=train, + download=True, + transform=transforms.Compose([ + transforms.Resize((imsize, imsize)), + transforms.ToTensor(), + ])) + + def __len__(self): + return len(self.mnist) + + def __getitem__(self, idx): + im, label = self.mnist[idx] + + # make sure data uses [0, 1] range + im -= im.min() + im /= im.max() + 1e-8 + + # Bring it to [-1, 1] + im -= 0.5 + im /= 0.5 + return im diff --git a/diffvg/apps/generative_models/eval_gan.py b/diffvg/apps/generative_models/eval_gan.py new file mode 100644 index 0000000000000000000000000000000000000000..f415a51c97646de18e3f49bea1a3ccbba0f36138 --- /dev/null +++ b/diffvg/apps/generative_models/eval_gan.py @@ -0,0 +1,182 @@ +"""Evaluate a pretrained GAN model. +Usage: + +`python eval_gan.py `, e.g. 
+`../results/quickdraw_gan_vector_bezier_fc_wgan`. + +""" +import os +import argparse +import torch as th +import numpy as np +import ttools +import imageio +from subprocess import call + +import pydiffvg + +import models + + +LOG = ttools.get_logger(__name__) + + +def postprocess(im, invert=False): + im = th.clamp((im + 1.0) / 2.0, 0, 1) + if invert: + im = (1.0 - im) + im = ttools.tensor2image(im) + return im + + +def imsave(im, path): + os.makedirs(os.path.dirname(path), exist_ok=True) + imageio.imwrite(path, im) + + +def save_scene(scn, path): + os.makedirs(os.path.dirname(path), exist_ok=True) + pydiffvg.save_svg(path, *scn, use_gamma=False) + + +def run(args): + th.manual_seed(0) + np.random.seed(0) + + meta = ttools.Checkpointer.load_meta(args.model, "vect_g_") + + if meta is None: + LOG.warning("Could not load metadata at %s, aborting.", args.model) + return + + LOG.info("Loaded model %s with metadata:\n %s", args.model, meta) + + if args.output_dir is None: + outdir = os.path.join(args.model, "eval") + else: + outdir = args.output_dir + os.makedirs(outdir, exist_ok=True) + + model_params = meta["model_params"] + if args.imsize is not None: + LOG.info("Overriding output image size to: %dx%d", args.imsize, + args.imsize) + old_size = model_params["imsize"] + scale = args.imsize * 1.0 / old_size + model_params["imsize"] = args.imsize + model_params["stroke_width"] = [w*scale for w in + model_params["stroke_width"]] + LOG.info("Overriding width to: %s", model_params["stroke_width"]) + + # task = meta["task"] + generator = meta["generator"] + if generator == "fc": + model = models.VectorGenerator(**model_params) + elif generator == "bezier_fc": + model = models.BezierVectorGenerator(**model_params) + elif generator in ["rnn"]: + model = models.RNNVectorGenerator(**model_params) + elif generator in ["chain_rnn"]: + model = models.ChainRNNVectorGenerator(**model_params) + else: + raise NotImplementedError() + model.eval() + + device = "cpu" + if 
th.cuda.is_available(): + device = "cuda" + + model.to(device) + + checkpointer = ttools.Checkpointer( + args.model, model, meta=meta, prefix="vect_g_") + checkpointer.load_latest() + + LOG.info("Computing latent space interpolation") + for i in range(args.nsamples): + z0 = model.sample_z(1) + z1 = model.sample_z(1) + + # interpolation + alpha = th.linspace(0, 1, args.nsteps).view(args.nsteps, 1).to(device) + alpha_video = th.linspace(0, 1, args.nframes).view(args.nframes, 1) + alpha_video = alpha_video.to(device) + + length = [args.nsteps, args.nframes] + for idx, a in enumerate([alpha, alpha_video]): + _z0 = z0.repeat(length[idx], 1).to(device) + _z1 = z1.repeat(length[idx], 1).to(device) + batch = _z0*(1-a) + _z1*a + out = model(batch) + if idx == 0: # image viz + n, c, h, w = out.shape + out = out.permute(1, 2, 0, 3) + out = out.contiguous().view(1, c, h, w*n) + out = postprocess(out, invert=args.invert) + imsave(out, os.path.join(outdir, + "latent_interp", "%03d.png" % i)) + + scenes = model.get_vector(batch) + for scn_idx, scn in enumerate(scenes): + save_scene(scn, os.path.join(outdir, "latent_interp_svg", + "%03d" % i, "%03d.svg" % + scn_idx)) + else: # video viz + anim_root = os.path.join(outdir, + "latent_interp_video", "%03d" % i) + LOG.info("Rendering animation %d", i) + for frame_idx, frame in enumerate(out): + LOG.info("frame %d", frame_idx) + frame = frame.unsqueeze(0) + frame = postprocess(frame, invert=args.invert) + imsave(frame, os.path.join(anim_root, + "frame%04d.png" % frame_idx)) + call(["ffmpeg", "-framerate", "30", "-i", + os.path.join(anim_root, "frame%04d.png"), "-vb", "20M", + os.path.join(outdir, + "latent_interp_video", "%03d.mp4" % i)]) + LOG.info(" saved %d", i) + + LOG.info("Sampling latent space") + + for i in range(args.nsamples): + n = 8 + bs = n*n + z = model.sample_z(bs).to(device) + out = model(z) + _, c, h, w = out.shape + out = out.view(n, n, c, h, w).permute(2, 0, 3, 1, 4) + out = out.contiguous().view(1, c, h*n, w*n) + out 
= postprocess(out) + imsave(out, os.path.join(outdir, "samples_%03d.png" % i)) + LOG.info(" saved %d", i) + + LOG.info("output images saved to %s", outdir) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument("model") + parser.add_argument("--output_dir", help="output directory for " + " the samples. Defaults to the model's path") + parser.add_argument("--nsamples", default=16, type=int, + help="number of output to compute") + parser.add_argument("--imsize", type=int, + help="if provided, override the raster output " + "resolution") + parser.add_argument("--nsteps", default=9, type=int, help="number of " + "interpolation steps for the interpolation") + parser.add_argument("--nframes", default=120, type=int, help="number of " + "frames for the interpolation video") + parser.add_argument("--invert", default=False, action="store_true", + help="if True, render black on white rather than the" + " opposite") + + args = parser.parse_args() + + pydiffvg.set_use_gpu(False) + + ttools.set_logger(False) + + run(args) diff --git a/diffvg/apps/generative_models/losses.py b/diffvg/apps/generative_models/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..8bebb7c978567bacf4b434027d6317c3e588b5d8 --- /dev/null +++ b/diffvg/apps/generative_models/losses.py @@ -0,0 +1,99 @@ +"""Losses for the generative models and baselines.""" +import torch as th +import numpy as np + +import ttools.modules.image_operators as imops + + +class KLDivergence(th.nn.Module): + """ + Args: + min_value(float): the loss is clipped so that value below this + number don't affect the optimization. 
+ """ + def __init__(self, min_value=0.2): + super(KLDivergence, self).__init__() + self.min_value = min_value + + def forward(self, mu, log_sigma): + loss = -0.5 * (1.0 + log_sigma - mu.pow(2) - log_sigma.exp()) + loss = loss.mean() + loss = th.max(loss, self.min_value*th.ones_like(loss)) + return loss + + +class MultiscaleMSELoss(th.nn.Module): + def __init__(self, channels=3): + super(MultiscaleMSELoss, self).__init__() + self.blur = imops.GaussianBlur(1, channels=channels) + + def forward(self, im, target): + bs, c, h, w = im.shape + num_levels = max(int(np.ceil(np.log2(h))) - 2, 1) + + losses = [] + for lvl in range(num_levels): + loss = th.nn.functional.mse_loss(im, target) + losses.append(loss) + im = th.nn.functional.interpolate(self.blur(im), + scale_factor=0.5, + mode="nearest") + target = th.nn.functional.interpolate(self.blur(target), + scale_factor=0.5, + mode="nearest") + + losses = th.stack(losses) + return losses.sum() + + +def gaussian_pdfs(dx, dy, params): + """Returns the pdf at (dx, dy) for each Gaussian in the mixture. 
+ """ + dx = dx.unsqueeze(-1) # replicate dx, dy to evaluate all pdfs at once + dy = dy.unsqueeze(-1) + + mu_x = params[..., 0] + mu_y = params[..., 1] + sigma_x = params[..., 2].exp() + sigma_y = params[..., 3].exp() + rho_xy = th.tanh(params[..., 4]) + + x = ((dx-mu_x) / sigma_x).pow(2) + y = ((dy-mu_y) / sigma_y).pow(2) + + xy = (dx-mu_x)*(dy-mu_y) / (sigma_x * sigma_y) + arg = x + y - 2.0*rho_xy*xy + pdf = th.exp(-arg / (2*(1.0 - rho_xy.pow(2)))) + norm = 2.0 * np.pi * sigma_x * sigma_y * (1.0 - rho_xy.pow(2)).sqrt() + + return pdf / norm + + +class GaussianMixtureReconstructionLoss(th.nn.Module): + """ + Args: + """ + def __init__(self, eps=1e-5): + super(GaussianMixtureReconstructionLoss, self).__init__() + self.eps = eps + + def forward(self, pen_logits, mixture_logits, gaussian_params, targets): + dx = targets[..., 0] + dy = targets[..., 1] + pen_state = targets[..., 2:].argmax(-1) # target index + + # Likelihood loss on the stroke position + # No need to predict accurate pen position for end-of-sequence tokens + valid_stroke = (targets[..., -1] != 1.0).float() + mixture_weights = th.nn.functional.softmax(mixture_logits, -1) + pdfs = gaussian_pdfs(dx, dy, gaussian_params) + position_loss = - th.log(self.eps + (pdfs * mixture_weights).sum(-1)) + + # by actual non-empty count + position_loss = (position_loss*valid_stroke).sum() / valid_stroke.sum() + + # Classification loss for the stroke mode + pen_loss = th.nn.functional.cross_entropy(pen_logits.view(-1, 3), + pen_state.view(-1)) + + return position_loss + pen_loss diff --git a/diffvg/apps/generative_models/mnist_vae.py b/diffvg/apps/generative_models/mnist_vae.py new file mode 100644 index 0000000000000000000000000000000000000000..c0da626508d86f659cac08e62bda0a028d6278e9 --- /dev/null +++ b/diffvg/apps/generative_models/mnist_vae.py @@ -0,0 +1,689 @@ +#!/bin/env python +"""Train a VAE MNIST generator. 
+ +Usage: + +* Train a model: + +`python mnist_vae.py train` + +* Generate samples from a trained model: + +`python mnist_vae.py sample` + +* Generate latent space interpolations from a trained model: + +`python mnist_vae.py interpolate` +""" +import argparse +import os + +import numpy as np +import torch as th +from torch.utils.data import DataLoader +import torchvision.datasets as dset +import torchvision.transforms as transforms + +import ttools +import ttools.interfaces + +from modules import Flatten + +import pydiffvg + +LOG = ttools.get_logger(__name__) + + +BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) +VAE_OUTPUT = os.path.join(BASE_DIR, "results", "mnist_vae") +AE_OUTPUT = os.path.join(BASE_DIR, "results", "mnist_ae") + + +def _onehot(label): + bs = label.shape[0] + label_onehot = label.new(bs, 10) + label_onehot = label_onehot.zero_() + label_onehot.scatter_(1, label.unsqueeze(1), 1) + return label_onehot.float() + + +def render(canvas_width, canvas_height, shapes, shape_groups, samples=2): + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene( + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, + canvas_height, + samples, + samples, + 0, + None, + *scene_args) + return img + + +class MNISTCallback(ttools.callbacks.ImageDisplayCallback): + """Simple callback that visualize generated images during training.""" + + def visualized_image(self, batch, step_data, is_val=False): + im = step_data["rendering"].detach().cpu() + im = 0.5 + 0.5*im + ref = batch[0].cpu() + + vizdata = [im, ref] + + # tensor to visualize, concatenate images + viz = th.clamp(th.cat(vizdata, 2), 0, 1) + return viz + + def caption(self, batch, step_data, is_val=False): + return "fake, real" + + +class VAEInterface(ttools.ModelInterface): + def __init__(self, model, lr=1e-4, cuda=True, max_grad_norm=10, + variational=True, w_kld=1.0): + super(VAEInterface, self).__init__() + + 
self.max_grad_norm = max_grad_norm + + self.model = model + + self.w_kld = w_kld + + self.variational = variational + + self.device = "cpu" + if cuda: + self.device = "cuda" + + self.model.to(self.device) + + self.opt = th.optim.Adam( + self.model.parameters(), lr=lr, betas=(0.5, 0.5), eps=1e-12) + + def training_step(self, batch): + im, label = batch[0], batch[1] + im = im.to(self.device) + label = label.to(self.device) + rendering, auxdata = self.model(im, label) + + im = batch[0] + im = im.to(self.device) + + logvar = auxdata["logvar"] + mu = auxdata["mu"] + + data_loss = th.nn.functional.mse_loss(rendering, im) + + ret = {} + if self.variational: # VAE mode + kld = -0.5 * th.sum(1 + logvar - mu.pow(2) - logvar.exp(), 1) + kld = kld.mean() + loss = data_loss + kld*self.w_kld + ret["kld"] = kld.item() + else: # Regular autoencoder + loss = data_loss + + # optimize + self.opt.zero_grad() + loss.backward() + + # Clip large gradients if needed + if self.max_grad_norm is not None: + nrm = th.nn.utils.clip_grad_norm_( + self.model.parameters(), self.max_grad_norm) + if nrm > self.max_grad_norm: + LOG.warning("Clipping generator gradients. 
norm = %.3f > %.3f", + nrm, self.max_grad_norm) + + self.opt.step() + + ret["loss"] = loss.item() + ret["data_loss"] = data_loss.item() + ret["auxdata"] = auxdata + ret["rendering"] = rendering + ret["logvar"] = logvar.abs().max().item() + + return ret + + +class VectorMNISTVAE(th.nn.Module): + def __init__(self, imsize=28, paths=4, segments=5, samples=2, zdim=128, + conditional=False, variational=True, raster=False, fc=False, + stroke_width=None): + super(VectorMNISTVAE, self).__init__() + + self.samples = samples + self.imsize = imsize + self.paths = paths + self.segments = segments + self.zdim = zdim + self.conditional = conditional + self.variational = variational + + if stroke_width is None: + self.stroke_width = (1.0, 3.0) + LOG.warning("Setting default stroke with %s", self.stroke_width) + else: + self.stroke_width = stroke_width + + ncond = 0 + if self.conditional: # one hot encoded input for conditional model + ncond = 10 + + self.fc = fc + mult = 1 + nc = 1024 + + if not self.fc: # conv model + self.encoder = th.nn.Sequential( + # 32x32 + th.nn.Conv2d(1 + ncond, mult*64, 4, padding=0, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + + # 16x16 + th.nn.Conv2d(mult*64, mult*128, 4, padding=0, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + + # 8x8 + th.nn.Conv2d(mult*128, mult*256, 4, padding=0, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + Flatten(), + ) + else: + self.encoder = th.nn.Sequential( + # 32x32 + Flatten(), + th.nn.Linear(28*28 + ncond, mult*256), + th.nn.LeakyReLU(0.2, inplace=True), + + # 8x8 + th.nn.Linear(mult*256, mult*256, 4), + th.nn.LeakyReLU(0.2, inplace=True), + ) + + self.mu_predictor = th.nn.Linear(256*1*1, zdim) + if self.variational: + self.logvar_predictor = th.nn.Linear(256*1*1, zdim) + + self.decoder = th.nn.Sequential( + th.nn.Linear(zdim + ncond, nc), + th.nn.SELU(inplace=True), + + th.nn.Linear(nc, nc), + th.nn.SELU(inplace=True), + ) + + self.raster = raster + + if self.raster: + self.raster_decoder = 
th.nn.Sequential( + th.nn.Linear(nc, imsize*imsize), + ) + else: + # 4 points bezier with n_segments -> 3*n_segments + 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(nc, 2*self.paths*(self.segments*3+1)), + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(nc, self.paths), + th.nn.Sigmoid() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(nc, self.paths), + th.nn.Sigmoid() + ) + + def encode(self, im, label): + bs, _, h, w = im.shape + if self.conditional: + label_onehot = _onehot(label) + if not self.fc: + label_onehot = label_onehot.view( + bs, 10, 1, 1).repeat(1, 1, h, w) + out = self.encoder(th.cat([im, label_onehot], 1)) + else: + out = self.encoder(th.cat([im.view(bs, -1), label_onehot], 1)) + else: + out = self.encoder(im) + mu = self.mu_predictor(out) + if self.variational: + logvar = self.logvar_predictor(out) + return mu, logvar + else: + return mu + + def reparameterize(self, mu, logvar): + std = th.exp(0.5*logvar) + eps = th.randn_like(logvar) + return mu + std*eps + + def _decode_features(self, z, label): + if label is not None: + if not self.conditional: + raise ValueError("decoding with an input label " + "requires a conditional AE") + label_onehot = _onehot(label) + z = th.cat([z, label_onehot], 1) + + decoded = self.decoder(z) + return decoded + + def decode(self, z, label=None): + bs = z.shape[0] + + feats = self._decode_features(z, label) + + if self.raster: + out = self.raster_decoder(feats).view( + bs, 1, self.imsize, self.imsize) + return out, {} + + all_points = self.point_predictor(feats) + all_points = all_points.view(bs, self.paths, -1, 2) + + all_points = all_points*(self.imsize//2-2) + self.imsize//2 + + if False: + all_widths = th.ones(bs, self.paths) * 0.5 + else: + all_widths = self.width_predictor(feats) + min_width = self.stroke_width[0] + max_width = self.stroke_width[1] + all_widths = (max_width - min_width) * all_widths + min_width + + if False: 
+ all_alphas = th.ones(bs, self.paths) + else: + all_alphas = self.alpha_predictor(feats) + + # Process the batch sequentially + outputs = [] + scenes = [] + for k in range(bs): + # Get point parameters from network + shapes = [] + shape_groups = [] + for p in range(self.paths): + points = all_points[k, p].contiguous().cpu() + width = all_widths[k, p].cpu() + alpha = all_alphas[k, p].cpu() + + color = th.cat([th.ones(3), alpha.view(1,)]) + num_ctrl_pts = th.zeros(self.segments, dtype=th.int32) + 2 + + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + + shapes.append(path) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + scenes.append( + [shapes, shape_groups, (self.imsize, self.imsize)]) + + # Rasterize + out = render(self.imsize, self.imsize, shapes, shape_groups, + samples=self.samples) + + # Torch format, discard alpha, make gray + out = out.permute(2, 0, 1).view( + 4, self.imsize, self.imsize)[:3].mean(0, keepdim=True) + + outputs.append(out) + + output = th.stack(outputs).to(z.device) + + auxdata = { + "points": all_points, + "scenes": scenes, + } + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, auxdata + + def forward(self, im, label): + if self.variational: + mu, logvar = self.encode(im, label) + z = self.reparameterize(mu, logvar) + else: + mu = self.encode(im, label) + z = mu + logvar = None + + if self.conditional: + output, aux = self.decode(z, label=label) + else: + output, aux = self.decode(z) + + aux["logvar"] = logvar + aux["mu"] = mu + + return output, aux + + +class Dataset(th.utils.data.Dataset): + def __init__(self, data_dir, imsize): + super(Dataset, self).__init__() + self.mnist = dset.MNIST(root=data_dir, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + ])) + + def __len__(self): + return len(self.mnist) + + def __getitem__(self, 
idx): + im, label = self.mnist[idx] + + # make sure data uses [0, 1] range + im -= im.min() + im /= im.max() + 1e-8 + im -= 0.5 + im /= 0.5 + return im, label + + +def train(args): + th.manual_seed(0) + np.random.seed(0) + + pydiffvg.set_use_gpu(args.cuda) + + # Initialize datasets + imsize = 28 + dataset = Dataset(args.data_dir, imsize) + dataloader = DataLoader(dataset, batch_size=args.bs, + num_workers=4, shuffle=True) + + if args.generator in ["vae", "ae"]: + LOG.info("Vector config:\n samples %d\n" + " paths: %d\n segments: %d\n" + " zdim: %d\n" + " conditional: %d\n" + " fc: %d\n", + args.samples, args.paths, args.segments, + args.zdim, args.conditional, args.fc) + + model_params = dict(samples=args.samples, paths=args.paths, + segments=args.segments, conditional=args.conditional, + zdim=args.zdim, fc=args.fc) + + if args.generator == "vae": + model = VectorMNISTVAE(variational=True, **model_params) + chkpt = VAE_OUTPUT + name = "mnist_vae" + elif args.generator == "ae": + model = VectorMNISTVAE(variational=False, **model_params) + chkpt = AE_OUTPUT + name = "mnist_ae" + else: + raise ValueError("unknown generator") + + if args.conditional: + name += "_conditional" + chkpt += "_conditional" + + if args.fc: + name += "_fc" + chkpt += "_fc" + + # Resume from checkpoint, if any + checkpointer = ttools.Checkpointer( + chkpt, model, meta=model_params, prefix="g_") + extras, meta = checkpointer.load_latest() + + if meta is not None and meta != model_params: + LOG.info(f"Checkpoint's metaparams differ from CLI, " + f"aborting: {meta} and {model_params}") + + # Hook interface + if args.generator in ["vae", "ae"]: + variational = args.generator == "vae" + if variational: + LOG.info("Using a VAE") + else: + LOG.info("Using an AE") + interface = VAEInterface(model, lr=args.lr, cuda=args.cuda, + variational=variational, + w_kld=args.kld_weight) + + trainer = ttools.Trainer(interface) + + # Add callbacks + keys = [] + if args.generator == "vae": + keys = ["kld", 
"data_loss", "loss", "logvar"] + elif args.generator == "ae": + keys = ["data_loss", "loss"] + port = 8080 + trainer.add_callback(ttools.callbacks.ProgressBarCallback( + keys=keys, val_keys=keys)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=keys, val_keys=keys, env=name, port=port)) + trainer.add_callback(MNISTCallback( + env=name, win="samples", port=port, frequency=args.freq)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer, max_files=2, interval=600, max_epochs=50)) + + # Start training + trainer.train(dataloader, num_epochs=args.num_epochs) + + +def generate_samples(args): + chkpt = VAE_OUTPUT + if args.conditional: + chkpt += "_conditional" + if args.fc: + chkpt += "_fc" + + meta = ttools.Checkpointer.load_meta(chkpt, prefix="g_") + if meta is None: + LOG.info("No metadata in checkpoint (or no checkpoint), aborting.") + return + + model = VectorMNISTVAE(**meta) + checkpointer = ttools.Checkpointer(chkpt, model, prefix="g_") + checkpointer.load_latest() + model.eval() + + # Sample some latent vectors + n = 8 + bs = n*n + z = th.randn(bs, model.zdim) + + imsize = 28 + dataset = Dataset(args.data_dir, imsize) + dataloader = DataLoader(dataset, batch_size=bs, + num_workers=1, shuffle=True) + + for batch in dataloader: + ref, label = batch + break + + autoencode = True + if autoencode: + LOG.info("Sampling with auto-encoder code") + if not args.conditional: + label = None + mu, logvar = model.encode(ref, label) + z = model.reparameterize(mu, logvar) + else: + label = None + if args.conditional: + label = th.clamp(th.rand(bs)*10, 0, 9).long() + if args.digit is not None: + label[:] = args.digit + + with th.no_grad(): + images, aux = model.decode(z, label=label) + scenes = aux["scenes"] + images += 1.0 + images /= 2.0 + + h = w = model.imsize + + images = images.view(n, n, h, w).permute(0, 2, 1, 3) + images = images.contiguous().view(n*h, n*w) + images = th.clamp(images, 0, 1).cpu().numpy() + path = 
os.path.join(chkpt, "samples.png") + pydiffvg.imwrite(images, path, gamma=2.2) + + if autoencode: + ref += 1.0 + ref /= 2.0 + ref = ref.view(n, n, h, w).permute(0, 2, 1, 3) + ref = ref.contiguous().view(n*h, n*w) + ref = th.clamp(ref, 0, 1).cpu().numpy() + path = os.path.join(chkpt, "ref.png") + pydiffvg.imwrite(ref, path, gamma=2.2) + + # merge scenes + all_shapes = [] + all_shape_groups = [] + cur_id = 0 + for idx, s in enumerate(scenes): + shapes, shape_groups, _ = s + # width, height = sizes + + # Shift digit on canvas + center_x = idx % n + center_y = idx // n + for shape in shapes: + shape.points[:, 0] += center_x * model.imsize + shape.points[:, 1] += center_y * model.imsize + all_shapes.append(shape) + for grp in shape_groups: + grp.shape_ids[:] = cur_id + cur_id += 1 + all_shape_groups.append(grp) + + LOG.info("Generated %d shapes", len(all_shapes)) + + fname = os.path.join(chkpt, "digits.svg") + pydiffvg.save_svg(fname, n*model.imsize, n*model.imsize, all_shapes, + all_shape_groups, use_gamma=False) + + LOG.info("Results saved to %s", chkpt) + + +def interpolate(args): + chkpt = VAE_OUTPUT + if args.conditional: + chkpt += "_conditional" + if args.fc: + chkpt += "_fc" + + meta = ttools.Checkpointer.load_meta(chkpt, prefix="g_") + if meta is None: + LOG.info("No metadata in checkpoint (or no checkpoint), aborting.") + return + + model = VectorMNISTVAE(imsize=128, **meta) + checkpointer = ttools.Checkpointer(chkpt, model, prefix="g_") + checkpointer.load_latest() + model.eval() + + # Sample some latent vectors + bs = 10 + z = th.randn(bs, model.zdim) + + label = None + label = th.arange(0, 10) + + animation = [] + nframes = 60 + with th.no_grad(): + for idx, _z in enumerate(z): + if idx == 0: # skip first + continue + _z0 = z[idx-1].unsqueeze(0).repeat(nframes, 1) + _z = _z.unsqueeze(0).repeat(nframes, 1) + if args.conditional: + _label = label[idx].unsqueeze(0).repeat(nframes) + else: + _label = None + + # interp weights + alpha = th.linspace(0, 1, 
nframes).view(nframes, 1) + batch = alpha*_z + (1.0 - alpha)*_z0 + images, aux = model.decode(batch, label=_label) + images += 1.0 + images /= 2.0 + animation.append(images) + + anim_dir = os.path.join(chkpt, "interpolation") + os.makedirs(anim_dir, exist_ok=True) + animation = th.cat(animation, 0) + for idx, frame in enumerate(animation): + frame = frame.squeeze() + frame = th.clamp(frame, 0, 1).cpu().numpy() + path = os.path.join(anim_dir, "frame%03d.png" % idx) + pydiffvg.imwrite(frame, path, gamma=2.2) + + LOG.info("Results saved to %s", anim_dir) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + subs = parser.add_subparsers() + + parser.add_argument("--cpu", dest="cuda", action="store_false", + default=th.cuda.is_available(), + help="if true, use CPU instead of GPU.") + parser.add_argument("--no-conditional", dest="conditional", + action="store_false", default=True) + parser.add_argument("--no-fc", dest="fc", action="store_false", + default=True) + parser.add_argument("--data_dir", default="mnist", + help="path to download and store the data.") + + # -- Train ---------------------------------------------------------------- + parser_train = subs.add_parser("train") + parser_train.add_argument("--generator", choices=["vae", "ae"], + default="vae", + help="choice of regular or variational " + "autoencoder") + parser_train.add_argument("--freq", type=int, default=100, + help="number of steps between visualizations") + parser_train.add_argument("--lr", type=float, default=5e-5, + help="learning rate") + parser_train.add_argument("--kld_weight", type=float, default=1.0, + help="scalar weight for the KL divergence term.") + parser_train.add_argument("--bs", type=int, default=8, help="batch size") + parser_train.add_argument("--num_epochs", default=50, type=int, + help="max number of epochs") + # Vector configs + parser_train.add_argument("--paths", type=int, default=1, + help="number of vector paths to generate.") + 
parser_train.add_argument("--segments", type=int, default=3, + help="number of segments per vector path") + parser_train.add_argument("--samples", type=int, default=4, + help="number of samples in the MC rasterizer") + parser_train.add_argument("--zdim", type=int, default=20, + help="dimension of the latent space") + parser_train.set_defaults(func=train) + + # -- Eval ----------------------------------------------------------------- + parser_sample = subs.add_parser("sample") + parser_sample.add_argument("--digit", type=int, choices=list(range(10)), + help="digits to synthesize, " + "random if not specified") + parser_sample.set_defaults(func=generate_samples) + + parser_interpolate = subs.add_parser("interpolate") + parser_interpolate.set_defaults(func=interpolate) + + args = parser.parse_args() + + ttools.set_logger(True) + args.func(args) diff --git a/diffvg/apps/generative_models/models.py b/diffvg/apps/generative_models/models.py new file mode 100644 index 0000000000000000000000000000000000000000..06d4b1cecd003bd683fbba0475f3288b5809e2cf --- /dev/null +++ b/diffvg/apps/generative_models/models.py @@ -0,0 +1,484 @@ +"""Collection of generative models.""" + +import torch as th +import ttools + +import rendering +import modules + +LOG = ttools.get_logger(__name__) + + +class BaseModel(th.nn.Module): + def sample_z(self, bs, device="cpu"): + return th.randn(bs, self.zdim).to(device) + + +class BaseVectorModel(BaseModel): + def get_vector(self, z): + _, scenes = self._forward(z) + return scenes + + def _forward(self, x): + raise NotImplementedError() + + def forward(self, z): + # Only return the raster + return self._forward(z)[0] + + +class BezierVectorGenerator(BaseVectorModel): + NUM_SEGMENTS = 2 + def __init__(self, num_strokes=4, + zdim=128, width=32, imsize=32, + color_output=False, + stroke_width=None): + super(BezierVectorGenerator, self).__init__() + + if stroke_width is None: + self.stroke_width = (0.5, 3.0) + LOG.warning("Setting default stroke with %s", 
self.stroke_width) + else: + self.stroke_width = stroke_width + + self.imsize = imsize + self.num_strokes = num_strokes + self.zdim = zdim + + self.trunk = th.nn.Sequential( + th.nn.Linear(zdim, width), + th.nn.SELU(inplace=True), + + th.nn.Linear(width, 2*width), + th.nn.SELU(inplace=True), + + th.nn.Linear(2*width, 4*width), + th.nn.SELU(inplace=True), + + th.nn.Linear(4*width, 8*width), + th.nn.SELU(inplace=True), + ) + + # 4 points bezier with n_segments -> 3*n_segments + 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(8*width, + 2*self.num_strokes*( + BezierVectorGenerator.NUM_SEGMENTS*3 + 1)), + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(8*width, self.num_strokes), + th.nn.Sigmoid() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(8*width, self.num_strokes), + th.nn.Sigmoid() + ) + + self.color_predictor = None + if color_output: + self.color_predictor = th.nn.Sequential( + th.nn.Linear(8*width, 3*self.num_strokes), + th.nn.Sigmoid() + ) + + def _forward(self, z): + bs = z.shape[0] + + feats = self.trunk(z) + all_points = self.point_predictor(feats) + all_alphas = self.alpha_predictor(feats) + + if self.color_predictor: + all_colors = self.color_predictor(feats) + all_colors = all_colors.view(bs, self.num_strokes, 3) + else: + all_colors = None + + all_widths = self.width_predictor(feats) + min_width = self.stroke_width[0] + max_width = self.stroke_width[1] + all_widths = (max_width - min_width) * all_widths + min_width + + all_points = all_points.view( + bs, self.num_strokes, BezierVectorGenerator.NUM_SEGMENTS*3+1, 2) + + output, scenes = rendering.bezier_render(all_points, all_widths, all_alphas, + colors=all_colors, + canvas_size=self.imsize) + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, scenes + + +class VectorGenerator(BaseVectorModel): + def __init__(self, num_strokes=4, + zdim=128, width=32, imsize=32, + color_output=False, + 
stroke_width=None): + super(VectorGenerator, self).__init__() + + if stroke_width is None: + self.stroke_width = (0.5, 3.0) + LOG.warning("Setting default stroke with %s", self.stroke_width) + else: + self.stroke_width = stroke_width + + self.imsize = imsize + self.num_strokes = num_strokes + self.zdim = zdim + + self.trunk = th.nn.Sequential( + th.nn.Linear(zdim, width), + th.nn.SELU(inplace=True), + + th.nn.Linear(width, 2*width), + th.nn.SELU(inplace=True), + + th.nn.Linear(2*width, 4*width), + th.nn.SELU(inplace=True), + + th.nn.Linear(4*width, 8*width), + th.nn.SELU(inplace=True), + ) + + # straight lines so n_segments -> n_segments - 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(8*width, 2*(self.num_strokes*2)), + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(8*width, self.num_strokes), + th.nn.Sigmoid() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(8*width, self.num_strokes), + th.nn.Sigmoid() + ) + + self.color_predictor = None + if color_output: + self.color_predictor = th.nn.Sequential( + th.nn.Linear(8*width, 3*self.num_strokes), + th.nn.Sigmoid() + ) + + def _forward(self, z): + bs = z.shape[0] + + feats = self.trunk(z) + + all_points = self.point_predictor(feats) + + all_alphas = self.alpha_predictor(feats) + + if self.color_predictor: + all_colors = self.color_predictor(feats) + all_colors = all_colors.view(bs, self.num_strokes, 3) + else: + all_colors = None + + all_widths = self.width_predictor(feats) + min_width = self.stroke_width[0] + max_width = self.stroke_width[1] + all_widths = (max_width - min_width) * all_widths + min_width + + all_points = all_points.view(bs, self.num_strokes, 2, 2) + output, scenes = rendering.line_render(all_points, all_widths, all_alphas, + colors=all_colors, + canvas_size=self.imsize) + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, scenes + + +class RNNVectorGenerator(BaseVectorModel): + def __init__(self, 
num_strokes=64, + zdim=128, width=32, imsize=32, + hidden_size=512, dropout=0.9, + color_output=False, + num_layers=3, stroke_width=None): + super(RNNVectorGenerator, self).__init__() + + + if stroke_width is None: + self.stroke_width = (0.5, 3.0) + LOG.warning("Setting default stroke with %s", self.stroke_width) + else: + self.stroke_width = stroke_width + + self.num_layers = num_layers + self.imsize = imsize + self.num_strokes = num_strokes + self.hidden_size = hidden_size + self.zdim = zdim + + self.hidden_cell_predictor = th.nn.Linear( + zdim, 2*hidden_size*num_layers) + + self.lstm = th.nn.LSTM( + zdim, hidden_size, + num_layers=self.num_layers, dropout=dropout, + batch_first=True) + + # straight lines so n_segments -> n_segments - 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 2*2), # 2 points, (x,y) + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid() + ) + + def _forward(self, z, hidden_and_cell=None): + steps = self.num_strokes + + # z is passed at each step, duplicate it + bs = z.shape[0] + expanded_z = z.unsqueeze(1).repeat(1, steps, 1) + + # First step in the RNN + if hidden_and_cell is None: + # Initialize from latent vector + hidden_and_cell = self.hidden_cell_predictor(th.tanh(z)) + hidden = hidden_and_cell[:, :self.hidden_size*self.num_layers] + hidden = hidden.view(-1, self.num_layers, self.hidden_size) + hidden = hidden.permute(1, 0, 2).contiguous() + cell = hidden_and_cell[:, self.hidden_size*self.num_layers:] + cell = cell.view(-1, self.num_layers, self.hidden_size) + cell = cell.permute(1, 0, 2).contiguous() + hidden_and_cell = (hidden, cell) + + feats, hidden_and_cell = self.lstm(expanded_z, hidden_and_cell) + hidden, cell = hidden_and_cell + + feats = feats.reshape(bs*steps, self.hidden_size) + + all_points = 
self.point_predictor(feats).view(bs, steps, 2, 2) + all_alphas = self.alpha_predictor(feats).view(bs, steps) + all_widths = self.width_predictor(feats).view(bs, steps) + + min_width = self.stroke_width[0] + max_width = self.stroke_width[1] + all_widths = (max_width - min_width) * all_widths + min_width + + output, scenes = rendering.line_render(all_points, all_widths, all_alphas, + canvas_size=self.imsize) + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, scenes + + +class ChainRNNVectorGenerator(BaseVectorModel): + """Strokes form a single long chain.""" + def __init__(self, num_strokes=64, + zdim=128, width=32, imsize=32, + hidden_size=512, dropout=0.9, + color_output=False, + num_layers=3, stroke_width=None): + super(ChainRNNVectorGenerator, self).__init__() + + if stroke_width is None: + self.stroke_width = (0.5, 3.0) + LOG.warning("Setting default stroke with %s", self.stroke_width) + else: + self.stroke_width = stroke_width + + self.num_layers = num_layers + self.imsize = imsize + self.num_strokes = num_strokes + self.hidden_size = hidden_size + self.zdim = zdim + + self.hidden_cell_predictor = th.nn.Linear( + zdim, 2*hidden_size*num_layers) + + self.lstm = th.nn.LSTM( + zdim, hidden_size, + num_layers=self.num_layers, dropout=dropout, + batch_first=True) + + # straight lines so n_segments -> n_segments - 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 2), # 1 point, (x,y) + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid() + ) + + def _forward(self, z, hidden_and_cell=None): + steps = self.num_strokes + + # z is passed at each step, duplicate it + bs = z.shape[0] + expanded_z = z.unsqueeze(1).repeat(1, steps, 1) + + # First step in the RNN + if hidden_and_cell is None: + # Initialize from latent vector + hidden_and_cell = 
self.hidden_cell_predictor(th.tanh(z)) + hidden = hidden_and_cell[:, :self.hidden_size*self.num_layers] + hidden = hidden.view(-1, self.num_layers, self.hidden_size) + hidden = hidden.permute(1, 0, 2).contiguous() + cell = hidden_and_cell[:, self.hidden_size*self.num_layers:] + cell = cell.view(-1, self.num_layers, self.hidden_size) + cell = cell.permute(1, 0, 2).contiguous() + hidden_and_cell = (hidden, cell) + + feats, hidden_and_cell = self.lstm(expanded_z, hidden_and_cell) + hidden, cell = hidden_and_cell + + feats = feats.reshape(bs*steps, self.hidden_size) + + # Construct the chain + end_points = self.point_predictor(feats).view(bs, steps, 1, 2) + start_points = th.cat([ + # first point is canvas center + th.zeros(bs, 1, 1, 2, device=feats.device), + end_points[:, 1:, :, :]], 1) + all_points = th.cat([start_points, end_points], 2) + + all_alphas = self.alpha_predictor(feats).view(bs, steps) + all_widths = self.width_predictor(feats).view(bs, steps) + + min_width = self.stroke_width[0] + max_width = self.stroke_width[1] + all_widths = (max_width - min_width) * all_widths + min_width + + output, scenes = rendering.line_render(all_points, all_widths, all_alphas, + canvas_size=self.imsize) + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, scenes + + +class Generator(BaseModel): + def __init__(self, width=64, imsize=32, zdim=128, + stroke_width=None, + color_output=False, + num_strokes=4): + super(Generator, self).__init__() + assert imsize == 32 + + self.imsize = imsize + self.zdim = zdim + + num_in_chans = self.zdim // (2*2) + num_out_chans = 3 if color_output else 1 + + self.net = th.nn.Sequential( + th.nn.ConvTranspose2d(num_in_chans, width*8, 4, padding=1, + stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width*8, width*8, 3, padding=1), + th.nn.BatchNorm2d(width*8), + th.nn.LeakyReLU(0.2, inplace=True), + # 4x4 + + th.nn.ConvTranspose2d(8*width, 4*width, 4, padding=1, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + 
th.nn.Conv2d(4*width, 4*width, 3, padding=1), + th.nn.BatchNorm2d(width*4), + th.nn.LeakyReLU(0.2, inplace=True), + # 8x8 + + th.nn.ConvTranspose2d(4*width, 2*width, 4, padding=1, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(2*width, 2*width, 3, padding=1), + th.nn.BatchNorm2d(width*2), + th.nn.LeakyReLU(0.2, inplace=True), + # 16x16 + + th.nn.ConvTranspose2d(2*width, width, 4, padding=1, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width, width, 3, padding=1), + th.nn.BatchNorm2d(width), + th.nn.LeakyReLU(0.2, inplace=True), + # 32x32 + + th.nn.Conv2d(width, width, 3, padding=1), + th.nn.BatchNorm2d(width), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width, width, 3, padding=1), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width, num_out_chans, 1), + + th.nn.Tanh(), + ) + + def forward(self, z): + bs = z.shape[0] + num_in_chans = self.zdim // (2*2) + raster = self.net(z.view(bs, num_in_chans, 2, 2)) + return raster + + +class Discriminator(th.nn.Module): + def __init__(self, conditional=False, width=64, color_output=False): + super(Discriminator, self).__init__() + + self.conditional = conditional + + sn = th.nn.utils.spectral_norm + + num_chan_in = 3 if color_output else 1 + + self.net = th.nn.Sequential( + th.nn.Conv2d(num_chan_in, width, 3, padding=1), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width, 2*width, 4, padding=1, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + # 16x16 + + sn(th.nn.Conv2d(2*width, 2*width, 3, padding=1)), + th.nn.LeakyReLU(0.2, inplace=True), + sn(th.nn.Conv2d(2*width, 4*width, 4, padding=1, stride=2)), + th.nn.LeakyReLU(0.2, inplace=True), + # 8x8 + + sn(th.nn.Conv2d(4*width, 4*width, 3, padding=1)), + th.nn.LeakyReLU(0.2, inplace=True), + sn(th.nn.Conv2d(4*width, width*4, 4, padding=1, stride=2)), + th.nn.LeakyReLU(0.2, inplace=True), + # 4x4 + + sn(th.nn.Conv2d(4*width, 4*width, 3, padding=1)), + th.nn.LeakyReLU(0.2, inplace=True), + sn(th.nn.Conv2d(4*width, 
width*4, 4, padding=1, stride=2)), + th.nn.LeakyReLU(0.2, inplace=True), + # 2x2 + + modules.Flatten(), + th.nn.Linear(width*4*2*2, 1), + ) + + def forward(self, x): + out = self.net(x) + return out diff --git a/diffvg/apps/generative_models/modules.py b/diffvg/apps/generative_models/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..e8589ae8007508a041e5923c9708789535526a85 --- /dev/null +++ b/diffvg/apps/generative_models/modules.py @@ -0,0 +1,11 @@ +"""Helper modules to build our networks.""" +import torch as th + + +class Flatten(th.nn.Module): + def __init__(self): + super(Flatten, self).__init__() + + def forward(self, x): + bs = x.shape[0] + return x.view(bs, -1) diff --git a/diffvg/apps/generative_models/rendering.py b/diffvg/apps/generative_models/rendering.py new file mode 100644 index 0000000000000000000000000000000000000000..4ef475ec77ba2a8d4ab326bcd9b6f05211be6215 --- /dev/null +++ b/diffvg/apps/generative_models/rendering.py @@ -0,0 +1,307 @@ +import os +import torch as th +import torch.multiprocessing as mp +import threading as mt +import numpy as np +import random + +import ttools + +import pydiffvg +import time + + +def render(canvas_width, canvas_height, shapes, shape_groups, samples=2, + seed=None): + if seed is None: + seed = random.randint(0, 1000000) + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene( + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, canvas_height, samples, samples, + seed, # seed + None, # background image + *scene_args) + return img + + +def opacityStroke2diffvg(strokes, canvas_size=128, debug=False, relative=True, + force_cpu=True): + + dev = strokes.device + if force_cpu: + strokes = strokes.to("cpu") + + + # pydiffvg.set_use_gpu(False) + # if strokes.is_cuda: + # pydiffvg.set_use_gpu(True) + + """Rasterize strokes given in (dx, dy, opacity) sequence format.""" + bs, nsegs, dims = strokes.shape + out = [] + + start = 
time.time() + for batch_idx, stroke in enumerate(strokes): + + if relative: # Absolute coordinates + all_points = stroke[..., :2].cumsum(0) + else: + all_points = stroke[..., :2] + + all_opacities = stroke[..., 2] + + # Transform from [-1, 1] to canvas coordinates + # Make sure points are in canvas + all_points = 0.5*(all_points + 1.0) * canvas_size + # all_points = th.clamp(0.5*(all_points + 1.0), 0, 1) * canvas_size + + # Avoid overlapping points + eps = 1e-4 + all_points = all_points + eps*th.randn_like(all_points) + + shapes = [] + shape_groups = [] + + for start_idx in range(0, nsegs-1): + points = all_points[start_idx:start_idx+2].contiguous().float() + opacity = all_opacities[start_idx] + + num_ctrl_pts = th.zeros(points.shape[0] - 1, dtype=th.int32) + width = th.ones(1) + + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + + shapes.append(path) + + color = th.cat([th.ones(3, device=opacity.device), + opacity.unsqueeze(0)], 0) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + # Rasterize only if there are shapes + if shapes: + inner_start = time.time() + out.append(render(canvas_size, canvas_size, shapes, shape_groups, + samples=4)) + if debug: + inner_elapsed = time.time() - inner_start + print("diffvg call took %.2fms" % inner_elapsed) + else: + out.append(th.zeros(canvas_size, canvas_size, 4, + device=strokes.device)) + + if debug: + elapsed = (time.time() - start)*1000 + print("rendering took %.2fms" % elapsed) + images = th.stack(out, 0).permute(0, 3, 1, 2).contiguous() + + # Return data on the same device as input + return images.to(dev) + + +def stroke2diffvg(strokes, canvas_size=128): + """Rasterize strokes given some sequential data.""" + bs, nsegs, dims = strokes.shape + out = [] + for stroke_idx, stroke in enumerate(strokes): + end_of_stroke = stroke[:, 4] == 1 + last = 
end_of_stroke.cpu().numpy().argmax() + stroke = stroke[:last+1, :] + # stroke = stroke[~end_of_stroke] + # TODO: stop at the first end of stroke + # import ipdb; ipdb.set_trace() + split_idx = stroke[:, 3].nonzero().squeeze(1) + + # Absolute coordinates + all_points = stroke[..., :2].cumsum(0) + + # Transform to canvas coordinates + all_points[..., 0] += 0.5 + all_points[..., 0] *= canvas_size + all_points[..., 1] += 0.5 + all_points[..., 1] *= canvas_size + + # Make sure points are in canvas + all_points[..., :2] = th.clamp(all_points[..., :2], 0, canvas_size) + + shape_groups = [] + shapes = [] + start_idx = 0 + + for count, end_idx in enumerate(split_idx): + points = all_points[start_idx:end_idx+1].contiguous().float() + + if points.shape[0] <= 2: # we need at least 2 points for a line + continue + + num_ctrl_pts = th.zeros(points.shape[0] - 1, dtype=th.int32) + width = th.ones(1) + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + + start_idx = end_idx+1 + shapes.append(path) + + color = th.ones(4, 1) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + # Rasterize + if shapes: + # draw only if there are shapes + out.append(render(canvas_size, canvas_size, shapes, shape_groups, samples=2)) + else: + out.append(th.zeros(canvas_size, canvas_size, 4, + device=strokes.device)) + + return th.stack(out, 0).permute(0, 3, 1, 2)[:, :3].contiguous() + + +def line_render(all_points, all_widths, all_alphas, force_cpu=True, + canvas_size=32, colors=None): + dev = all_points.device + if force_cpu: + all_points = all_points.to("cpu") + all_widths = all_widths.to("cpu") + all_alphas = all_alphas.to("cpu") + + if colors is not None: + colors = colors.to("cpu") + + all_points = 0.5*(all_points + 1.0) * canvas_size + + eps = 1e-4 + all_points = all_points + eps*th.randn_like(all_points) + + bs, num_segments, _, _ = 
all_points.shape + n_out = 3 if colors is not None else 1 + output = th.zeros(bs, n_out, canvas_size, canvas_size, + device=all_points.device) + + scenes = [] + for k in range(bs): + shapes = [] + shape_groups = [] + for p in range(num_segments): + points = all_points[k, p].contiguous().cpu() + num_ctrl_pts = th.zeros(1, dtype=th.int32) + width = all_widths[k, p].cpu() + alpha = all_alphas[k, p].cpu() + if colors is not None: + color = colors[k, p] + else: + color = th.ones(3, device=alpha.device) + + color = th.cat([color, alpha.view(1,)]) + + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + shapes.append(path) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + # Rasterize + scenes.append((canvas_size, canvas_size, shapes, shape_groups)) + raster = render(canvas_size, canvas_size, shapes, shape_groups, + samples=2) + raster = raster.permute(2, 0, 1).view(4, canvas_size, canvas_size) + + alpha = raster[3:4] + if colors is not None: # color output + image = raster[:3] + alpha = alpha.repeat(3, 1, 1) + else: + image = raster[:1] + + # alpha compositing + image = image*alpha + output[k] = image + + output = output.to(dev) + + return output, scenes + + +def bezier_render(all_points, all_widths, all_alphas, force_cpu=True, + canvas_size=32, colors=None): + dev = all_points.device + if force_cpu: + all_points = all_points.to("cpu") + all_widths = all_widths.to("cpu") + all_alphas = all_alphas.to("cpu") + + if colors is not None: + colors = colors.to("cpu") + + all_points = 0.5*(all_points + 1.0) * canvas_size + + eps = 1e-4 + all_points = all_points + eps*th.randn_like(all_points) + + bs, num_strokes, num_pts, _ = all_points.shape + num_segments = (num_pts - 1) // 3 + n_out = 3 if colors is not None else 1 + output = th.zeros(bs, n_out, canvas_size, canvas_size, + device=all_points.device) + + scenes = 
[] + for k in range(bs): + shapes = [] + shape_groups = [] + for p in range(num_strokes): + points = all_points[k, p].contiguous().cpu() + # bezier + num_ctrl_pts = th.zeros(num_segments, dtype=th.int32) + 2 + width = all_widths[k, p].cpu() + alpha = all_alphas[k, p].cpu() + if colors is not None: + color = colors[k, p] + else: + color = th.ones(3, device=alpha.device) + + color = th.cat([color, alpha.view(1,)]) + + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + shapes.append(path) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + # Rasterize + scenes.append((canvas_size, canvas_size, shapes, shape_groups)) + raster = render(canvas_size, canvas_size, shapes, shape_groups, + samples=2) + raster = raster.permute(2, 0, 1).view(4, canvas_size, canvas_size) + + alpha = raster[3:4] + if colors is not None: # color output + image = raster[:3] + alpha = alpha.repeat(3, 1, 1) + else: + image = raster[:1] + + # alpha compositing + image = image*alpha + output[k] = image + + output = output.to(dev) + + return output, scenes diff --git a/diffvg/apps/generative_models/sketch_rnn.py b/diffvg/apps/generative_models/sketch_rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..2b88767379bd89399dd10ba02c8739e8e65e93ec --- /dev/null +++ b/diffvg/apps/generative_models/sketch_rnn.py @@ -0,0 +1,461 @@ +#!/bin/env python +"""Train a Sketch-RNN.""" +import argparse +from enum import Enum +import os +import wget + +import numpy as np +import torch as th +from torch.utils.data import DataLoader +import torchvision.datasets as dset +import torchvision.transforms as transforms + +import ttools +import ttools.interfaces +from ttools.modules import networks + +import pydiffvg + +import rendering +import losses +import data + +LOG = ttools.get_logger(__name__) + + +BASE_DIR = 
os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) +OUTPUT = os.path.join(BASE_DIR, "results", "sketch_rnn_diffvg") +OUTPUT_BASELINE = os.path.join(BASE_DIR, "results", "sketch_rnn") + + +class SketchRNN(th.nn.Module): + class Encoder(th.nn.Module): + def __init__(self, hidden_size=512, dropout=0.9, zdim=128, + num_layers=1): + super(SketchRNN.Encoder, self).__init__() + self.hidden_size = hidden_size + self.num_layers = num_layers + self.zdim = zdim + + self.lstm = th.nn.LSTM(5, hidden_size, num_layers=self.num_layers, + dropout=dropout, bidirectional=True, + batch_first=True) + + # bidirectional model -> *2 + self.mu_predictor = th.nn.Linear(2*hidden_size, zdim) + self.sigma_predictor = th.nn.Linear(2*hidden_size, zdim) + + def forward(self, sequences, hidden_and_cell=None): + bs = sequences.shape[0] + if hidden_and_cell is None: + hidden = th.zeros(self.num_layers*2, bs, self.hidden_size).to( + sequences.device) + cell = th.zeros(self.num_layers*2, bs, self.hidden_size).to( + sequences.device) + hidden_and_cell = (hidden, cell) + + out, hidden_and_cell = self.lstm(sequences, hidden_and_cell) + hidden = hidden_and_cell[0] + + # Concat the forward/backward states + fc_input = th.cat([hidden[0], hidden[1]], 1) + + # VAE params + mu = self.mu_predictor(fc_input) + log_sigma = self.sigma_predictor(fc_input) + + # Sample a latent vector + sigma = th.exp(log_sigma/2.0) + z0 = th.randn(self.zdim, device=mu.device) + z = mu + sigma*z0 + + # KL divergence needs mu/sigma + return z, mu, log_sigma + + class Decoder(th.nn.Module): + """ + The decoder outputs a sequence where each time step models (dx, dy) as + a mixture of `num_gaussians` 2D Gaussians and the state triplet is a + categorical distribution. 
+ + The model outputs at each time step: + - 5 parameters for each Gaussian: mu_x, mu_y, sigma_x, sigma_y, + rho_xy + - 1 logit for each Gaussian (the mixture weight) + - 3 logits for the state triplet probabilities + """ + def __init__(self, hidden_size=512, dropout=0.9, zdim=128, + num_layers=1, num_gaussians=20): + super(SketchRNN.Decoder, self).__init__() + self.hidden_size = hidden_size + self.num_layers = num_layers + self.zdim = zdim + self.num_gaussians = num_gaussians + + # Maps the latent vector to an initial cell/hidden vector + self.hidden_cell_predictor = th.nn.Linear(zdim, 2*hidden_size) + + self.lstm = th.nn.LSTM( + 5 + zdim, hidden_size, + num_layers=self.num_layers, dropout=dropout, + batch_first=True) + + self.parameters_predictor = th.nn.Linear( + hidden_size, num_gaussians + 5*num_gaussians + 3) + + def forward(self, inputs, z, hidden_and_cell=None): + # Every step in the sequence takes the latent vector as input so we + # replicate it here + expanded_z = z.unsqueeze(1).repeat(1, inputs.shape[1], 1) + inputs = th.cat([inputs, expanded_z], 2) + + bs, steps = inputs.shape[:2] + if hidden_and_cell is None: + # Initialize from latent vector + hidden_and_cell = self.hidden_cell_predictor(th.tanh(z)) + hidden = hidden_and_cell[:, :self.hidden_size] + hidden = hidden.unsqueeze(0).contiguous() + cell = hidden_and_cell[:, self.hidden_size:] + cell = cell.unsqueeze(0).contiguous() + hidden_and_cell = (hidden, cell) + + outputs, hidden_and_cell = self.lstm(inputs, hidden_and_cell) + hidden, cell = hidden_and_cell + + # if self.training: + # At train time we want parameters for each time step + outputs = outputs.reshape(bs*steps, self.hidden_size) + params = self.parameters_predictor(outputs).view(bs, steps, -1) + + pen_logits = params[..., -3:] + gaussian_params = params[..., :-3] + mixture_logits = gaussian_params[..., :self.num_gaussians] + gaussian_params = gaussian_params[..., self.num_gaussians:].view( + bs, steps, self.num_gaussians, -1) + + return 
pen_logits, mixture_logits, gaussian_params, hidden_and_cell + + def __init__(self, zdim=128, num_gaussians=20, encoder_dim=256, + decoder_dim=512): + super(SketchRNN, self).__init__() + self.encoder = SketchRNN.Encoder(zdim=zdim, hidden_size=encoder_dim) + self.decoder = SketchRNN.Decoder(zdim=zdim, hidden_size=decoder_dim, + num_gaussians=num_gaussians) + + def forward(self, sequences): + # Encode the sequences as latent vectors + # We skip the first time step since it is the same for all sequences: + # (0, 0, 1, 0, 0) + z, mu, log_sigma = self.encoder(sequences[:, 1:]) + + # Decode the latent vector into a model sequence + # Do not process the last time step (it is an end-of-sequence token) + pen_logits, mixture_logits, gaussian_params, hidden_and_cell = \ + self.decoder(sequences[:, :-1], z) + + return { + "pen_logits": pen_logits, + "mixture_logits": mixture_logits, + "gaussian_params": gaussian_params, + "z": z, + "mu": mu, + "log_sigma": log_sigma, + "hidden_and_cell": hidden_and_cell, + } + + def sample(self, sequences, temperature=1.0): + # Compute a latent vector conditionned based on a real sequence + z, _, _ = self.encoder(sequences[:, 1:]) + + start_of_seq = sequences[:, :1] + + max_steps = sequences.shape[1] - 1 # last step is an end-of-seq token + + output_sequences = th.zeros_like(sequences) + output_sequences[:, 0] = start_of_seq.squeeze(1) + + current_input = start_of_seq + hidden_and_cell = None + for step in range(max_steps): + pen_logits, mixture_logits, gaussian_params, hidden_and_cell = \ + self.decoder(current_input, z, hidden_and_cell=hidden_and_cell) + + # Pen and displacement state for the next step + next_state = th.zeros_like(current_input) + + # Adjust temperature to control randomness + mixture_logits = mixture_logits*temperature + pen_logits = pen_logits*temperature + + # Select one of 3 pen states + pen_distrib = \ + th.distributions.categorical.Categorical(logits=pen_logits) + pen_state = pen_distrib.sample() + + # One-hot encoding 
of the state + next_state[:, :, 2:].scatter_(2, pen_state.unsqueeze(-1), + th.ones_like(next_state[:, :, 2:])) + + # Select one of the Gaussians from the mixture + mixture_distrib = \ + th.distributions.categorical.Categorical(logits=mixture_logits) + mixture_idx = mixture_distrib.sample() + + # select the Gaussian parameter + mixture_idx = mixture_idx.unsqueeze(-1).unsqueeze(-1) + mixture_idx = mixture_idx.repeat(1, 1, 1, 5) + params = th.gather(gaussian_params, 2, mixture_idx).squeeze(2) + + # Sample a Gaussian from the corresponding Gaussian + mu = params[..., :2] + sigma_x = params[..., 2].exp() + sigma_y = params[..., 3].exp() + rho_xy = th.tanh(params[..., 4]) + cov = th.zeros(params.shape[0], params.shape[1], 2, 2, + device=params.device) + cov[..., 0, 0] = sigma_x.pow(2)*temperature + cov[..., 1, 1] = sigma_x.pow(2)*temperature + cov[..., 1, 0] = sigma_x*sigma_y*rho_xy*temperature + point_distrib = \ + th.distributions.multivariate_normal.MultivariateNormal( + mu, scale_tril=cov) + point = point_distrib.sample() + next_state[:, :, :2] = point + + # Commit step to output + output_sequences[:, step + 1] = next_state.squeeze(1) + + # Prepare next recurrent step + current_input = next_state + + return output_sequences + + +class SketchRNNCallback(ttools.callbacks.ImageDisplayCallback): + """Simple callback that visualize images.""" + def visualized_image(self, batch, step_data, is_val=False): + if not is_val: + # No need to render training data + return None + + with th.no_grad(): + # only display the first n drawings + n = 8 + batch = batch[:n] + + out_im = rendering.stroke2diffvg(step_data["sample"][:n]) + im = rendering.stroke2diffvg(batch) + im = th.cat([im, out_im], 2) + + return im + + def caption(self, batch, step_data, is_val=False): + if is_val: + return "top: truth, bottom: sample" + else: + return "top: truth, bottom: sample" + + +class Interface(ttools.ModelInterface): + def __init__(self, model, lr=1e-3, lr_decay=0.9999, + kl_weight=0.5, 
kl_min_weight=0.01, kl_decay=0.99995, + device="cpu", grad_clip=1.0, sampling_temperature=0.4): + super(Interface, self).__init__() + self.grad_clip = grad_clip + self.sampling_temperature = sampling_temperature + + self.model = model + self.device = device + self.model.to(self.device) + self.enc_opt = th.optim.Adam(self.model.encoder.parameters(), lr=lr) + self.dec_opt = th.optim.Adam(self.model.decoder.parameters(), lr=lr) + + self.kl_weight = kl_weight + self.kl_min_weight = kl_min_weight + self.kl_decay = kl_decay + self.kl_loss = losses.KLDivergence() + + self.schedulers = [ + th.optim.lr_scheduler.ExponentialLR(self.enc_opt, lr_decay), + th.optim.lr_scheduler.ExponentialLR(self.dec_opt, lr_decay), + ] + + self.reconstruction_loss = losses.GaussianMixtureReconstructionLoss() + + def optimizers(self): + return [self.enc_opt, self.dec_opt] + + def training_step(self, batch): + batch = batch.to(self.device) + out = self.model(batch) + + kl_loss = self.kl_loss( + out["mu"], out["log_sigma"]) + + # The target to predict is the next sequence step + targets = batch[:, 1:].to(self.device) + + # Scale the KL divergence weight + try: + state = self.enc_opt.state_dict()["param_groups"][0]["params"][0] + optim_step = self.enc_opt.state_dict()["state"][state]["step"] + except KeyError: + optim_step = 0 # no step taken yet + kl_scaling = 1.0 - (1.0 - + self.kl_min_weight)*(self.kl_decay**optim_step) + kl_weight = self.kl_weight * kl_scaling + + reconstruction_loss = self.reconstruction_loss( + out["pen_logits"], out["mixture_logits"], + out["gaussian_params"], targets) + loss = kl_loss*self.kl_weight + reconstruction_loss + + self.enc_opt.zero_grad() + self.dec_opt.zero_grad() + loss.backward() + + # clip gradients + enc_nrm = th.nn.utils.clip_grad_norm_( + self.model.encoder.parameters(), self.grad_clip) + dec_nrm = th.nn.utils.clip_grad_norm_( + self.model.decoder.parameters(), self.grad_clip) + + if enc_nrm > self.grad_clip: + LOG.debug("Clipped encoder gradient (%.5f) 
to %.2f", + enc_nrm, self.grad_clip) + + if dec_nrm > self.grad_clip: + LOG.debug("Clipped decoder gradient (%.5f) to %.2f", + dec_nrm, self.grad_clip) + + self.enc_opt.step() + self.dec_opt.step() + + return { + "loss": loss.item(), + "kl_loss": kl_loss.item(), + "kl_weight": kl_weight, + "recons_loss": reconstruction_loss.item(), + "lr": self.enc_opt.param_groups[0]["lr"], + } + + def init_validation(self): + return dict(sample=None) + + def validation_step(self, batch, running_data): + # Switch to eval mode for dropout, batchnorm, etc + self.model.eval() + with th.no_grad(): + sample = self.model.sample( + batch.to(self.device), temperature=self.sampling_temperature) + running_data["sample"] = sample + self.model.train() + return running_data + + +def train(args): + th.manual_seed(0) + np.random.seed(0) + + dataset = data.QuickDrawDataset(args.dataset) + dataloader = DataLoader( + dataset, batch_size=args.bs, num_workers=4, shuffle=True, + pin_memory=False) + + val_dataset = [s for idx, s in enumerate(dataset) if idx < 8] + val_dataloader = DataLoader( + val_dataset, batch_size=8, num_workers=4, shuffle=False, + pin_memory=False) + + model_params = { + "zdim": args.zdim, + "num_gaussians": args.num_gaussians, + "encoder_dim": args.encoder_dim, + "decoder_dim": args.decoder_dim, + } + model = SketchRNN(**model_params) + model.train() + + device = "cpu" + if th.cuda.is_available(): + device = "cuda" + LOG.info("Using CUDA") + + interface = Interface(model, lr=args.lr, lr_decay=args.lr_decay, + kl_decay=args.kl_decay, kl_weight=args.kl_weight, + sampling_temperature=args.sampling_temperature, + device=device) + + chkpt = OUTPUT_BASELINE + env_name = "sketch_rnn" + + # Resume from checkpoint, if any + checkpointer = ttools.Checkpointer( + chkpt, model, meta=model_params, + optimizers=interface.optimizers(), + schedulers=interface.schedulers) + extras, meta = checkpointer.load_latest() + epoch = extras["epoch"] if extras and "epoch" in extras.keys() else 0 + + if 
meta is not None and meta != model_params: + LOG.info("Checkpoint's metaparams differ " + "from CLI, aborting: %s and %s", meta, model_params) + + trainer = ttools.Trainer(interface) + + # Add callbacks + losses = ["loss", "kl_loss", "recons_loss"] + training_debug = ["lr", "kl_weight"] + trainer.add_callback(ttools.callbacks.ProgressBarCallback( + keys=losses, val_keys=None)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=losses, val_keys=None, env=env_name, port=args.port)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=training_debug, smoothing=0, val_keys=None, env=env_name, + port=args.port)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer, max_files=2, interval=600, max_epochs=10)) + trainer.add_callback( + ttools.callbacks.LRSchedulerCallback(interface.schedulers)) + + trainer.add_callback(SketchRNNCallback( + env=env_name, win="samples", port=args.port, frequency=args.freq)) + + # Start training + trainer.train(dataloader, starting_epoch=epoch, + val_dataloader=val_dataloader, + num_epochs=args.num_epochs) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--dataset", default="cat.npz") + + # Training params + parser.add_argument("--bs", type=int, default=100) + parser.add_argument("--num_epochs", type=int, default=10000) + parser.add_argument("--lr", type=float, default=1e-4) + parser.add_argument("--lr_decay", type=float, default=0.9999) + parser.add_argument("--kl_weight", type=float, default=0.5) + parser.add_argument("--kl_decay", type=float, default=0.99995) + + # Model configuration + parser.add_argument("--zdim", type=int, default=128) + parser.add_argument("--num_gaussians", type=int, default=20) + parser.add_argument("--encoder_dim", type=int, default=256) + parser.add_argument("--decoder_dim", type=int, default=512) + + parser.add_argument("--sampling_temperature", type=float, default=0.4, + help="controls sampling randomness. 
" + "0.0: deterministic, 1.0: unchanged") + + # Viz params + parser.add_argument("--freq", type=int, default=100) + parser.add_argument("--port", type=int, default=5000) + + args = parser.parse_args() + + pydiffvg.set_use_gpu(th.cuda.is_available()) + + train(args) diff --git a/diffvg/apps/generative_models/sketch_vae.py b/diffvg/apps/generative_models/sketch_vae.py new file mode 100644 index 0000000000000000000000000000000000000000..797c3e522a7d21cf72e911af51824d40242e6735 --- /dev/null +++ b/diffvg/apps/generative_models/sketch_vae.py @@ -0,0 +1,524 @@ +#!/bin/env python +"""Train a Sketch-VAE.""" +import argparse +from enum import Enum +import os +import wget +import time + +import numpy as np +import torch as th +from torch.utils.data import DataLoader +import torchvision.datasets as dset +import torchvision.transforms as transforms + +import ttools +import ttools.interfaces +from ttools.modules import networks + +import rendering +import losses +import modules +import data + +import pydiffvg + +LOG = ttools.get_logger(__name__) + + +BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) +OUTPUT = os.path.join(BASE_DIR, "results") + + +class SketchVAE(th.nn.Module): + class ImageEncoder(th.nn.Module): + def __init__(self, image_size=64, width=64, zdim=128): + super(SketchVAE.ImageEncoder, self).__init__() + self.zdim = zdim + + self.net = th.nn.Sequential( + th.nn.Conv2d(4, width, 5, padding=2), + th.nn.InstanceNorm2d(width), + th.nn.ReLU(inplace=True), + # 64x64 + + th.nn.Conv2d(width, width, 5, padding=2), + th.nn.InstanceNorm2d(width), + th.nn.ReLU( inplace=True), + # 64x64 + + th.nn.Conv2d(width, 2*width, 5, stride=1, padding=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 32x32 + + th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2), + th.nn.InstanceNorm2d(2*width), + 
th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 8x8 + + th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 4x4 + + modules.Flatten(), + th.nn.Linear(4*4*2*width, 2*zdim) + ) + + def forward(self, images): + features = self.net(images) + + # VAE params + mu = features[:, :self.zdim] + log_sigma = features[:, self.zdim:] + + # Sample a latent vector + sigma = th.exp(log_sigma/2.0) + z0 = th.randn(self.zdim, device=mu.device) + z = mu + sigma*z0 + + # KL divergence needs mu/sigma + return z, mu, log_sigma + + class ImageDecoder(th.nn.Module): + """""" + def __init__(self, zdim=128, image_size=64, width=64): + super(SketchVAE.ImageDecoder, self).__init__() + self.zdim = zdim + self.width = width + + self.embedding = th.nn.Linear(zdim, 4*4*2*width) + + self.net = th.nn.Sequential( + th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 8x8 + + th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.Conv2d(2*width, 2*width, 5, padding=2, stride=1), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 32x32 + + th.nn.Conv2d(2*width, width, 5, padding=2, stride=1), + th.nn.InstanceNorm2d(width), + th.nn.ReLU( inplace=True), + # 32x32 + + th.nn.ConvTranspose2d(width, width, 5, padding=2, stride=1), + th.nn.InstanceNorm2d(width), + th.nn.ReLU( inplace=True), + # 64x64 + + th.nn.Conv2d(width, width, 5, padding=2, stride=1), + 
th.nn.InstanceNorm2d(width), + th.nn.ReLU( inplace=True), + # 64x64 + + th.nn.Conv2d(width, 4, 5, padding=2, stride=1), + ) + + def forward(self, z): + bs = z.shape[0] + im = self.embedding(z).view(bs, 2*self.width, 4, 4) + out = self.net(im) + return out + + class SketchDecoder(th.nn.Module): + """ + The decoder outputs a sequence where each time step models (dx, dy, + opacity). + """ + def __init__(self, sequence_length, hidden_size=512, dropout=0.9, + zdim=128, num_layers=3): + super(SketchVAE.SketchDecoder, self).__init__() + self.sequence_length = sequence_length + self.hidden_size = hidden_size + self.num_layers = num_layers + self.zdim = zdim + + # Maps the latent vector to an initial cell/hidden vector + self.hidden_cell_predictor = th.nn.Linear(zdim, 2*hidden_size*num_layers) + + self.lstm = th.nn.LSTM( + zdim, hidden_size, + num_layers=self.num_layers, dropout=dropout, + batch_first=True) + + self.dxdy_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 2), + th.nn.Tanh(), + ) + self.opacity_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid(), + ) + + def forward(self, z, hidden_and_cell=None): + # Every step in the sequence takes the latent vector as input so we + # replicate it here + bs = z.shape[0] + steps = self.sequence_length - 1 # no need to predict the start of sequence + expanded_z = z.unsqueeze(1).repeat(1, steps, 1) + + if hidden_and_cell is None: + # Initialize from latent vector + hidden_and_cell = self.hidden_cell_predictor( + th.tanh(z)) + hidden = hidden_and_cell[:, :self.hidden_size*self.num_layers] + hidden = hidden.view(-1, self.num_layers, self.hidden_size) + hidden = hidden.permute(1, 0, 2).contiguous() + # hidden = hidden.unsqueeze(1).contiguous() + cell = hidden_and_cell[:, self.hidden_size*self.num_layers:] + cell = cell.view(-1, self.num_layers, self.hidden_size) + cell = cell.permute(1, 0, 2).contiguous() + # cell = cell.unsqueeze(1).contiguous() + hidden_and_cell = (hidden, cell) + + outputs, 
hidden_and_cell = self.lstm(expanded_z, hidden_and_cell) + hidden, cell = hidden_and_cell + + dxdy = self.dxdy_predictor( + outputs.reshape(bs*steps, self.hidden_size)).view(bs, steps, -1) + + opacity = self.opacity_predictor( + outputs.reshape(bs*steps, self.hidden_size)).view(bs, steps, -1) + + strokes = th.cat([dxdy, opacity], -1) + + return strokes + + def __init__(self, sequence_length, zdim=128, image_size=64): + super(SketchVAE, self).__init__() + self.im_encoder = SketchVAE.ImageEncoder( + zdim=zdim, image_size=image_size) + self.im_decoder = SketchVAE.ImageDecoder( + zdim=zdim, image_size=image_size) + self.sketch_decoder = SketchVAE.SketchDecoder( + sequence_length, zdim=zdim) + + def forward(self, images): + # Encode the images as latent vectors + z, mu, log_sigma = self.im_encoder(images) + decoded_im = self.im_decoder(z) + decoded_sketch = self.sketch_decoder(z) + + return { + "decoded_im": decoded_im, + "decoded_sketch": decoded_sketch, + "z": z, + "mu": mu, + "log_sigma": log_sigma, + } + + +class SketchVAECallback(ttools.callbacks.ImageDisplayCallback): + """Simple callback that visualize images.""" + def visualized_image(self, batch, step_data, is_val=False): + if is_val: + return None + + # only display the first n drawings + n = 8 + gt = step_data["gt_image"][:n].detach() + vae_im = step_data["vae_image"][:n].detach() + sketch_im = step_data["sketch_image"][:n].detach() + + rendering = th.cat([gt, vae_im, sketch_im], 2) + rendering = th.clamp(rendering, 0, 1) + alpha = rendering[:, 3:4] + rendering = rendering[:, :3] * alpha + + return rendering + + def caption(self, batch, step_data, is_val=False): + if is_val: + return "" + else: + return "top: truth, middle: vae sample, output: rnn-output" + + + + +class Interface(ttools.ModelInterface): + def __init__(self, model, lr=1e-4, lr_decay=0.9999, + kl_weight=0.5, kl_min_weight=0.01, kl_decay=0.99995, + raster_resolution=64, absolute_coords=False, + device="cpu", grad_clip=1.0): + super(Interface, 
self).__init__() + + self.grad_clip = grad_clip + self.raster_resolution = raster_resolution + self.absolute_coords = absolute_coords + + self.model = model + self.device = device + self.model.to(self.device) + self.im_enc_opt = th.optim.Adam( + self.model.im_encoder.parameters(), lr=lr) + self.im_dec_opt = th.optim.Adam( + self.model.im_decoder.parameters(), lr=lr) + self.sketch_dec_opt = th.optim.Adam( + self.model.sketch_decoder.parameters(), lr=lr) + + self.kl_weight = kl_weight + self.kl_min_weight = kl_min_weight + self.kl_decay = kl_decay + self.kl_loss = losses.KLDivergence() + + self.schedulers = [ + th.optim.lr_scheduler.ExponentialLR(self.im_enc_opt, lr_decay), + th.optim.lr_scheduler.ExponentialLR(self.im_dec_opt, lr_decay), + th.optim.lr_scheduler.ExponentialLR(self.sketch_dec_opt, lr_decay), + ] + + # include loss on alpha + self.im_loss = losses.MultiscaleMSELoss(channels=4).to(self.device) + + def optimizers(self): + return [self.im_enc_opt, self.im_dec_opt, self.sketch_dec_opt] + + def kl_scaling(self): + # Scale the KL divergence weight + try: + state = self.im_enc_opt.state_dict()["param_groups"][0]["params"][0] + optim_step = self.im_enc_opt.state_dict()["state"][state]["step"] + except KeyError: + optim_step = 0 # no step taken yet + kl_scaling = 1.0 - (1.0 - + self.kl_min_weight)*(self.kl_decay**optim_step) + return kl_scaling + + def training_step(self, batch): + gt_strokes, gt_im = batch + gt_strokes = gt_strokes.to(self.device) + gt_im = gt_im.to(self.device) + + out = self.model(gt_im) + + kl_loss = self.kl_loss( + out["mu"], out["log_sigma"]) + kl_weight = self.kl_weight * self.kl_scaling() + + # add start of sequence + sos = gt_strokes[:, :1] + sketch = th.cat([sos, out["decoded_sketch"]], 1) + + vae_im = out["decoded_im"] + + # start = time.time() + sketch_im = rendering.opacityStroke2diffvg( + sketch, canvas_size=self.raster_resolution, debug=False, + force_cpu=True, relative=not self.absolute_coords) + # elapsed = (time.time() - 
start)*1000 + # print("out rendering took %.2fms" % elapsed) + + vae_im_loss = self.im_loss(vae_im, gt_im) + sketch_im_loss = self.im_loss(sketch_im, gt_im) + + # vae_im_loss = th.nn.functional.mse_loss(vae_im, gt_im) + # sketch_im_loss = th.nn.functional.mse_loss(sketch_im, gt_im) + + loss = vae_im_loss + kl_loss*kl_weight + sketch_im_loss + + self.im_enc_opt.zero_grad() + self.im_dec_opt.zero_grad() + self.sketch_dec_opt.zero_grad() + loss.backward() + + # clip gradients + enc_nrm = th.nn.utils.clip_grad_norm_( + self.model.im_encoder.parameters(), self.grad_clip) + dec_nrm = th.nn.utils.clip_grad_norm_( + self.model.im_decoder.parameters(), self.grad_clip) + sketch_dec_nrm = th.nn.utils.clip_grad_norm_( + self.model.sketch_decoder.parameters(), self.grad_clip) + + if enc_nrm > self.grad_clip: + LOG.debug("Clipped encoder gradient (%.5f) to %.2f", + enc_nrm, self.grad_clip) + + if dec_nrm > self.grad_clip: + LOG.debug("Clipped decoder gradient (%.5f) to %.2f", + dec_nrm, self.grad_clip) + + if sketch_dec_nrm > self.grad_clip: + LOG.debug("Clipped sketch decoder gradient (%.5f) to %.2f", + sketch_dec_nrm, self.grad_clip) + + self.im_enc_opt.step() + self.im_dec_opt.step() + self.sketch_dec_opt.step() + + return { + "vae_image": vae_im, + "sketch_image": sketch_im, + "gt_image": gt_im, + "loss": loss.item(), + "vae_im_loss": vae_im_loss.item(), + "sketch_im_loss": sketch_im_loss.item(), + "kl_loss": kl_loss.item(), + "kl_weight": kl_weight, + "lr": self.im_enc_opt.param_groups[0]["lr"], + } + + def init_validation(self): + return dict(sample=None) + + def validation_step(self, batch, running_data): + # Switch to eval mode for dropout, batchnorm, etc + # self.model.eval() + # with th.no_grad(): + # # sample = self.model.sample( + # # batch.to(self.device), temperature=self.sampling_temperature) + # # running_data["sample"] = sample + # self.model.train() + return running_data + + +def train(args): + th.manual_seed(0) + np.random.seed(0) + + dataset = 
data.FixedLengthQuickDrawDataset( + args.dataset, max_seq_length=args.sequence_length, + canvas_size=args.raster_resolution) + dataloader = DataLoader( + dataset, batch_size=args.bs, num_workers=args.workers, shuffle=True) + + # val_dataset = [s for idx, s in enumerate(dataset) if idx < 8] + # val_dataloader = DataLoader( + # val_dataset, batch_size=8, num_workers=4, shuffle=False) + + val_dataloader = None + + model_params = { + "zdim": args.zdim, + "sequence_length": args.sequence_length, + "image_size": args.raster_resolution, + # "encoder_dim": args.encoder_dim, + # "decoder_dim": args.decoder_dim, + } + model = SketchVAE(**model_params) + model.train() + + LOG.info("Model parameters:\n%s", model_params) + + device = "cpu" + if th.cuda.is_available(): + device = "cuda" + LOG.info("Using CUDA") + + interface = Interface(model, raster_resolution=args.raster_resolution, + lr=args.lr, lr_decay=args.lr_decay, + kl_decay=args.kl_decay, kl_weight=args.kl_weight, + absolute_coords=args.absolute_coordinates, + device=device) + + env_name = "sketch_vae" + if args.custom_name is not None: + env_name += "_" + args.custom_name + + if args.absolute_coordinates: + env_name += "_abs_coords" + + chkpt = os.path.join(OUTPUT, env_name) + + # Resume from checkpoint, if any + checkpointer = ttools.Checkpointer( + chkpt, model, meta=model_params, + optimizers=interface.optimizers(), + schedulers=interface.schedulers) + extras, meta = checkpointer.load_latest() + epoch = extras["epoch"] if extras and "epoch" in extras.keys() else 0 + + if meta is not None and meta != model_params: + LOG.info("Checkpoint's metaparams differ " + "from CLI, aborting: %s and %s", meta, model_params) + + trainer = ttools.Trainer(interface) + + # Add callbacks + losses = ["loss", "kl_loss", "vae_im_loss", "sketch_im_loss"] + training_debug = ["lr", "kl_weight"] + trainer.add_callback(ttools.callbacks.ProgressBarCallback( + keys=losses, val_keys=None)) + 
trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=losses, val_keys=None, env=env_name, port=args.port)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=training_debug, smoothing=0, val_keys=None, env=env_name, + port=args.port)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer, max_files=2, interval=600, max_epochs=10)) + trainer.add_callback( + ttools.callbacks.LRSchedulerCallback(interface.schedulers)) + + trainer.add_callback(SketchVAECallback( + env=env_name, win="samples", port=args.port, frequency=args.freq)) + + # Start training + trainer.train(dataloader, starting_epoch=epoch, + val_dataloader=val_dataloader, + num_epochs=args.num_epochs) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--dataset", default="cat.npz") + + parser.add_argument("--absolute_coordinates", action="store_true", + default=False) + + parser.add_argument("--custom_name") + + # Training params + parser.add_argument("--bs", type=int, default=1) + parser.add_argument("--workers", type=int, default=0) + parser.add_argument("--num_epochs", type=int, default=10000) + parser.add_argument("--lr", type=float, default=1e-4) + parser.add_argument("--lr_decay", type=float, default=0.9999) + parser.add_argument("--kl_weight", type=float, default=0.5) + parser.add_argument("--kl_decay", type=float, default=0.99995) + + # Model configuration + parser.add_argument("--zdim", type=int, default=128) + parser.add_argument("--sequence_length", type=int, default=50) + parser.add_argument("--raster_resolution", type=int, default=64) + # parser.add_argument("--encoder_dim", type=int, default=256) + # parser.add_argument("--decoder_dim", type=int, default=512) + + # Viz params + parser.add_argument("--freq", type=int, default=10) + parser.add_argument("--port", type=int, default=5000) + + args = parser.parse_args() + + pydiffvg.set_use_gpu(False) + + train(args) diff --git 
a/diffvg/apps/generative_models/train_gan.py b/diffvg/apps/generative_models/train_gan.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b5eaa1a0374050b21ed150a1bb4060ec74f9957
--- /dev/null
+++ b/diffvg/apps/generative_models/train_gan.py
@@ -0,0 +1,489 @@
+#!/bin/env python
+"""Train a GAN.
+
+Usage:
+
+* Train a MNIST model:
+
+`python train_gan.py`
+
+* Train a Quickdraw model:
+
+`python train_gan.py --task quickdraw`
+
+"""
+import argparse
+import os
+
+import numpy as np
+import torch as th
+from torch.utils.data import DataLoader
+
+import ttools
+import ttools.interfaces
+
+import losses
+import data
+import models
+
+import pydiffvg
+
+LOG = ttools.get_logger(__name__)
+
+
+# Results are written to <repo>/results, one subfolder per experiment name.
+BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
+OUTPUT = os.path.join(BASE_DIR, "results")
+
+
+class Callback(ttools.callbacks.ImageDisplayCallback):
+    """Simple callback that visualize images."""
+    def visualized_image(self, batch, step_data, is_val=False):
+        # Training-time visualization only; nothing is shown for validation.
+        if is_val:
+            return
+
+        gen = step_data["gen_image"][:16].detach()
+        ref = step_data["gt_image"][:16].detach()
+
+        # tensor to visualize, concatenate images
+        vizdata = th.cat([ref, gen], 2)
+
+        # The vector branch is optional (None when --raster_only).
+        vector = step_data["vector_image"]
+        if vector is not None:
+            vector = vector[:16].detach()
+            vizdata = th.cat([vizdata, vector], 2)
+
+        # Images are generated in [-1, 1]; map to [0, 1] for display.
+        vizdata = (vizdata + 1.0 ) * 0.5
+        viz = th.clamp(vizdata, 0, 1)
+        return viz
+
+    def caption(self, batch, step_data, is_val=False):
+        # Caption matches the row layout produced by visualized_image above.
+        if step_data["vector_image"] is not None:
+            s = "top: real, middle: raster, bottom: vector"
+        else:
+            s = "top: real, bottom: fake"
+        return s
+
+
+class Interface(ttools.ModelInterface):
+    """Training interface driving a raster GAN and (optionally) a parallel
+    vector GAN that share the same latent samples.
+
+    Args:
+        generator / discriminator: raster branch modules.
+        vect_generator / vect_discriminator: vector branch modules, or None
+            when training the raster baseline only.
+        lr, lr_decay: Adam learning rate and per-step exponential decay.
+        gradient_penalty: weight of the WGAN-GP penalty term.
+        wgan_gp: if True use WGAN with gradient penalty, else standard GAN.
+        raster_resolution: canvas size of the rasterized output.
+        grad_clip: max gradient norm for both players.
+    """
+    def __init__(self, generator, vect_generator,
+                 discriminator, vect_discriminator,
+                 lr=1e-4, lr_decay=0.9999,
+                 gradient_penalty=10,
+                 wgan_gp=False,
+                 raster_resolution=32, device="cpu", grad_clip=1.0):
+        super(Interface, self).__init__()
+
+        self.wgan_gp = wgan_gp
+        self.w_gradient_penalty = gradient_penalty
+
+        # WGAN-GP trains the critic several steps per generator step.
+        self.n_critic = 1
+        if self.wgan_gp:
+            self.n_critic = 5
+
+        self.grad_clip = grad_clip
+        self.raster_resolution = raster_resolution
+
+        self.gen = generator
+        self.vect_gen = vect_generator
+        self.discrim = discriminator
+        self.vect_discrim = vect_discriminator
+
+        self.device = device
+        self.gen.to(self.device)
+        self.discrim.to(self.device)
+
+        # Adam betas recommended for GAN training (low beta1).
+        beta1 = 0.5
+        beta2 = 0.9
+
+        self.gen_opt = th.optim.Adam(
+            self.gen.parameters(), lr=lr, betas=(beta1, beta2))
+        self.discrim_opt = th.optim.Adam(
+            self.discrim.parameters(), lr=lr, betas=(beta1, beta2))
+
+        self.schedulers = [
+            th.optim.lr_scheduler.ExponentialLR(self.gen_opt, lr_decay),
+            th.optim.lr_scheduler.ExponentialLR(self.discrim_opt, lr_decay),
+        ]
+
+        # NOTE(review): `optimizers` is a plain attribute here, while the
+        # companion train_vae.py Interface exposes it as a method — callers
+        # must not mix the two conventions.
+        self.optimizers = [self.gen_opt, self.discrim_opt]
+
+        if self.vect_gen is not None:
+            assert self.vect_discrim is not None
+
+            self.vect_gen.to(self.device)
+            self.vect_discrim.to(self.device)
+
+            self.vect_gen_opt = th.optim.Adam(
+                self.vect_gen.parameters(), lr=lr, betas=(beta1, beta2))
+            self.vect_discrim_opt = th.optim.Adam(
+                self.vect_discrim.parameters(), lr=lr, betas=(beta1, beta2))
+
+            self.schedulers += [
+                th.optim.lr_scheduler.ExponentialLR(self.vect_gen_opt,
+                                                    lr_decay),
+                th.optim.lr_scheduler.ExponentialLR(self.vect_discrim_opt,
+                                                    lr_decay),
+            ]
+
+            self.optimizers += [self.vect_gen_opt, self.vect_discrim_opt]
+
+        # include loss on alpha
+        self.im_loss = losses.MultiscaleMSELoss(channels=4).to(self.device)
+
+        # Counts critic steps between generator updates.
+        self.iter = 0
+
+        self.cross_entropy = th.nn.BCEWithLogitsLoss()
+        self.mse = th.nn.MSELoss()
+
+    def _gradient_penalty(self, discrim, fake, real):
+        """WGAN-GP penalty on the critic's gradient norm, evaluated at a
+        random interpolation between real and fake samples."""
+        bs = real.size(0)
+        epsilon = th.rand(bs, 1, 1, 1, device=real.device)
+        epsilon = epsilon.expand_as(real)
+
+        interpolation = epsilon * real.data + (1 - epsilon) * fake.data
+        interpolation = th.autograd.Variable(interpolation, requires_grad=True)
+
+        interpolation_logits = discrim(interpolation)
+        grad_outputs = th.ones(interpolation_logits.size(), device=real.device)
+
+        # create_graph so the penalty itself is differentiable w.r.t. the
+        # critic parameters.
+        gradients = th.autograd.grad(outputs=interpolation_logits,
+                                     inputs=interpolation,
+                                     grad_outputs=grad_outputs,
+                                     create_graph=True, retain_graph=True)[0]
+
+        gradients = gradients.view(bs, -1)
+        gradients_norm = th.sqrt(th.sum(gradients ** 2, dim=1) + 1e-12)
+
+        # Zero-centered penalty ((|g| - 0)^2) rather than the original
+        # WGAN-GP one-centered form, following:
+        # [Tanh-Tung 2019] https://openreview.net/pdf?id=ByxPYjC5KQ
+        return self.w_gradient_penalty * ((gradients_norm - 0) ** 2).mean()
+
+        # return self.w_gradient_penalty * ((gradients_norm - 1) ** 2).mean()
+
+    def _discriminator_step(self, discrim, opt, fake, real):
+        """Try to classify fake as 0 and real as 1."""
+
+        opt.zero_grad()
+
+        # no backprop to gen
+        fake = fake.detach()
+
+        fake_pred = discrim(fake)
+        real_pred = discrim(real)
+
+        if self.wgan_gp:
+            gradient_penalty = self._gradient_penalty(discrim, fake, real)
+            loss_d = fake_pred.mean() - real_pred.mean() + gradient_penalty
+            gradient_penalty = gradient_penalty.item()
+        else:
+            fake_loss = self.cross_entropy(fake_pred, th.zeros_like(fake_pred))
+            real_loss = self.cross_entropy(real_pred, th.ones_like(real_pred))
+            # fake_loss = self.mse(fake_pred, th.zeros_like(fake_pred))
+            # real_loss = self.mse(real_pred, th.ones_like(real_pred))
+            loss_d = 0.5*(fake_loss + real_loss)
+            gradient_penalty = None
+
+        loss_d.backward()
+        nrm = th.nn.utils.clip_grad_norm_(
+            discrim.parameters(), self.grad_clip)
+        if nrm > self.grad_clip:
+            LOG.debug("Clipped discriminator gradient (%.5f) to %.2f",
+                      nrm, self.grad_clip)
+
+        opt.step()
+
+        return loss_d.item(), gradient_penalty
+
+    def _generator_step(self, gen, discrim, opt, fake):
+        """Try to classify fake as 1."""
+
+        opt.zero_grad()
+
+        fake_pred = discrim(fake)
+
+        if self.wgan_gp:
+            loss_g = -fake_pred.mean()
+        else:
+            loss_g = self.cross_entropy(fake_pred, th.ones_like(fake_pred))
+            # loss_g = self.mse(fake_pred, th.ones_like(fake_pred))
+
+        loss_g.backward()
+
+        # clip gradients
+        nrm = th.nn.utils.clip_grad_norm_(
+            gen.parameters(), self.grad_clip)
+        if nrm > self.grad_clip:
+            LOG.debug("Clipped generator gradient (%.5f) to %.2f",
+                      nrm, self.grad_clip)
+
+        opt.step()
+
+        return loss_g.item()
+
+    def training_step(self, batch):
+        """One alternating GAN step: `n_critic` discriminator updates, then
+        one generator update; both branches share the same latent z."""
+        im = batch
+        im = im.to(self.device)
+
+        z = self.gen.sample_z(im.shape[0], device=self.device)
+
+        generated = self.gen(z)
+
+        vect_generated = None
+        if self.vect_gen is not None:
+            vect_generated = self.vect_gen(z)
+
+        # Losses default to None so the logger can distinguish "not updated
+        # this step" from a numeric value.
+        loss_g = None
+        loss_d = None
+        loss_g_vect = None
+        loss_d_vect = None
+
+        gp = None
+        gp_vect = None
+
+        if self.iter < self.n_critic:  # Discriminator update
+            self.iter += 1
+
+            loss_d, gp = self._discriminator_step(
+                self.discrim, self.discrim_opt, generated, im)
+
+            if vect_generated is not None:
+                loss_d_vect, gp_vect = self._discriminator_step(
+                    self.vect_discrim, self.vect_discrim_opt, vect_generated, im)
+
+        else:  # Generator update
+            self.iter = 0
+
+            loss_g = self._generator_step(
+                self.gen, self.discrim, self.gen_opt, generated)
+
+            if vect_generated is not None:
+                loss_g_vect = self._generator_step(
+                    self.vect_gen, self.vect_discrim, self.vect_gen_opt, vect_generated)
+
+        return {
+            "loss_g": loss_g,
+            "loss_d": loss_d,
+            "loss_g_vect": loss_g_vect,
+            "loss_d_vect": loss_d_vect,
+            "gp": gp,
+            "gp_vect": gp_vect,
+            "gt_image": im,
+            "gen_image": generated,
+            "vector_image": vect_generated,
+            "lr": self.gen_opt.param_groups[0]["lr"],
+        }
+
+    def init_validation(self):
+        return dict(sample=None)
+
+    def validation_step(self, batch, running_data):
+        # Switch to eval mode for dropout, batchnorm, etc
+        # NOTE(review): Interface defines no `self.model` attribute (only
+        # gen/vect_gen/discrim/vect_discrim), so this line would raise
+        # AttributeError if validation ever ran. It currently never does:
+        # train() passes val_dataloader=None.
+        self.model.eval()
+        return running_data
+
+
+def train(args):
+    """Build dataset, models and interface from CLI args and run training,
+    resuming from the latest checkpoint when one exists."""
+    # Deterministic seeding for reproducibility.
+    th.manual_seed(0)
+    np.random.seed(0)
+
+    color_output = False
+    if args.task == "mnist":
+        dataset = data.MNISTDataset(args.raster_resolution, train=True)
+    elif args.task == "quickdraw":
+        dataset = data.QuickDrawImageDataset(
+            args.raster_resolution, train=True)
+    else:
+        raise NotImplementedError()
+
+    dataloader = DataLoader(
+        dataset, batch_size=args.bs, num_workers=args.workers, shuffle=True)
+
+    # No validation set is used for GAN training.
+    val_dataloader = None
+
+    model_params = {
+        "zdim": args.zdim,
+        "num_strokes": args.num_strokes,
+        "imsize": args.raster_resolution,
+        "stroke_width": args.stroke_width,
+        "color_output": color_output,
+    }
+    gen = models.Generator(**model_params)
+    gen.train()
+
+    discrim = models.Discriminator(color_output=color_output)
+    discrim.train()
+
+    if args.raster_only:
+        vect_gen = None
+        vect_discrim = None
+    else:
+        # Select the vector-generator architecture from the CLI.
+        if args.generator == "fc":
+            vect_gen = models.VectorGenerator(**model_params)
+        elif args.generator == "bezier_fc":
+            vect_gen = models.BezierVectorGenerator(**model_params)
+        elif args.generator in ["rnn"]:
+            vect_gen = models.RNNVectorGenerator(**model_params)
+        elif args.generator in ["chain_rnn"]:
+            vect_gen = models.ChainRNNVectorGenerator(**model_params)
+        else:
+            raise NotImplementedError()
+        vect_gen.train()
+
+        vect_discrim = models.Discriminator(color_output=color_output)
+        vect_discrim.train()
+
+    LOG.info("Model parameters:\n%s", model_params)
+
+    device = "cpu"
+    if th.cuda.is_available():
+        device = "cuda"
+        LOG.info("Using CUDA")
+
+    interface = Interface(gen, vect_gen, discrim, vect_discrim,
+                          raster_resolution=args.raster_resolution, lr=args.lr,
+                          wgan_gp=args.wgan_gp,
+                          lr_decay=args.lr_decay, device=device)
+
+    # Experiment / Visdom environment name encodes the configuration.
+    env_name = args.task + "_gan"
+
+    if args.raster_only:
+        env_name += "_raster"
+    else:
+        env_name += "_vector"
+
+    env_name += "_" + args.generator
+
+    if args.wgan_gp:
+        env_name += "_wgan"
+
+    chkpt = os.path.join(OUTPUT, env_name)
+
+    meta = {
+        "model_params": model_params,
+        "task": args.task,
+        "generator": args.generator,
+    }
+    # Separate checkpoint prefixes per module (g_/d_/vect_g_/vect_d_).
+    checkpointer = ttools.Checkpointer(
+        chkpt, gen, meta=meta,
+        optimizers=interface.optimizers,
+        schedulers=interface.schedulers,
+        prefix="g_")
+    checkpointer_d = ttools.Checkpointer(
+        chkpt, discrim,
+        prefix="d_")
+
+    # Resume from checkpoint, if any
+    extras, _ = checkpointer.load_latest()
+    checkpointer_d.load_latest()
+
+    if not args.raster_only:
+        checkpointer_vect = ttools.Checkpointer(
+            chkpt, vect_gen, meta=meta,
+            optimizers=interface.optimizers,
+            schedulers=interface.schedulers,
+            prefix="vect_g_")
+        checkpointer_d_vect = ttools.Checkpointer(
+            chkpt, vect_discrim,
+            prefix="vect_d_")
+        # NOTE(review): this overwrites `extras` from the raster checkpoint;
+        # the resumed epoch therefore comes from the vector branch when both
+        # exist — confirm that is intended.
+        extras, _ = checkpointer_vect.load_latest()
+        checkpointer_d_vect.load_latest()
+
+    epoch = extras["epoch"] if extras and "epoch" in extras.keys() else 0
+
+    # if meta is not None and meta["model_parameters"] != model_params:
+    #     LOG.info("Checkpoint's metaparams differ "
+    #              "from CLI, aborting: %s and %s", meta, model_params)
+
+    trainer = ttools.Trainer(interface)
+
+    # Add callbacks
+    losses = ["loss_g", "loss_d", "loss_g_vect", "loss_d_vect", "gp",
+              "gp_vect"]
+    training_debug = ["lr"]
+
+    trainer.add_callback(Callback(
+        env=env_name, win="samples", port=args.port, frequency=args.freq))
+    trainer.add_callback(ttools.callbacks.ProgressBarCallback(
+        keys=losses, val_keys=None))
+    trainer.add_callback(ttools.callbacks.MultiPlotCallback(
+        keys=losses, val_keys=None, env=env_name, port=args.port,
+        server=args.server, base_url=args.base_url,
+        win="losses", frequency=args.freq))
+    trainer.add_callback(ttools.callbacks.VisdomLoggingCallback(
+        keys=training_debug, smoothing=0, val_keys=None, env=env_name,
+        server=args.server, base_url=args.base_url,
+        port=args.port))
+    trainer.add_callback(ttools.callbacks.CheckpointingCallback(
+        checkpointer, max_files=2, interval=600, max_epochs=10))
+    trainer.add_callback(ttools.callbacks.CheckpointingCallback(
+        checkpointer_d, max_files=2, interval=600, max_epochs=10))
+
+    if not args.raster_only:
+        trainer.add_callback(ttools.callbacks.CheckpointingCallback(
+            checkpointer_vect, max_files=2, interval=600, max_epochs=10))
+        trainer.add_callback(ttools.callbacks.CheckpointingCallback(
+            checkpointer_d_vect, max_files=2, interval=600, max_epochs=10))
+
+    trainer.add_callback(
+        ttools.callbacks.LRSchedulerCallback(interface.schedulers))
+
+    # Start training
+    trainer.train(dataloader, starting_epoch=epoch,
+                  val_dataloader=val_dataloader,
+                  num_epochs=args.num_epochs)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--task",
+                        default="mnist",
+                        choices=["mnist", "quickdraw"])
+    parser.add_argument("--generator",
+                        default="bezier_fc",
+                        choices=["bezier_fc", "fc", "rnn", "chain_rnn"],
+                        help="model to use as generator")
+
+    parser.add_argument("--raster_only", action="store_true", default=False,
+                        help="if true only train the raster baseline")
+
+    parser.add_argument("--standard_gan", dest="wgan_gp", action="store_false",
+                        default=True,
+                        help="if true, use regular GAN instead of WGAN")
+
+    # Training params
+    parser.add_argument("--bs", type=int, default=4, help="batch size")
+    parser.add_argument("--workers", type=int, default=4,
+                        help="number of dataloader threads")
+    parser.add_argument("--num_epochs", type=int, default=200,
+                        help="number of epochs to train for")
+    parser.add_argument("--lr", type=float, default=1e-4,
+                        help="learning rate")
+    parser.add_argument("--lr_decay", type=float, default=0.9999,
+                        help="exponential learning rate decay rate")
+
+    # Model configuration
+    parser.add_argument("--zdim", type=int, default=32,
+                        help="latent space dimension")
+    parser.add_argument("--stroke_width", type=float, nargs=2,
+                        default=(0.5, 1.5),
+                        help="min and max stroke width")
+    parser.add_argument("--num_strokes", type=int, default=16,
+                        help="number of strokes to generate")
+    parser.add_argument("--raster_resolution", type=int, default=32,
+                        help="raster canvas resolution on each side")
+
+    # Viz params
+    parser.add_argument("--freq", type=int, default=10,
+                        help="visualization frequency")
+    parser.add_argument("--port", type=int, default=8097,
+                        help="visdom port")
+    parser.add_argument("--server", default=None,
+                        help="visdom server if not local.")
+    parser.add_argument("--base_url", default="", help="visdom entrypoint URL")
+
+    args = parser.parse_args()
+
+    # diffvg rasterization runs on CPU for this app.
+    pydiffvg.set_use_gpu(False)
+
+    ttools.set_logger(False)
+
+    train(args)
diff --git
a/diffvg/apps/geometry.py b/diffvg/apps/geometry.py
new file mode 100644
index 0000000000000000000000000000000000000000..59e00db9eab29532984e2d7baea2533a80b417ae
--- /dev/null
+++ b/diffvg/apps/geometry.py
@@ -0,0 +1,226 @@
+import math
+import torch
+
+class GeometryLoss:
+    """Regularization losses that preserve geometric properties of a diffvg
+    path detected at construction time: axis-aligned line segments, groups of
+    parallel line segments, and smooth (G1-continuous) nodes.
+
+    Segments are encoded as (type, index) pairs where type 0/1/2 selects the
+    lines/quadrics/cubics list in `segList`, and index points into that list.
+    Each list entry holds the point indices of that segment in pathObj.points.
+    """
+    def __init__(self, pathObj, xyalign=True, parallel=True, smooth_node=True):
+        self.pathObj=pathObj
+        # Remembered so compute() can reject a different path (see compute).
+        self.pathId=pathObj.id
+        self.get_segments(pathObj)
+        if xyalign:
+            self.make_hor_ver_constraints(pathObj)
+
+        self.xyalign=xyalign
+        self.parallel=parallel
+        self.smooth_node=smooth_node
+
+        if parallel:
+            self.make_parallel_constraints(pathObj)
+
+        if smooth_node:
+            self.make_smoothness_constraints(pathObj)
+
+    def make_smoothness_constraints(self,pathObj):
+        """Record nodes that are currently smooth, together with the ratio of
+        each tangent's length to its segment's length (to keep proportions)."""
+        self.smooth_nodes=[]
+        for idx, node in enumerate(self.iterate_nodes()):
+            sm, t0, t1=self.node_smoothness(node,pathObj)
+            # Near-zero cross-product-based smoothness means the incoming and
+            # outgoing tangents are (anti)parallel: treat the node as smooth.
+            if abs(sm)<1e-2:
+                self.smooth_nodes.append((node,((t0.norm()/self.segment_approx_length(node[0],pathObj)).item(),(t1.norm()/self.segment_approx_length(node[1],pathObj)).item())))
+                #print("Node {} is smooth (smoothness {})".format(idx,sm))
+            else:
+                #print("Node {} is not smooth (smoothness {})".format(idx, sm))
+                pass
+
+    def node_smoothness(self,node,pathObj):
+        """Smoothness of a node = normalized cross product of the tangent
+        leaving the previous segment and the tangent entering the next one
+        (0 when they are parallel). Returns (smoothness, t_out, t_in)."""
+        t0=self.tangent_out(node[0],pathObj)
+        t1=self.tangent_in(node[1],pathObj)
+        # Dot with the 90-degree rotation of t1 == 2D cross product t0 x t1.
+        t1rot=torch.stack((-t1[1],t1[0]))
+        smoothness=t0.dot(t1rot)/(t0.norm()*t1.norm())
+
+        return smoothness, t0, t1
+
+    def segment_approx_length(self,segment,pathObj):
+        """Approximate segment length by the length of its control polygon."""
+        if segment[0]==0:
+            #line
+            idxs=self.segList[segment[0]][segment[1]]
+            #should have a pair of indices now
+            length=(pathObj.points[idxs[1],:]-pathObj.points[idxs[0],:]).norm()
+            return length
+        elif segment[0]==1:
+            #quadric
+            idxs = self.segList[segment[0]][segment[1]]
+            # should have a pair of indices now
+            length = (pathObj.points[idxs[1],:] - pathObj.points[idxs[0],:]).norm()+(pathObj.points[idxs[2],:] - pathObj.points[idxs[1],:]).norm()
+            return length
+        elif segment[0]==2:
+            #cubic
+            idxs = self.segList[segment[0]][segment[1]]
+            # should have a pair of indices now
+            length = (pathObj.points[idxs[1],:] - pathObj.points[idxs[0],:]).norm()+(pathObj.points[idxs[2],:] - pathObj.points[idxs[1],:]).norm()+(pathObj.points[idxs[3],:] - pathObj.points[idxs[2],:]).norm()
+            return length
+
+    def tangent_in(self, segment,pathObj):
+        """Tangent at the segment's start point, pointing into the segment."""
+        if segment[0]==0:
+            #line
+            idxs=self.segList[segment[0]][segment[1]]
+            #should have a pair of indices now
+            tangent=(pathObj.points[idxs[1],:]-pathObj.points[idxs[0],:])/2
+            return tangent
+        elif segment[0]==1:
+            #quadric: tangent is first control point minus start point
+            idxs = self.segList[segment[0]][segment[1]]
+            # should have a pair of indices now
+            tangent = (pathObj.points[idxs[1],:] - pathObj.points[idxs[0],:])
+            return tangent
+        elif segment[0]==2:
+            #cubic: tangent is first control point minus start point
+            idxs = self.segList[segment[0]][segment[1]]
+            # should have a pair of indices now
+            tangent = (pathObj.points[idxs[1],:] - pathObj.points[idxs[0],:])
+            return tangent
+
+        # Unknown segment type: should be unreachable.
+        assert(False)
+
+    def tangent_out(self, segment, pathObj):
+        """Tangent at the segment's end point, pointing back into the segment
+        (i.e. the reverse of the direction of travel)."""
+        if segment[0] == 0:
+            # line
+            idxs = self.segList[segment[0]][segment[1]]
+            # should have a pair of indices now
+            tangent = (pathObj.points[idxs[0],:] - pathObj.points[idxs[1],:]) / 2
+            return tangent
+        elif segment[0] == 1:
+            # quadric
+            idxs = self.segList[segment[0]][segment[1]]
+            # should have a pair of indices now
+            tangent = (pathObj.points[idxs[1],:] - pathObj.points[idxs[2],:])
+            return tangent
+        elif segment[0] == 2:
+            # cubic
+            idxs = self.segList[segment[0]][segment[1]]
+            # should have a pair of indices now
+            tangent = (pathObj.points[idxs[2],:] - pathObj.points[idxs[3],:])
+            return tangent
+
+        # Unknown segment type: should be unreachable.
+        assert (False)
+
+    def get_segments(self, pathObj):
+        """Walk num_control_points and build the per-type segment index lists.
+        The modulo on the last index closes the path back to point 0."""
+        self.segments=[]
+        self.lines = []
+        self.quadrics=[]
+        self.cubics=[]
+        self.segList =(self.lines,self.quadrics,self.cubics)
+        idx=0
+        total_points=pathObj.points.shape[0]
+        for ncp in pathObj.num_control_points.numpy():
+            if ncp==0:
+                self.segments.append((0,len(self.lines)))
+                self.lines.append((idx, (idx + 1) % total_points))
+                idx+=1
+            elif ncp==1:
+                self.segments.append((1, len(self.quadrics)))
+                self.quadrics.append((idx, (idx + 1), (idx+2) % total_points))
+                idx+=ncp+1
+            elif ncp==2:
+                self.segments.append((2, len(self.cubics)))
+                self.cubics.append((idx, (idx + 1), (idx+2), (idx + 3) % total_points))
+                idx += ncp + 1
+
+    def iterate_nodes(self):
+        """Yield (incoming_segment, outgoing_segment) for every node of the
+        closed path, starting with the wrap-around (last, first) pair."""
+        for prev, next in zip([self.segments[-1]]+self.segments[:-1],self.segments):
+            yield (prev, next)
+
+    def make_hor_ver_constraints(self, pathObj):
+        """Record which line segments are currently axis-aligned so the loss
+        can keep them that way.
+
+        NOTE(review): the list names are swapped relative to the geometry —
+        dif[0]~0 means x is constant (a vertical segment) yet it is stored in
+        `horizontals`, and vice versa. calc_hor_ver_loss penalizes the same
+        components, so the behavior is self-consistent; only the naming is
+        misleading."""
+        self.horizontals=[]
+        self.verticals=[]
+        for idx, line in enumerate(self.lines):
+            startPt=pathObj.points[line[0],:]
+            endPt=pathObj.points[line[1],:]
+
+            dif=endPt-startPt
+
+            if abs(dif[0])<1e-6:
+                # x nearly constant (see naming note in the docstring)
+                self.horizontals.append(idx)
+
+            if abs(dif[1])<1e-6:
+                # y nearly constant (see naming note in the docstring)
+                self.verticals.append(idx)
+
+    def make_parallel_constraints(self,pathObj):
+        """Group line segments by slope (angle folded into [0, pi)) so that
+        segments parallel at construction time stay parallel. Axis-aligned
+        groups are excluded when xyalign already constrains them."""
+        slopes=[]
+        for lidx, line in enumerate(self.lines):
+            startPt = pathObj.points[line[0], :]
+            endPt = pathObj.points[line[1], :]
+
+            dif = endPt - startPt
+
+            slope=math.atan2(dif[1],dif[0])
+            if slope<0:
+                slope+=math.pi
+
+            # Linear scan for an existing slope bucket within tolerance.
+            minidx=-1
+            for idx, s in enumerate(slopes):
+                if abs(s[0]-slope)<1e-3:
+                    minidx=idx
+                    break
+
+            if minidx>=0:
+                slopes[minidx][1].append(lidx)
+            else:
+                slopes.append((slope,[lidx]))
+
+        self.parallel_groups=[sgroup[1] for sgroup in slopes if len(sgroup[1])>1 and (not self.xyalign or (sgroup[0]>1e-3 and abs(sgroup[0]-(math.pi/2))>1e-3))]
+
+    def make_line_diff(self,pathObj,lidx):
+        """Vector from start to end of line segment `lidx` (differentiable)."""
+        line = self.lines[lidx]
+        startPt = pathObj.points[line[0], :]
+        endPt = pathObj.points[line[1], :]
+
+        dif = endPt - startPt
+        return dif
+
+    def calc_hor_ver_loss(self,loss,pathObj):
+        """Penalize drift of the constrained coordinate of axis-aligned lines.
+        Accumulates into `loss` in place via tensor `+=` (see compute)."""
+        for lidx in self.horizontals:
+            dif = self.make_line_diff(pathObj,lidx)
+            loss+=dif[0].pow(2)
+
+        for lidx in self.verticals:
+            dif = self.make_line_diff(pathObj,lidx)
+            loss += dif[1].pow(2)
+
+    def calc_parallel_loss(self,loss,pathObj):
+        """Penalize deviation from parallelism inside each slope group using
+        pairwise cross products of normalized direction vectors."""
+        for group in self.parallel_groups:
+            diffs=[self.make_line_diff(pathObj,lidx) for lidx in group]
+            difmat=torch.stack(diffs,1)
+            lengths=difmat.pow(2).sum(dim=0).sqrt()
+            difmat=difmat/lengths
+            # Pad to 3D so torch.cross is applicable; z stays 0.
+            difmat=torch.cat((difmat,torch.zeros(1,difmat.shape[1])))
+            # Compare each direction with its cyclic neighbor in the group.
+            rotmat=difmat[:,list(range(1,difmat.shape[1]))+[0]]
+            cross=difmat.cross(rotmat)
+            ploss=cross.pow(2).sum()*lengths.sum()*10
+            loss+=ploss
+
+    def calc_smoothness_loss(self,loss,pathObj):
+        """Penalize loss of tangent alignment and change of the recorded
+        tangent/segment length ratios at nodes that started out smooth."""
+        for node, tlengths in self.smooth_nodes:
+            sl,t0,t1=self.node_smoothness(node,pathObj)
+            #add smoothness loss
+            loss+=sl.pow(2)*t0.norm().sqrt()*t1.norm().sqrt()
+            tl=((t0.norm()/self.segment_approx_length(node[0],pathObj))-tlengths[0]).pow(2)+((t1.norm()/self.segment_approx_length(node[1],pathObj))-tlengths[1]).pow(2)
+            loss+=tl*10
+
+    def compute(self, pathObj):
+        """Total geometry loss for `pathObj`.
+
+        Raises ValueError when called with a different path than the one the
+        constraints were built from. The helpers accumulate into `loss` with
+        tensor `+=`, which torch applies in place, so the updates made inside
+        them are visible here.
+        """
+        if pathObj.id != self.pathId:
+            raise ValueError("Path ID {} does not match construction-time ID {}".format(pathObj.id,self.pathId))
+
+        loss=torch.tensor(0.)
+        if self.xyalign:
+            self.calc_hor_ver_loss(loss,pathObj)
+
+        if self.parallel:
+            self.calc_parallel_loss(loss, pathObj)
+
+        if self.smooth_node:
+            self.calc_smoothness_loss(loss,pathObj)
+
+        #print(loss.item())
+
+        return loss
diff --git a/diffvg/apps/image_compare.py b/diffvg/apps/image_compare.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc55aa2d1b4b9d022df84f5115d236d87d38c176
--- /dev/null
+++ b/diffvg/apps/image_compare.py
@@ -0,0 +1,45 @@
+import argparse
+import skimage.io
+import numpy as np
+from matplotlib import cm
+import math
+from skimage.metrics import structural_similarity as ssim
+
+def normalize(x, min_, max_):
+    # Linear rescale of x so [min_, max_] maps to [0, 1] (no clamping;
+    # values outside the range map outside [0, 1]).
+    return (x - min_) / (max_ - min_)
+
+def main(args):
+    """Compare two images against a reference: print MSE/PSNR/SSIM and write
+    viridis-colored per-pixel difference maps (diff1.png, diff2.png)."""
+    img1 = skimage.img_as_float(skimage.io.imread(args.img1)).astype(np.float32)
+    img2 = skimage.img_as_float(skimage.io.imread(args.img2)).astype(np.float32)
+    ref = skimage.img_as_float(skimage.io.imread(args.ref)).astype(np.float32)
+    # Drop any alpha channel; compare RGB only.
+    img1 = img1[:, :, :3]
+    img2 = img2[:, :, :3]
+    ref = ref[:, :, :3]
+
+    # Per-pixel absolute error summed over channels.
+    diff1 = np.sum(np.abs(img1 - ref), axis = 2)
+    diff2 = np.sum(np.abs(img2 - ref), axis = 2)
+
min_ = min(np.min(diff1), np.min(diff2)) + max_ = max(np.max(diff1), np.max(diff2)) * 0.5 + diff1 = cm.viridis(normalize(diff1, min_, max_)) + diff2 = cm.viridis(normalize(diff2, min_, max_)) + + # MSE + print('MSE img1:', np.mean(np.power(img1 - ref, 2.0))) + print('MSE img2:', np.mean(np.power(img2 - ref, 2.0))) + # PSNR + print('PSNR img1:', 20 * math.log10(1.0 / math.sqrt(np.mean(np.power(img1 - ref, 2.0))))) + print('PSNR img2:', 20 * math.log10(1.0 / math.sqrt(np.mean(np.power(img2 - ref, 2.0))))) + # SSIM + print('SSIM img1:', ssim(img1, ref, multichannel=True)) + print('SSIM img2:', ssim(img2, ref, multichannel=True)) + + skimage.io.imsave('diff1.png', (diff1 * 255).astype(np.uint8)) + skimage.io.imsave('diff2.png', (diff2 * 255).astype(np.uint8)) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("img1", help="img1") + parser.add_argument("img2", help="img2") + parser.add_argument("ref", help="ref") + args = parser.parse_args() + main(args) diff --git a/diffvg/apps/imgs/baboon.png b/diffvg/apps/imgs/baboon.png new file mode 100644 index 0000000000000000000000000000000000000000..2b1499a5eb878c8b50149ce6ceb4853c7d5abe47 Binary files /dev/null and b/diffvg/apps/imgs/baboon.png differ diff --git a/diffvg/apps/imgs/baboon.svg b/diffvg/apps/imgs/baboon.svg new file mode 100644 index 0000000000000000000000000000000000000000..a50bdc9635f3a4ebfb9e5813e3b4e10ab2402c64 --- /dev/null +++ b/diffvg/apps/imgs/baboon.svg @@ -0,0 +1,8694 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/boston.svg b/diffvg/apps/imgs/boston.svg new file mode 100644 index 0000000000000000000000000000000000000000..006718a2d9527315e9efd52f746e037202a8d012 --- /dev/null +++ b/diffvg/apps/imgs/boston.svg @@ -0,0 +1,1936 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/circle.svg b/diffvg/apps/imgs/circle.svg new file mode 100644 index 0000000000000000000000000000000000000000..dde6e72e245978f4d2dbca95266dd4aa0ae5c2a7 --- /dev/null +++ b/diffvg/apps/imgs/circle.svg @@ -0,0 +1,12 @@ + + + + diff --git a/diffvg/apps/imgs/contour.svg 
b/diffvg/apps/imgs/contour.svg new file mode 100644 index 0000000000000000000000000000000000000000..d173fd92ca447c663ecf704921fd925d9392d472 --- /dev/null +++ b/diffvg/apps/imgs/contour.svg @@ -0,0 +1,53256 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/eleven_below_single.svg b/diffvg/apps/imgs/eleven_below_single.svg new file mode 100644 index 0000000000000000000000000000000000000000..f28c3c55625ad2a01e32dc992e82d5363a11cd1c --- /dev/null +++ b/diffvg/apps/imgs/eleven_below_single.svg @@ -0,0 +1,247 @@ + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/diffvg/apps/imgs/fallingwater.jpg b/diffvg/apps/imgs/fallingwater.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9abc664d72ba02c9376fb2fc597d5ecfc31b9bd5 Binary files /dev/null and b/diffvg/apps/imgs/fallingwater.jpg differ diff --git a/diffvg/apps/imgs/fallingwater.svg b/diffvg/apps/imgs/fallingwater.svg new file mode 100644 index 0000000000000000000000000000000000000000..6276b0f5530eb4c8bdeab76154b37abefb8aff45 --- /dev/null 
+++ b/diffvg/apps/imgs/fallingwater.svg @@ -0,0 +1,3457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/flower.jpg b/diffvg/apps/imgs/flower.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6801f625a27a880b667a6feb7e266fcb0f52537f Binary files /dev/null and b/diffvg/apps/imgs/flower.jpg differ diff --git a/diffvg/apps/imgs/flower.svg b/diffvg/apps/imgs/flower.svg new file mode 100644 index 0000000000000000000000000000000000000000..0eb9d977e684ee335682b2b790305fe26928fb06 --- /dev/null +++ b/diffvg/apps/imgs/flower.svg @@ -0,0 +1,3771 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/hawaii.svg b/diffvg/apps/imgs/hawaii.svg new file mode 100644 index 0000000000000000000000000000000000000000..c8674484e235522d330076b59e7b2b93c24024e8 --- /dev/null +++ b/diffvg/apps/imgs/hawaii.svg @@ -0,0 +1,1151 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/hokusai.png b/diffvg/apps/imgs/hokusai.png new file mode 100644 index 0000000000000000000000000000000000000000..7556f7108ec04cae80e6d2ca84be17617d5257ad Binary files /dev/null and b/diffvg/apps/imgs/hokusai.png differ diff --git a/diffvg/apps/imgs/johnny_automatic_flower_pot.svg b/diffvg/apps/imgs/johnny_automatic_flower_pot.svg new file mode 100644 index 
0000000000000000000000000000000000000000..19a9350287dad58fed5af317706aedd461bb4aa7 --- /dev/null +++ b/diffvg/apps/imgs/johnny_automatic_flower_pot.svg @@ -0,0 +1,376 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +image/svg+xmlOpenclipartflower pot2006-10-07T14:41:15a drawing of a flower pothttp://openclipart.org/detail/421/flower-pot-by-johnny_automaticjohnny_automaticbowlclip artclipartcontainerflowerflowerpothouseholdpotpublic domainvasevintage diff --git a/diffvg/apps/imgs/kitty.jpg b/diffvg/apps/imgs/kitty.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df4fa086f8f3c650749f0822e2c12a4d8dc7c1ce Binary files /dev/null and b/diffvg/apps/imgs/kitty.jpg differ diff --git a/diffvg/apps/imgs/kitty.svg b/diffvg/apps/imgs/kitty.svg new file mode 100644 index 0000000000000000000000000000000000000000..f2813a4aee2f15ced2db806942a51c14558806ba --- /dev/null +++ b/diffvg/apps/imgs/kitty.svg @@ -0,0 +1,3676 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/license.txt b/diffvg/apps/imgs/license.txt new file mode 100644 index 0000000000000000000000000000000000000000..582502d309e3da6a3a125180641a0a1296b2a63a --- /dev/null +++ b/diffvg/apps/imgs/license.txt @@ -0,0 +1,12 @@ +baboon.png is from USC-SIPI dataset 
http://sipi.usc.edu/database/database.php?volume=misc +boston.svg is from Ganacim et al. http://w3.impa.br/~diego/projects/GanEtAl14/ +contour.svg is from Ganacim et al. http://w3.impa.br/~diego/projects/GanEtAl14/ +eleven_below_single.svg is from NVprSDK https://developer.nvidia.com/gpu-accelerated-path-rendering +fallingwater.jpg is from wikipedia user Daderot https://en.wikipedia.org/wiki/Fallingwater#/media/File:Fallingwater_-_DSC05639.JPG +flower.jpg is from wikipedia user Eric Guinther https://en.wikipedia.org/wiki/Flower#/media/File:Crateva_religiosa.jpg +hawaii.svg is from Ganacim et al. http://w3.impa.br/~diego/projects/GanEtAl14/ +hokusai.svg is from OpenClipart @ freesvg.org https://freesvg.org/great-wave-off-kanagawa +mcseem2.svg is from NVprSDK https://developer.nvidia.com/gpu-accelerated-path-rendering +peppers.tiff is from USC-SIPI dataset http://sipi.usc.edu/database/database.php?volume=misc +reschart.svg is from Ganacim et al. http://w3.impa.br/~diego/projects/GanEtAl14/ +tiger.svg is from Ganacim et al. 
http://w3.impa.br/~diego/projects/GanEtAl14/ diff --git a/diffvg/apps/imgs/mcseem2.svg b/diffvg/apps/imgs/mcseem2.svg new file mode 100644 index 0000000000000000000000000000000000000000..14b091e5d5a82e7e9f4f8b6234611fc54d9bb7d4 --- /dev/null +++ b/diffvg/apps/imgs/mcseem2.svg @@ -0,0 +1,2191 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/note_small.svg b/diffvg/apps/imgs/note_small.svg new file mode 100644 index 0000000000000000000000000000000000000000..56a19026e93346bd90236c6ee34876f532c64196 --- /dev/null +++ b/diffvg/apps/imgs/note_small.svg @@ -0,0 +1,25 @@ + +image/svg+xml \ No newline at end of file diff --git a/diffvg/apps/imgs/peppers.svg b/diffvg/apps/imgs/peppers.svg new file mode 100644 index 0000000000000000000000000000000000000000..5db63c92e4edd28ce2eb6ad8598f2343c7ec6f9c --- /dev/null +++ b/diffvg/apps/imgs/peppers.svg @@ -0,0 +1 @@ +peppers \ No newline at end of file diff --git a/diffvg/apps/imgs/peppers.tiff b/diffvg/apps/imgs/peppers.tiff new file mode 100644 index 0000000000000000000000000000000000000000..8c956f80ef1c698bab332d4f08e495e7fc321d3e Binary files /dev/null and b/diffvg/apps/imgs/peppers.tiff differ diff --git a/diffvg/apps/imgs/reschart.svg b/diffvg/apps/imgs/reschart.svg new file mode 100644 index 0000000000000000000000000000000000000000..2c9fa9ce70b1b2687d92ed1ddfc51cb6dc3f7cf1 --- /dev/null +++ b/diffvg/apps/imgs/reschart.svg @@ -0,0 +1,761 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/seamcarving/cat.svg b/diffvg/apps/imgs/seamcarving/cat.svg new file mode 100644 index 0000000000000000000000000000000000000000..efd2d3d32bd9fd59d888f79a2b8130ef9619739c --- /dev/null +++ b/diffvg/apps/imgs/seamcarving/cat.svg @@ -0,0 +1,355 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/seamcarving/hokusai.svg b/diffvg/apps/imgs/seamcarving/hokusai.svg new file mode 100644 index 
0000000000000000000000000000000000000000..7f800c2af25e8352651c23f7957b8e58867ad9eb --- /dev/null +++ b/diffvg/apps/imgs/seamcarving/hokusai.svg @@ -0,0 +1,9840 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/seamcarving/ice_cream.svg b/diffvg/apps/imgs/seamcarving/ice_cream.svg new file mode 100644 index 0000000000000000000000000000000000000000..18b4399fcc9c58b0f6f62265604bb7596c716649 --- /dev/null +++ b/diffvg/apps/imgs/seamcarving/ice_cream.svg @@ -0,0 +1,353 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/seamcarving/license.txt b/diffvg/apps/imgs/seamcarving/license.txt new file mode 100644 index 
0000000000000000000000000000000000000000..6d41acb75fa0a35067b2e2992483737fd81c4f0c --- /dev/null +++ b/diffvg/apps/imgs/seamcarving/license.txt @@ -0,0 +1,7 @@ +https://www.vecteezy.com/vector-art/192818-vector-landscape-illustration +https://www.vecteezy.com/vector-art/217221-vector-nature-landscape-illustration +https://www.vecteezy.com/vector-art/538989-a-panorama-view-od-urban-city +https://www.vecteezy.com/vector-art/419761-mushroom-house-in-the-dark-forest +https://www.vecteezy.com/vector-art/376425-brown-cat-looking-at-little-mouse +https://freesvg.org/great-wave-off-kanagawa +https://www.vecteezy.com/vector-art/298600-friendly-cat-and-dog-on-white-background diff --git a/diffvg/apps/imgs/seamcarving/seaside2.svg b/diffvg/apps/imgs/seamcarving/seaside2.svg new file mode 100644 index 0000000000000000000000000000000000000000..7f4dca4fa5c1584ce54edcad79a1dd49f44e25c2 --- /dev/null +++ b/diffvg/apps/imgs/seamcarving/seaside2.svg @@ -0,0 +1,224 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/seamcarving/sunset2.svg b/diffvg/apps/imgs/seamcarving/sunset2.svg new file mode 100644 index 0000000000000000000000000000000000000000..eae3a0af6f7d677e21492b1bf6d660eecfc06ab2 --- /dev/null +++ b/diffvg/apps/imgs/seamcarving/sunset2.svg @@ -0,0 +1,899 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/imgs/shared_edge.svg b/diffvg/apps/imgs/shared_edge.svg new file mode 100644 index 0000000000000000000000000000000000000000..fb9afd28334d77797b1b6e123d87e8db2a0e8a29 --- /dev/null +++ b/diffvg/apps/imgs/shared_edge.svg @@ -0,0 +1,13 @@ + + + + + + + + diff --git a/diffvg/apps/imgs/tiger.svg b/diffvg/apps/imgs/tiger.svg new file mode 100644 index 0000000000000000000000000000000000000000..366de416db56e9e5db4e0301257708eac0c1095f --- /dev/null +++ b/diffvg/apps/imgs/tiger.svg @@ -0,0 +1,317 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/optimize_pixel_filter.py b/diffvg/apps/optimize_pixel_filter.py new file mode 100644 index 
0000000000000000000000000000000000000000..f5380fca1b35035cb103a4b36fa7a9337e419641 --- /dev/null +++ b/diffvg/apps/optimize_pixel_filter.py @@ -0,0 +1,115 @@ +import diffvg +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width = 256 +canvas_height = 256 +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width=canvas_width, + canvas_height=canvas_height, + shapes=shapes, + shape_groups=shape_groups, + filter=pydiffvg.PixelFilter(type = diffvg.FilterType.hann, + radius = torch.tensor(8.0))) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/optimize_pixel_filter/target.png', gamma=2.2) +target = img.clone() + +# Change the pixel filter radius +radius = torch.tensor(1.0, requires_grad = True) +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width=canvas_width, + canvas_height=canvas_height, + shapes=shapes, + shape_groups=shape_groups, + filter=pydiffvg.PixelFilter(type = diffvg.FilterType.hann, + radius = radius)) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/optimize_pixel_filter/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius], lr=1.0) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. 
+ scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width=canvas_width, + canvas_height=canvas_height, + shapes=shapes, + shape_groups=shape_groups, + filter=pydiffvg.PixelFilter(type = diffvg.FilterType.hann, + radius = radius)) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/optimize_pixel_filter/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', radius) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width=canvas_width, + canvas_height=canvas_height, + shapes=shapes, + shape_groups=shape_groups, + filter=pydiffvg.PixelFilter(type = diffvg.FilterType.hann, + radius = radius)) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/optimize_pixel_filter/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/optimize_pixel_filter/iter_%d.png", "-vb", "20M", + "results/optimize_pixel_filter/out.mp4"]) diff --git a/diffvg/apps/painterly_rendering.py b/diffvg/apps/painterly_rendering.py new file mode 100644 index 0000000000000000000000000000000000000000..2b61f36c97085e6d1fc3c5ee03202e3bc67b2ae8 --- /dev/null +++ b/diffvg/apps/painterly_rendering.py @@ -0,0 +1,223 @@ +""" +Scream: python painterly_rendering.py imgs/scream.jpg --num_paths 2048 --max_width 4.0 +Fallingwater: python painterly_rendering.py imgs/fallingwater.jpg --num_paths 2048 --max_width 4.0 +Fallingwater: python painterly_rendering.py imgs/fallingwater.jpg --num_paths 2048 --max_width 4.0 --use_lpips_loss +Baboon: python painterly_rendering.py imgs/baboon.png --num_paths 1024 --max_width 4.0 --num_iter 250 +Baboon Lpips: python painterly_rendering.py imgs/baboon.png --num_paths 1024 --max_width 4.0 --num_iter 500 --use_lpips_loss +Kitty: python painterly_rendering.py imgs/kitty.jpg --num_paths 1024 --use_blob +""" +import pydiffvg +import torch +import skimage +import skimage.io +import random +import ttools.modules +import argparse +import math + +pydiffvg.set_print_timing(True) + +gamma = 1.0 + +def main(args): + # Use GPU if available + pydiffvg.set_use_gpu(torch.cuda.is_available()) + + perception_loss = ttools.modules.LPIPS().to(pydiffvg.get_device()) + + #target = torch.from_numpy(skimage.io.imread('imgs/lena.png')).to(torch.float32) / 255.0 + target = torch.from_numpy(skimage.io.imread(args.target)).to(torch.float32) / 255.0 + target = target.pow(gamma) + target = target.to(pydiffvg.get_device()) + target = target.unsqueeze(0) + target = target.permute(0, 3, 1, 2) # NHWC -> NCHW + #target = torch.nn.functional.interpolate(target, size = [256, 256], mode = 'area') + canvas_width, canvas_height = target.shape[3], target.shape[2] + num_paths = args.num_paths + max_width = args.max_width + + random.seed(1234) + 
torch.manual_seed(1234) + + shapes = [] + shape_groups = [] + if args.use_blob: + for i in range(num_paths): + num_segments = random.randint(3, 5) + num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2 + points = [] + p0 = (random.random(), random.random()) + points.append(p0) + for j in range(num_segments): + radius = 0.05 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + if j < num_segments - 1: + points.append(p3) + p0 = p3 + points = torch.tensor(points) + points[:, 0] *= canvas_width + points[:, 1] *= canvas_height + path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + stroke_width = torch.tensor(1.0), + is_closed = True) + shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes) - 1]), + fill_color = torch.tensor([random.random(), + random.random(), + random.random(), + random.random()])) + shape_groups.append(path_group) + else: + for i in range(num_paths): + num_segments = random.randint(1, 3) + num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2 + points = [] + p0 = (random.random(), random.random()) + points.append(p0) + for j in range(num_segments): + radius = 0.05 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + points.append(p3) + p0 = p3 + points = torch.tensor(points) + points[:, 0] *= canvas_width + points[:, 1] *= canvas_height + #points = torch.rand(3 * num_segments + 1, 2) * min(canvas_width, canvas_height) + path = 
pydiffvg.Path(num_control_points = num_control_points, + points = points, + stroke_width = torch.tensor(1.0), + is_closed = False) + shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes) - 1]), + fill_color = None, + stroke_color = torch.tensor([random.random(), + random.random(), + random.random(), + random.random()])) + shape_groups.append(path_group) + + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + pydiffvg.imwrite(img.cpu(), 'results/painterly_rendering/init.png', gamma=gamma) + + points_vars = [] + stroke_width_vars = [] + color_vars = [] + for path in shapes: + path.points.requires_grad = True + points_vars.append(path.points) + if not args.use_blob: + for path in shapes: + path.stroke_width.requires_grad = True + stroke_width_vars.append(path.stroke_width) + if args.use_blob: + for group in shape_groups: + group.fill_color.requires_grad = True + color_vars.append(group.fill_color) + else: + for group in shape_groups: + group.stroke_color.requires_grad = True + color_vars.append(group.stroke_color) + + # Optimize + points_optim = torch.optim.Adam(points_vars, lr=1.0) + if len(stroke_width_vars) > 0: + width_optim = torch.optim.Adam(stroke_width_vars, lr=0.1) + color_optim = torch.optim.Adam(color_vars, lr=0.01) + # Adam iterations. + for t in range(args.num_iter): + print('iteration:', t) + points_optim.zero_grad() + if len(stroke_width_vars) > 0: + width_optim.zero_grad() + color_optim.zero_grad() + # Forward pass: render the image. 
+ scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + t, # seed + None, + *scene_args) + # Compose img with white background + img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device = pydiffvg.get_device()) * (1 - img[:, :, 3:4]) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/painterly_rendering/iter_{}.png'.format(t), gamma=gamma) + img = img[:, :, :3] + # Convert img from HWC to NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2) # NHWC -> NCHW + if args.use_lpips_loss: + loss = perception_loss(img, target) + (img.mean() - target.mean()).pow(2) + else: + loss = (img - target).pow(2).mean() + print('render loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + + # Take a gradient descent step. + points_optim.step() + if len(stroke_width_vars) > 0: + width_optim.step() + color_optim.step() + if len(stroke_width_vars) > 0: + for path in shapes: + path.stroke_width.data.clamp_(1.0, max_width) + if args.use_blob: + for group in shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + else: + for group in shape_groups: + group.stroke_color.data.clamp_(0.0, 1.0) + + if t % 10 == 0 or t == args.num_iter - 1: + pydiffvg.save_svg('results/painterly_rendering/iter_{}.svg'.format(t), + canvas_width, canvas_height, shapes, shape_groups) + + # Render the final result. + img = render(target.shape[1], # width + target.shape[0], # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/painterly_rendering/final.png'.format(t), gamma=gamma) + # Convert the intermediate renderings to a video. 
+ from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", + "results/painterly_rendering/iter_%d.png", "-vb", "20M", + "results/painterly_rendering/out.mp4"]) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("target", help="target image path") + parser.add_argument("--num_paths", type=int, default=512) + parser.add_argument("--max_width", type=float, default=2.0) + parser.add_argument("--use_lpips_loss", dest='use_lpips_loss', action='store_true') + parser.add_argument("--num_iter", type=int, default=500) + parser.add_argument("--use_blob", dest='use_blob', action='store_true') + args = parser.parse_args() + main(args) diff --git a/diffvg/apps/quadratic_distance_approx.py b/diffvg/apps/quadratic_distance_approx.py new file mode 100644 index 0000000000000000000000000000000000000000..10e2a34dff5f8eb76b0f299961577af8d4c4fc2c --- /dev/null +++ b/diffvg/apps/quadratic_distance_approx.py @@ -0,0 +1,76 @@ +import pydiffvg +import torch +import skimage +import numpy as np +import matplotlib.pyplot as plt + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([1]) +points = torch.tensor([[ 50.0, 30.0], # base + [125.0, 400.0], # control point + [170.0, 30.0]]) # base +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + stroke_width = torch.tensor([30.0]), + is_closed = False, + use_distance_approx = False) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = None, + stroke_color = torch.tensor([0.5, 0.5, 0.5, 0.5])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + None, # 
background_image + *scene_args) +img /= 256.0 +cm = plt.get_cmap('viridis') +img = cm(img.squeeze()) +pydiffvg.imwrite(img, 'results/quadratic_distance_approx/ref_sdf.png') + +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img, 'results/quadratic_distance_approx/ref_color.png') + +shapes[0].use_distance_approx = True +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +img /= 256.0 +img = cm(img.squeeze()) +pydiffvg.imwrite(img, 'results/quadratic_distance_approx/approx_sdf.png') + +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img, 'results/quadratic_distance_approx/approx_color.png') \ No newline at end of file diff --git a/diffvg/apps/refine_svg.py b/diffvg/apps/refine_svg.py new file mode 100644 index 0000000000000000000000000000000000000000..7e324fb8fe5f84a64527f8def52b729b24951ec1 --- /dev/null +++ b/diffvg/apps/refine_svg.py @@ -0,0 +1,115 @@ +import pydiffvg +import argparse +import ttools.modules +import torch +import skimage.io + +gamma = 1.0 + +def main(args): + perception_loss = ttools.modules.LPIPS().to(pydiffvg.get_device()) + + target = torch.from_numpy(skimage.io.imread(args.target)).to(torch.float32) / 255.0 + target = target.pow(gamma) + target = target.to(pydiffvg.get_device()) + target = target.unsqueeze(0) + target = target.permute(0, 3, 1, 2) # NHWC -> NCHW + + canvas_width, 
canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # bg + *scene_args) + # The output image is in linear RGB space. Do Gamma correction before saving the image. + pydiffvg.imwrite(img.cpu(), 'results/refine_svg/init.png', gamma=gamma) + + points_vars = [] + for path in shapes: + path.points.requires_grad = True + points_vars.append(path.points) + color_vars = {} + for group in shape_groups: + group.fill_color.requires_grad = True + color_vars[group.fill_color.data_ptr()] = group.fill_color + color_vars = list(color_vars.values()) + + # Optimize + points_optim = torch.optim.Adam(points_vars, lr=1.0) + color_optim = torch.optim.Adam(color_vars, lr=0.01) + + # Adam iterations. + for t in range(args.num_iter): + print('iteration:', t) + points_optim.zero_grad() + color_optim.zero_grad() + # Forward pass: render the image. + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # bg + *scene_args) + # Compose img with white background + img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device = pydiffvg.get_device()) * (1 - img[:, :, 3:4]) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/refine_svg/iter_{}.png'.format(t), gamma=gamma) + img = img[:, :, :3] + # Convert img from HWC to NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2) # NHWC -> NCHW + if args.use_lpips_loss: + loss = perception_loss(img, target) + else: + loss = (img - target).pow(2).mean() + print('render loss:', loss.item()) + + # Backpropagate the gradients. 
+ loss.backward() + + # Take a gradient descent step. + points_optim.step() + color_optim.step() + for group in shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + + if t % 10 == 0 or t == args.num_iter - 1: + pydiffvg.save_svg('results/refine_svg/iter_{}.svg'.format(t), + canvas_width, canvas_height, shapes, shape_groups) + + # Render the final result. + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # bg + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/refine_svg/final.png'.format(t), gamma=gamma) + # Convert the intermediate renderings to a video. + from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", + "results/refine_svg/iter_%d.png", "-vb", "20M", + "results/refine_svg/out.mp4"]) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("svg", help="source SVG path") + parser.add_argument("target", help="target image path") + parser.add_argument("--use_lpips_loss", dest='use_lpips_loss', action='store_true') + parser.add_argument("--num_iter", type=int, default=250) + args = parser.parse_args() + main(args) diff --git a/diffvg/apps/render_svg.py b/diffvg/apps/render_svg.py new file mode 100644 index 0000000000000000000000000000000000000000..0aa9273647866d172e8869caac5a12aa04b4551d --- /dev/null +++ b/diffvg/apps/render_svg.py @@ -0,0 +1,41 @@ +""" +Simple utility to render an .svg to a .png +""" +import os +import argparse +import pydiffvg +import torch as th + + +def render(canvas_width, canvas_height, shapes, shape_groups): + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, 
# seed + None, + *scene_args) + return img + + +def main(args): + pydiffvg.set_device(th.device('cuda:1')) + + # Load SVG + svg = os.path.join(args.svg) + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(svg) + + # Save initial state + ref = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(ref.cpu(), args.out, gamma=2.2) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("svg", help="source SVG path") + parser.add_argument("out", help="output image path") + args = parser.parse_args() + main(args) diff --git a/diffvg/apps/seam_carving.py b/diffvg/apps/seam_carving.py new file mode 100644 index 0000000000000000000000000000000000000000..aa0176ef963576aa1364179bd35dda8ebc8f4c2f --- /dev/null +++ b/diffvg/apps/seam_carving.py @@ -0,0 +1,284 @@ +"""Retargets an .svg using image-domain seam carving to shrink it.""" +import os +import pydiffvg +import argparse +import torch as th +import scipy.ndimage.filters as filters +import numba +import numpy as np +import skimage.io + + +def energy(im): + """Compute image energy. + + Args: + im(np.ndarray) with shape [h, w, 3]: input image. + + Returns: + (np.ndarray) with shape [h, w]: energy map. + """ + f_dx = np.array([ + [-1, 0, 1 ], + [-2, 0, 2 ], + [-1, 0, 1 ], + ]) + f_dy = f_dx.T + dx = filters.convolve(im.mean(2), f_dx) + dy = filters.convolve(im.mean(2), f_dy) + + return np.abs(dx) + np.abs(dy) + + +@numba.jit(nopython=True) +def min_seam(e): + """Finds the seam with minimal cost in an energy map. + + Args: + e(np.ndarray) with shape [h, w]: energy map. + + Returns: + min_e(np.ndarray) with shape [h, w]: for all (y,x) min_e[y, x] + is the cost of the minimal seam from 0 to y (top to bottom). + The minimal seam can be found by looking at the last row of min_e. + This is computed by dynamic programming. 
+ argmin_e(np.ndarray) with shape [h, w]: for all (y,x) argmin_e[y, x] + contains the x coordinate corresponding to this seam in the + previous row (y-1). We use this for backtracking. + """ + # initialize to local energy + min_e = e.copy() + argmin_e = np.zeros_like(e, dtype=np.int64) + + h, w = e.shape + + # propagate vertically + for y in range(1, h): + for x in range(w): + if x == 0: + idx = np.argmin(e[y-1, x:x+2]) + argmin_e[y, x] = idx + x + mini = e[y-1, x + idx] + elif x == w-1: + idx = np.argmin(e[y-1, x-1:x+1]) + argmin_e[y, x] = idx + x - 1 + mini = e[y-1, x + idx - 1] + else: + idx = np.argmin(e[y-1, x-1:x+2]) + argmin_e[y, x] = idx + x - 1 + mini = e[y-1, x + idx - 1] + + min_e[y, x] = min_e[y, x] + mini + + return min_e, argmin_e + + +def carve_seam(im): + """Carves a vertical seam in an image, reducing it's horizontal size by 1. + + Args: + im(np.ndarray) with shape [h, w, 3]: input image. + + Returns: + (np.ndarray) with shape [h, w-1, 1]: the image with one seam removed. 
+ """ + + e = energy(im) + min_e, argmin_e = min_seam(e) + h, w = im.shape[:2] + + # boolean flags for the pixels to preserve + to_keep = np.ones((h, w), dtype=np.bool) + + # get lowest energy (from last row) + x = np.argmin(min_e[-1]) + print("carving seam", x, "with energy", min_e[-1, x]) + + # backtract to identify the seam + for y in range(h-1, -1, -1): + # remove seam pixel + to_keep[y, x] = False + x = argmin_e[y, x] + + # replicate mask over color channels + to_keep = np.stack(3*[to_keep], axis=2) + new_im = im[to_keep].reshape((h, w-1, 3)) + return new_im + + +def render(canvas_width, canvas_height, shapes, shape_groups, samples=2): + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + + img = _render(canvas_width, # width + canvas_height, # height + samples, # num_samples_x + samples, # num_samples_y + 0, # seed + None, + *scene_args) + return img + + +def vector_rescale(shapes, scale_x=1.00, scale_y=1.00): + new_shapes = [] + for path in shapes: + path.points[..., 0] *= scale_x + path.points[..., 1] *= scale_y + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--svg", default=os.path.join("imgs", "hokusai.svg")) + parser.add_argument("--optim_steps", default=10, type=int) + parser.add_argument("--lr", default=1e-1, type=int) + args = parser.parse_args() + + name = os.path.splitext(os.path.basename(args.svg))[0] + root = os.path.join("results", "seam_carving", name) + svg_root = os.path.join(root, "svg") + os.makedirs(root, exist_ok=True) + os.makedirs(os.path.join(root, "svg"), exist_ok=True) + + pydiffvg.set_use_gpu(False) + # pydiffvg.set_device(th.device('cuda')) + + # Load SVG + print("loading svg %s" % args.svg) + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg) + print("done loading") + + max_size = 512 + scale_factor = max_size / max(canvas_width, canvas_height) + print("rescaling from 
%dx%d with scale %f" % (canvas_width, canvas_height, scale_factor)) + canvas_width = int(canvas_width*scale_factor) + canvas_height = int(canvas_height*scale_factor) + print("new shape %dx%d" % (canvas_width, canvas_height)) + vector_rescale(shapes, scale_x=scale_factor, scale_y=scale_factor) + + # Shrink image by 33 % + # num_seams_to_remove = 2 + num_seams_to_remove = canvas_width // 3 + new_canvas_width = canvas_width - num_seams_to_remove + scaling = new_canvas_width * 1.0 / canvas_width + + # Naive scaling baseline + print("rendering naive rescaling...") + vector_rescale(shapes, scale_x=scaling) + resized = render(new_canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(resized.cpu(), os.path.join(root, 'uniform_scaling.png'), gamma=2.2) + pydiffvg.save_svg(os.path.join(svg_root, 'uniform_scaling.svg') , canvas_width, + canvas_height, shapes, shape_groups, use_gamma=False) + vector_rescale(shapes, scale_x=1.0/scaling) # bring back original coordinates + print("saved naiving scaling") + + # Save initial state + print("rendering initial state...") + im = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(im.cpu(), os.path.join(root, 'init.png'), gamma=2.2) + pydiffvg.save_svg(os.path.join(svg_root, 'init.svg'), canvas_width, + canvas_height, shapes, shape_groups, use_gamma=False) + print("saved initial state") + + # Optimize + # color_optim = th.optim.Adam(color_vars, lr=0.01) + + retargeted = im[..., :3].cpu().numpy() + previous_width = canvas_width + print("carving seams") + for seam_idx in range(num_seams_to_remove): + print('\nseam', seam_idx+1, 'of', num_seams_to_remove) + + # Remove a seam + retargeted = carve_seam(retargeted) + + current_width = canvas_width - seam_idx - 1 + scale_factor = current_width * 1.0 / previous_width + previous_width = current_width + + padded = np.zeros((canvas_height, canvas_width, 4)) + padded[:, :-seam_idx-1, :3] = retargeted + padded[:, :-seam_idx-1, -1] = 1.0 # alpha + padded = 
th.from_numpy(padded).to(im.device) + + # Remap points to the smaller canvas and + # collect variables to optimize + points_vars = [] + # width_vars = [] + mini, maxi = canvas_width, 0 + for path in shapes: + path.points.requires_grad = False + x = path.points[..., 0] + y = path.points[..., 1] + # rescale + + x = x * scale_factor + + # clip to canvas + path.points[..., 0] = th.clamp(x, 0, current_width) + path.points[..., 1] = th.clamp(y, 0, canvas_height) + + path.points.requires_grad = True + points_vars.append(path.points) + path.stroke_width.requires_grad = True + # width_vars.append(path.stroke_width) + + mini = min(mini, path.points.min().item()) + maxi = max(maxi, path.points.max().item()) + print("points", mini, maxi, "scale", scale_factor) + + # recreate an optimizer so we don't carry over the previous update + # (momentum)? + geom_optim = th.optim.Adam(points_vars, lr=args.lr) + + for step in range(args.optim_steps): + geom_optim.zero_grad() + + img = render(canvas_width, canvas_height, shapes, shape_groups, + samples=2) + + pydiffvg.imwrite( + img.cpu(), + os.path.join(root, "seam_%03d_iter_%02d.png" % (seam_idx, step)), gamma=2.2) + + # NO alpha + loss = (img - padded)[..., :3].pow(2).mean() + # loss = (img - padded).pow(2).mean() + print('render loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + + # Take a gradient descent step. 
+ geom_optim.step() + pydiffvg.save_svg(os.path.join(svg_root, "seam%03d.svg" % seam_idx), + canvas_width-seam_idx, canvas_height, shapes, + shape_groups, use_gamma=False) + + for path in shapes: + mini = min(mini, path.points.min().item()) + maxi = max(maxi, path.points.max().item()) + print("points", mini, maxi) + + img = render(canvas_width, canvas_height, shapes, shape_groups) + img = img[:, :-num_seams_to_remove] + + pydiffvg.imwrite(img.cpu(), os.path.join(root, 'final.png'), + gamma=2.2) + pydiffvg.imwrite(retargeted, os.path.join(root, 'ref.png'), + gamma=2.2) + + pydiffvg.save_svg(os.path.join(svg_root, 'final.svg'), + canvas_width-seam_idx, canvas_height, shapes, + shape_groups, use_gamma=False) + + # Convert the intermediate renderings to a video. + from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", os.path.join(root, "seam_%03d_iter_00.png"), "-vb", "20M", + os.path.join(root, "out.mp4")]) + + +if __name__ == "__main__": + main() diff --git a/diffvg/apps/shared_edge_compare.py b/diffvg/apps/shared_edge_compare.py new file mode 100644 index 0000000000000000000000000000000000000000..e7a5aef8746efe7cfe42a0dd3c1af146a8d4893e --- /dev/null +++ b/diffvg/apps/shared_edge_compare.py @@ -0,0 +1,127 @@ +import pydiffvg +import diffvg +from matplotlib import cm +import matplotlib.pyplot as plt +import argparse +import torch + +def normalize(x, min_, max_): + range = max(abs(min_), abs(max_)) + return (x + range) / (2 * range) + +def main(args): + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg_file) + + w = int(canvas_width * args.size_scale) + h = int(canvas_height * args.size_scale) + + pfilter = pydiffvg.PixelFilter(type = diffvg.FilterType.box, + radius = torch.tensor(0.5)) + + use_prefiltering = False + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + + num_samples_x = 16 + 
num_samples_y = 16 + render = pydiffvg.RenderFunction.apply + img = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, + *scene_args) + pydiffvg.imwrite(img.cpu(), 'results/finite_difference_comp/img.png', gamma=1.0) + + epsilon = 0.1 + def perturb_scene(axis, epsilon): + shapes[2].points[:, axis] += epsilon + # for s in shapes: + # if isinstance(s, pydiffvg.Circle): + # s.center[axis] += epsilon + # elif isinstance(s, pydiffvg.Ellipse): + # s.center[axis] += epsilon + # elif isinstance(s, pydiffvg.Path): + # s.points[:, axis] += epsilon + # elif isinstance(s, pydiffvg.Polygon): + # s.points[:, axis] += epsilon + # elif isinstance(s, pydiffvg.Rect): + # s.p_min[axis] += epsilon + # s.p_max[axis] += epsilon + # for s in shape_groups: + # if isinstance(s.fill_color, pydiffvg.LinearGradient): + # s.fill_color.begin[axis] += epsilon + # s.fill_color.end[axis] += epsilon + + perturb_scene(0, epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render = pydiffvg.RenderFunction.apply + img0 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, + *scene_args) + + forward_diff = (img0 - img) / (epsilon) + forward_diff = forward_diff.sum(axis = 2) + x_diff_max = 1.5 + x_diff_min = -1.5 + print(forward_diff.max()) + print(forward_diff.min()) + forward_diff = cm.viridis(normalize(forward_diff, x_diff_min, x_diff_max).cpu().numpy()) + pydiffvg.imwrite(forward_diff, 'results/finite_difference_comp/shared_edge_forward_diff.png', gamma=1.0) + + perturb_scene(0, -2 * epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + img1 = render(w, # width + h, # height + num_samples_x, # num_samples_x + 
num_samples_y, # num_samples_y + 0, # seed + None, + *scene_args) + backward_diff = (img - img1) / (epsilon) + backward_diff = backward_diff.sum(axis = 2) + print(backward_diff.max()) + print(backward_diff.min()) + backward_diff = cm.viridis(normalize(backward_diff, x_diff_min, x_diff_max).cpu().numpy()) + pydiffvg.imwrite(backward_diff, 'results/finite_difference_comp/shared_edge_backward_diff.png', gamma=1.0) + perturb_scene(0, epsilon) + + num_samples_x = 4 + num_samples_y = 4 + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render_grad = pydiffvg.RenderFunction.render_grad + img_grad = render_grad(torch.ones(h, w, 4), + w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + *scene_args) + print(img_grad[:, :, 0].max()) + print(img_grad[:, :, 0].min()) + x_diff = cm.viridis(normalize(img_grad[:, :, 0], x_diff_min, x_diff_max).cpu().numpy()) + pydiffvg.imwrite(x_diff, 'results/finite_difference_comp/ours_x_diff.png', gamma=1.0) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("svg_file", help="source SVG path") + parser.add_argument("--size_scale", type=float, default=1.0) + args = parser.parse_args() + main(args) diff --git a/diffvg/apps/simple_transform_svg.py b/diffvg/apps/simple_transform_svg.py new file mode 100644 index 0000000000000000000000000000000000000000..3faec31f0d6a840f9b66e7c2237e0dcef5c8da07 --- /dev/null +++ b/diffvg/apps/simple_transform_svg.py @@ -0,0 +1,237 @@ +import pydiffvg +import torch +import torchvision +from PIL import Image +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +def inv_exp(a,x,xpow=1): + return pow(a,pow(1.-x,xpow)) + +import math +import numbers +import torch +from torch import nn +from torch.nn import functional as F + +import visdom + +class 
GaussianSmoothing(nn.Module): + """ + Apply gaussian smoothing on a + 1d, 2d or 3d tensor. Filtering is performed seperately for each channel + in the input using a depthwise convolution. + Arguments: + channels (int, sequence): Number of channels of the input tensors. Output will + have this number of channels as well. + kernel_size (int, sequence): Size of the gaussian kernel. + sigma (float, sequence): Standard deviation of the gaussian kernel. + dim (int, optional): The number of dimensions of the data. + Default value is 2 (spatial). + """ + def __init__(self, channels, kernel_size, sigma, dim=2): + super(GaussianSmoothing, self).__init__() + if isinstance(kernel_size, numbers.Number): + kernel_size = [kernel_size] * dim + if isinstance(sigma, numbers.Number): + sigma = [sigma] * dim + + # The gaussian kernel is the product of the + # gaussian function of each dimension. + kernel = 1 + meshgrids = torch.meshgrid( + [ + torch.arange(size, dtype=torch.float32) + for size in kernel_size + ] + ) + for size, std, mgrid in zip(kernel_size, sigma, meshgrids): + mean = (size - 1) / 2 + kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \ + torch.exp(-((mgrid - mean) / std) ** 2 / 2) + + # Make sure sum of values in gaussian kernel equals 1. + kernel = kernel / torch.sum(kernel) + + # Reshape to depthwise convolutional weight + kernel = kernel.view(1, 1, *kernel.size()) + kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1)) + + self.register_buffer('weight', kernel) + self.groups = channels + + if dim == 1: + self.conv = F.conv1d + elif dim == 2: + self.conv = F.conv2d + elif dim == 3: + self.conv = F.conv3d + else: + raise RuntimeError( + 'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim) + ) + + def forward(self, input): + """ + Apply gaussian filter to input. + Arguments: + input (torch.Tensor): Input to apply gaussian filter on. + Returns: + filtered (torch.Tensor): Filtered output. 
+ """ + return self.conv(input, weight=self.weight, groups=self.groups) + +vis=visdom.Visdom(port=8080) + +smoothing = GaussianSmoothing(4, 5, 1) + +settings=pydiffvg.SvgOptimizationSettings() +settings.global_override(["optimize_color"],False) +settings.global_override(["optimize_alpha"],False) +settings.global_override(["gradients","optimize_color"],False) +settings.global_override(["gradients","optimize_alpha"],False) +settings.global_override(["gradients","optimize_stops"],False) +settings.global_override(["gradients","optimize_location"],False) +settings.global_override(["optimizer"],"Adam") +settings.global_override(["paths","optimize_points"],False) +settings.global_override(["transforms","transform_lr"],1e-2) +settings.undefault("linearGradient3152") +settings.retrieve("linearGradient3152")[0]["transforms"]["optimize_transforms"]=False + +#optim=pydiffvg.OptimizableSvg("note_small.svg",settings,verbose=True) +optim=pydiffvg.OptimizableSvg("heart_green.svg",settings,verbose=True) + +#img=torchvision.transforms.ToTensor()(Image.open("note_transformed.png")).permute(1,2,0) +img=torchvision.transforms.ToTensor()(Image.open("heart_green_90.png")).permute(1,2,0) + +name="heart_green_90" + +pydiffvg.imwrite(img.cpu(), 'results/simple_transform_svg/target.png') +target = img.clone().detach().requires_grad_(False) + +img=optim.render() +pydiffvg.imwrite(img.cpu(), 'results/simple_transform_svg/init.png') + +def smooth(input, kernel): + input=torch.nn.functional.pad(input.permute(2,0,1).unsqueeze(0), (2, 2, 2, 2), mode='reflect') + output=kernel(input) + return output + +def printimg(optim): + img=optim.render() + comp = img.clone().detach() + bg = torch.tensor([[[1., 1., 1.]]]) + comprgb = comp[:, :, 0:3] + compalpha = comp[:, :, 3].unsqueeze(2) + comp = comprgb * compalpha \ + + bg * (1 - compalpha) + return comp + +def comp_loss_and_grad(img, tgt, it, sz): + dif=img-tgt + + loss=dif.pow(2).mean() + + dif=dif.detach() + + cdif=dif.clone().abs() + cdif[:,:,3]=1. 
+ + resdif=torch.nn.functional.interpolate(cdif.permute(2,0,1).unsqueeze(0),sz,mode='bilinear').squeeze().permute(1,2,0).abs() + pydiffvg.imwrite(resdif[:,:,0:4], 'results/simple_transform_svg/dif_{:04}.png'.format(it)) + + dif=dif.numpy() + padded=np.pad(dif,[(1,1),(1,1),(0,0)],mode='edge') + #print(padded[:-2,:,:].shape) + grad_x=(padded[:-2,:,:]-padded[2:,:,:])[:,1:-1,:] + grad_y=(padded[:,:-2,:]-padded[:,2:,:])[1:-1,:,:] + + resshape=dif.shape + resshape=(resshape[0],resshape[1],2) + res=np.zeros(resshape) + + for x in range(resshape[0]): + for y in range(resshape[1]): + A=np.concatenate((grad_x[x,y,:][:,np.newaxis],grad_y[x,y,:][:,np.newaxis]),axis=1) + b=-dif[x,y,:] + v=np.linalg.lstsq(np.dot(A.T,A),np.dot(A.T,b)) + res[x,y,:]=v[0] + + return loss, res + +import colorsys +def print_gradimg(gradimg,it,shape=None): + out=torch.zeros((gradimg.shape[0],gradimg.shape[1],3),requires_grad=False,dtype=torch.float32) + for x in range(gradimg.shape[0]): + for y in range(gradimg.shape[1]): + h=math.atan2(gradimg[x,y,1],gradimg[x,y,0]) + s=math.tanh(np.linalg.norm(gradimg[x,y,:])) + v=1. + vec=(gradimg[x,y,:].clip(min=-1,max=1)/2)+.5 + #out[x,y,:]=torch.tensor(colorsys.hsv_to_rgb(h,s,v),dtype=torch.float32) + out[x,y,:]=torch.tensor([vec[0],vec[1],0]) + + if shape is not None: + out=torch.nn.functional.interpolate(out.permute(2,0,1).unsqueeze(0),shape,mode='bilinear').squeeze().permute(1,2,0) + pydiffvg.imwrite(out.cpu(), 'results/simple_transform_svg/grad_{:04}.png'.format(it)) + +# Run 150 Adam iterations. 
+for t in range(1000): + print('iteration:', t) + optim.zero_grad() + with open('results/simple_transform_svg/viter_{:04}.svg'.format(t),"w") as f: + f.write(optim.write_xml()) + scale=inv_exp(1/16,math.pow(t/1000,1),0.5) + #print(scale) + #img = optim.render(seed=t+1,scale=scale) + img = optim.render(seed=t + 1, scale=None) + vis.line(torch.tensor([img.shape[0]]), X=torch.tensor([t]), win=name + " size", update="append", + opts={"title": name + " size"}) + #print(img.shape) + #img = optim.render(seed=t + 1) + + ptgt=target.permute(2,0,1).unsqueeze(0) + sz=img.shape[0:2] + restgt=torch.nn.functional.interpolate(ptgt,size=sz,mode='bilinear').squeeze().permute(1,2,0) + + # Compute the loss function. Here it is L2. + #loss = (smooth(img,smoothing) - smooth(restgt,smoothing)).pow(2).mean() + #loss = (img - restgt).pow(2).mean() + #loss=(img-target).pow(2).mean() + loss,gradimg=comp_loss_and_grad(img, restgt,t,target.shape[0:2]) + print_gradimg(gradimg,t,target.shape[0:2]) + print('loss:', loss.item()) + vis.line(loss.unsqueeze(0), X=torch.tensor([t]), win=name+" loss", update="append", + opts={"title": name + " loss"}) + + # Backpropagate the gradients. + loss.backward() + + # Take a gradient descent step. + optim.step() + + # Save the intermediate render. + comp=printimg(optim) + pydiffvg.imwrite(comp.cpu(), 'results/simple_transform_svg/iter_{:04}.png'.format(t)) + + +# Render the final result. + +img = optim.render() +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/simple_transform_svg/final.png') +with open('results/simple_transform_svg/final.svg', "w") as f: + f.write(optim.write_xml()) + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/simple_transform_svg/iter_%04d.png", "-vb", "20M", + "results/simple_transform_svg/out.mp4"]) + +call(["ffmpeg", "-framerate", "24", "-i", + "results/simple_transform_svg/grad_%04d.png", "-vb", "20M", + "results/simple_transform_svg/out_grad.mp4"]) + diff --git a/diffvg/apps/single_circle.py b/diffvg/apps/single_circle.py new file mode 100644 index 0000000000000000000000000000000000000000..6a00ef891836ad5cc65860627e1b0a3e26e4b443 --- /dev/null +++ b/diffvg/apps/single_circle.py @@ -0,0 +1,107 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width = 256 +canvas_height = 256 +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_circle/target.png', gamma=2.2) +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_circle/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_circle/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. 
+ print('radius:', circle.radius) + print('center:', circle.center) + print('color:', circle_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_circle/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_circle/iter_%d.png", "-vb", "20M", + "results/single_circle/out.mp4"]) diff --git a/diffvg/apps/single_circle_outline.py b/diffvg/apps/single_circle_outline.py new file mode 100644 index 0000000000000000000000000000000000000000..78952df51404448677456eec23f417c6e76c4ccb --- /dev/null +++ b/diffvg/apps/single_circle_outline.py @@ -0,0 +1,118 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0]), + stroke_width = torch.tensor(5.0)) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]), + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_circle_outline/target.png', gamma=2.2) +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +fill_color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +stroke_width_n = torch.tensor(10.0 / 100.0, requires_grad=True) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle.stroke_width = stroke_width_n * 100 +circle_group.fill_color = fill_color +circle_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_circle_outline/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, fill_color, stroke_color, stroke_width_n], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle.stroke_width = stroke_width_n * 100 + circle_group.fill_color = fill_color + circle_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_circle_outline/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. 
+ loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('fill_color.grad:', fill_color.grad) + print('stroke_color.grad:', stroke_color.grad) + print('stroke_width.grad:', stroke_width_n.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', circle.radius) + print('center:', circle.center) + print('stroke_width:', circle.stroke_width) + print('fill_color:', circle_group.fill_color) + print('stroke_color:', circle_group.stroke_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + None, + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_circle_outline/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_circle_outline/iter_%d.png", "-vb", "20M", + "results/single_circle_outline/out.mp4"]) diff --git a/diffvg/apps/single_circle_sdf.py b/diffvg/apps/single_circle_sdf.py new file mode 100644 index 0000000000000000000000000000000000000000..60c10cd0c0a95f39dc281385c0c012258f676046 --- /dev/null +++ b/diffvg/apps/single_circle_sdf.py @@ -0,0 +1,114 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width = 256 +canvas_height = 256 +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/single_circle_sdf/target.png') +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, + 
*scene_args) +img = img / 256 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/single_circle_sdf/init.png') + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, + *scene_args) + img = img / 256 # Normalize SDF to [0, 1] + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_circle_sdf/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', circle.radius) + print('center:', circle.center) + print('color:', circle_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_circle_sdf/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_circle_sdf/iter_%d.png", "-vb", "20M", + "results/single_circle_sdf/out.mp4"]) \ No newline at end of file diff --git a/diffvg/apps/single_circle_tf.py b/diffvg/apps/single_circle_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..d77285df20a49b2afd5de6eecd344bbadbb1f911 --- /dev/null +++ b/diffvg/apps/single_circle_tf.py @@ -0,0 +1,94 @@ +import pydiffvg_tensorflow as pydiffvg +import tensorflow as tf +import skimage +import numpy as np + +canvas_width = 256 +canvas_height = 256 +circle = pydiffvg.Circle(radius = tf.constant(40.0), + center = tf.constant([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = tf.constant([0], dtype = tf.int32), + fill_color = tf.constant([0.3, 0.6, 0.3, 1.0])) +shape_groups = [circle_group] +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.render +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(0), # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img, 'results/single_circle_tf/target.png', gamma=2.2) +target = tf.identity(img) + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = tf.Variable(20.0 / 256.0) +center_n = tf.Variable([108.0 / 256.0, 138.0 / 256.0]) +color = tf.Variable([0.3, 0.2, 0.8, 1.0]) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(1), # seed + *scene_args) +pydiffvg.imwrite(img, 'results/single_circle_tf/init.png', gamma=2.2) + +optimizer = tf.compat.v1.train.AdamOptimizer(1e-2) + +for t in range(100): + print('iteration:', t) + + with tf.GradientTape() as tape: + # Forward pass: render the image. + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + # Important to use a different seed every iteration, otherwise the result + # would be biased. + scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(t+1), # seed, + *scene_args) + loss_value = tf.reduce_sum(tf.square(img - target)) + + print(f"loss_value: {loss_value}") + pydiffvg.imwrite(img, 'results/single_circle_tf/iter_{}.png'.format(t)) + + grads = tape.gradient(loss_value, [radius_n, center_n, color]) + print(grads) + optimizer.apply_gradients(zip(grads, [radius_n, center_n, color])) + +# Render the final result. 
+circle.radius = radius_n * 256
+circle.center = center_n * 256
+circle_group.fill_color = color
+scene_args = pydiffvg.serialize_scene(\
+    canvas_width, canvas_height, shapes, shape_groups)
+img = render(tf.constant(256), # width
+             tf.constant(256), # height
+             tf.constant(2), # num_samples_x
+             tf.constant(2), # num_samples_y
+             tf.constant(101), # seed
+             *scene_args)
+# Save the images and differences.
+pydiffvg.imwrite(img, 'results/single_circle_tf/final.png')
+
+# Convert the intermediate renderings to a video.
+from subprocess import call
+call(["ffmpeg", "-framerate", "24", "-i",
+    "results/single_circle_tf/iter_%d.png", "-vb", "20M",
+    "results/single_circle_tf/out.mp4"])
diff --git a/diffvg/apps/single_curve.py b/diffvg/apps/single_curve.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd64f47705dd1811bb17fd686995f442546dd339
--- /dev/null
+++ b/diffvg/apps/single_curve.py
@@ -0,0 +1,121 @@
+import pydiffvg
+import torch
+import skimage
+import numpy as np
+
+# Use GPU if available
+pydiffvg.set_use_gpu(torch.cuda.is_available())
+
+canvas_width, canvas_height = 256, 256
+num_control_points = torch.tensor([2, 2, 2])
+points = torch.tensor([[120.0, 30.0], # base
+                       [150.0, 60.0], # control point
+                       [ 90.0, 198.0], # control point
+                       [ 60.0, 218.0], # base
+                       [ 90.0, 180.0], # control point
+                       [200.0, 65.0], # control point
+                       [210.0, 98.0], # base
+                       [220.0, 70.0], # control point
+                       [130.0, 55.0]]) # control point
+path = pydiffvg.Path(num_control_points = num_control_points,
+                     points = points,
+                     is_closed = True)
+shapes = [path]
+path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]),
+                                 fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]))
+shape_groups = [path_group]
+scene_args = pydiffvg.RenderFunction.serialize_scene(\
+    canvas_width, canvas_height, shapes, shape_groups)
+
+render = pydiffvg.RenderFunction.apply
+img = render(256, # width
+             256, # height
+             2, # num_samples_x
+             2, # num_samples_y
+             0, # seed
+             None,
+
*scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_curve/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0], # base + [100.0/256.0, 200.0/256.0], # control point + [170.0/256.0, 55.0/256.0], # control point + [220.0/256.0, 100.0/256.0], # base + [210.0/256.0, 80.0/256.0], # control point + [140.0/256.0, 60.0/256.0]], # control point + requires_grad = True) +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +path.points = points_n * 256 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_curve/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_curve/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. 
+ loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', path.points) + print('color:', path_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_curve/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_curve/iter_%d.png", "-vb", "20M", + "results/single_curve/out.mp4"]) diff --git a/diffvg/apps/single_curve_outline.py b/diffvg/apps/single_curve_outline.py new file mode 100644 index 0000000000000000000000000000000000000000..bd7fa5bd2191fe09b58100e1aa5f83bc1977e15f --- /dev/null +++ b/diffvg/apps/single_curve_outline.py @@ -0,0 +1,136 @@ +import pydiffvg +import torch +import skimage + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2, 2, 2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0], # base + [ 90.0, 180.0], # control point + [200.0, 65.0], # control point + [210.0, 98.0], # base + [220.0, 70.0], # control point + [130.0, 55.0]]) # control point +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = True, + stroke_width = torch.tensor(5.0)) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]), + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [path_group] +scene_args = 
pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_curve_outline/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0], # base + [100.0/256.0, 200.0/256.0], # control point + [170.0/256.0, 55.0/256.0], # control point + [220.0/256.0, 100.0/256.0], # base + [210.0/256.0, 80.0/256.0], # control point + [140.0/256.0, 60.0/256.0]], # control point + requires_grad = True) +fill_color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +stroke_width_n = torch.tensor(10.0 / 100.0, requires_grad=True) +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.fill_color = fill_color +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_curve_outline/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, fill_color, stroke_color, stroke_width_n], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. 
+ path.points = points_n * 256 + path.stroke_width = stroke_width_n * 100 + path_group.fill_color = fill_color + path_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_curve_outline/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('fill_color.grad:', fill_color.grad) + print('stroke_color.grad:', stroke_color.grad) + print('stroke_width.grad:', stroke_width_n.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', path.points) + print('fill_color:', path_group.fill_color) + print('stroke_color:', path_group.stroke_color) + print('stroke_width:', path.stroke_width) + +# Render the final result. +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.fill_color = fill_color +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_curve_outline/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_curve_outline/iter_%d.png", "-vb", "20M", + "results/single_curve_outline/out.mp4"]) diff --git a/diffvg/apps/single_curve_sdf.py b/diffvg/apps/single_curve_sdf.py new file mode 100644 index 0000000000000000000000000000000000000000..4eccb0c41cfe6d3735ec91bbefb0a7fbeddafec0 --- /dev/null +++ b/diffvg/apps/single_curve_sdf.py @@ -0,0 +1,129 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2, 2, 2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0], # base + [ 90.0, 180.0], # control point + [200.0, 65.0], # control point + [210.0, 98.0], # base + [220.0, 70.0], # control point + [130.0, 55.0]]) # control point +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = True) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +img /= 256.0 +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/target.png', gamma=1.0) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0], # base + [100.0/256.0, 200.0/256.0], # control point + [170.0/256.0, 55.0/256.0], # control point + [220.0/256.0, 100.0/256.0], # base + [210.0/256.0, 80.0/256.0], # control point + [140.0/256.0, 60.0/256.0]], # control point + requires_grad = True) +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +path.points = points_n * 256 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +img /= 256.0 +pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/init.png', gamma=1.0) + +# Optimize +optimizer = torch.optim.Adam([points_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + img /= 256.0 + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/iter_{}.png'.format(t), gamma=1.0) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. 
+ loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', path.points) + print('color:', path_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 102, # seed + None, # background_image + *scene_args) +img /= 256.0 +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/final.png', gamma=1.0) + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_curve_sdf/iter_%d.png", "-vb", "20M", + "results/single_curve_sdf/out.mp4"]) diff --git a/diffvg/apps/single_curve_sdf_trans.py b/diffvg/apps/single_curve_sdf_trans.py new file mode 100644 index 0000000000000000000000000000000000000000..148b67a5cde31be7b042192b6ed192c980e1c8ec --- /dev/null +++ b/diffvg/apps/single_curve_sdf_trans.py @@ -0,0 +1,178 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2]) +# points = torch.tensor([[120.0, 30.0], # base +# [150.0, 60.0], # control point +# [ 90.0, 198.0], # control point +# [ 60.0, 218.0], # base +# [ 90.0, 180.0], # control point +# [200.0, 65.0], # control point +# [210.0, 98.0], # base +# [220.0, 70.0], # control point +# [130.0, 55.0]]) # control point +points = torch.tensor([[ 20.0, 128.0], # base + [ 50.0, 128.0], # control point + [170.0, 128.0], # control point + [200.0, 128.0]]) # base +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = False, + 
stroke_width = torch.tensor(10.0)) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = None, + stroke_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + +path.points[:, 1] += 1e-3 +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img2 = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + +# diff = img2 - img +# diff = diff[:, :, 0] / 1e-3 +# import matplotlib.pyplot as plt +# plt.imshow(diff) +# plt.show() + +# # The output image is in linear RGB space. Do Gamma correction before saving the image. 
+# pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/target.png', gamma=1.0) +# target = img.clone() + +render_grad = pydiffvg.RenderFunction.render_grad +img = render_grad(torch.ones(256, 256, 1), # grad_img + 256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +img = img[:, :, 0] +import matplotlib.pyplot as plt +plt.imshow(img) +plt.show() + +# # Move the path to produce initial guess +# # normalize points for easier learning rate +# # points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base +# # [155.0/256.0, 65.0/256.0], # control point +# # [100.0/256.0, 180.0/256.0], # control point +# # [ 65.0/256.0, 238.0/256.0], # base +# # [100.0/256.0, 200.0/256.0], # control point +# # [170.0/256.0, 55.0/256.0], # control point +# # [220.0/256.0, 100.0/256.0], # base +# # [210.0/256.0, 80.0/256.0], # control point +# # [140.0/256.0, 60.0/256.0]], # control point +# # requires_grad = True) +# points_n = torch.tensor([[118.4274/256.0, 32.0159/256.0], +# [174.9657/256.0, 28.1877/256.0], +# [ 87.6629/256.0, 175.1049/256.0], +# [ 57.8093/256.0, 232.8987/256.0], +# [ 80.1829/256.0, 165.4280/256.0], +# [197.3640/256.0, 83.4058/256.0], +# [209.3676/256.0, 97.9176/256.0], +# [219.1048/256.0, 72.0000/256.0], +# [143.1226/256.0, 57.0636/256.0]], +# requires_grad = True) +# color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +# path.points = points_n * 256 +# path_group.fill_color = color +# scene_args = pydiffvg.RenderFunction.serialize_scene(\ +# canvas_width, canvas_height, shapes, shape_groups, +# output_type = pydiffvg.OutputType.sdf) +# img = render(256, # width +# 256, # height +# 1, # num_samples_x +# 1, # num_samples_y +# 1, # seed +# None, # background_image +# *scene_args) +# img /= 256.0 +# pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/init.png', gamma=1.0) + +# # Optimize +# optimizer = torch.optim.Adam([points_n, color], lr=1e-3) +# # Run 100 Adam iterations. 
+# for t in range(2): +# print('iteration:', t) +# optimizer.zero_grad() +# # Forward pass: render the image. +# path.points = points_n * 256 +# path_group.fill_color = color +# scene_args = pydiffvg.RenderFunction.serialize_scene(\ +# canvas_width, canvas_height, shapes, shape_groups, +# output_type = pydiffvg.OutputType.sdf) +# img = render(256, # width +# 256, # height +# 1, # num_samples_x +# 1, # num_samples_y +# t+1, # seed +# None, # background_image +# *scene_args) +# img /= 256.0 +# # Save the intermediate render. +# pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/iter_{}.png'.format(t), gamma=1.0) +# # Compute the loss function. Here it is L2. +# loss = (img - target).pow(2).sum() +# print('loss:', loss.item()) + +# # Backpropagate the gradients. +# loss.backward() +# # Print the gradients +# print('points_n.grad:', points_n.grad) +# print('color.grad:', color.grad) + +# # Take a gradient descent step. +# optimizer.step() +# # Print the current params. +# print('points:', path.points) +# print('color:', path_group.fill_color) +# exit() + +# # Render the final result. +# scene_args = pydiffvg.RenderFunction.serialize_scene(\ +# canvas_width, canvas_height, shapes, shape_groups, +# output_type = pydiffvg.OutputType.sdf) +# img = render(256, # width +# 256, # height +# 1, # num_samples_x +# 1, # num_samples_y +# 102, # seed +# None, # background_image +# *scene_args) +# img /= 256.0 +# # Save the images and differences. +# pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/final.png', gamma=1.0) + +# # Convert the intermediate renderings to a video. 
+# from subprocess import call +# call(["ffmpeg", "-framerate", "24", "-i", +# "results/single_curve_sdf/iter_%d.png", "-vb", "20M", +# "results/single_curve_sdf/out.mp4"]) diff --git a/diffvg/apps/single_curve_tf.py b/diffvg/apps/single_curve_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..cbd7634b91b74b633e2f8e907fc7e51fb36fadb4 --- /dev/null +++ b/diffvg/apps/single_curve_tf.py @@ -0,0 +1,109 @@ +import pydiffvg_tensorflow as pydiffvg +import tensorflow as tf +import skimage +import numpy as np + +canvas_width, canvas_height = 256, 256 +num_control_points = tf.constant([2, 2, 2]) + +points = tf.constant([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0], # base + [ 90.0, 180.0], # control point + [200.0, 65.0], # control point + [210.0, 98.0], # base + [220.0, 70.0], # control point + [130.0, 55.0]]) # control point +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = True) +shapes = [path] +path_group = pydiffvg.ShapeGroup( shape_ids = tf.constant([0], dtype=tf.int32), + fill_color = tf.constant([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +render = pydiffvg.render +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(0), # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img, 'results/single_curve_tf/target.png', gamma=2.2) +target = tf.identity(img) + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = tf.Variable([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0], # base + [100.0/256.0, 200.0/256.0], # control point + [170.0/256.0, 55.0/256.0], # control point + [220.0/256.0, 100.0/256.0], # base + [210.0/256.0, 80.0/256.0], # control point + [140.0/256.0, 60.0/256.0]]) # control point + +color = tf.Variable([0.3, 0.2, 0.5, 1.0]) +path.points = points_n * 256 +path_group.fill_color = color +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(1), # seed + *scene_args) +pydiffvg.imwrite(img, 'results/single_curve_tf/init.png', gamma=2.2) + +optimizer = tf.compat.v1.train.AdamOptimizer(1e-2) + +for t in range(100): + print('iteration:', t) + + with tf.GradientTape() as tape: + # Forward pass: render the image. + path.points = points_n * 256 + path_group.fill_color = color + # Important to use a different seed every iteration, otherwise the result + # would be biased. + scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(t+1), # seed, + *scene_args) + loss_value = tf.reduce_sum(tf.square(img - target)) + + print(f"loss_value: {loss_value}") + pydiffvg.imwrite(img, 'results/single_curve_tf/iter_{}.png'.format(t)) + + grads = tape.gradient(loss_value, [points_n, color]) + print(grads) + optimizer.apply_gradients(zip(grads, [points_n, color])) + +# Render the final result. 
+path.points = points_n * 256 +path_group.fill_color = color +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(101), # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img, 'results/single_curve_tf/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_curve_tf/iter_%d.png", "-vb", "20M", + "results/single_curve_tf/out.mp4"]) diff --git a/diffvg/apps/single_ellipse.py b/diffvg/apps/single_ellipse.py new file mode 100644 index 0000000000000000000000000000000000000000..f6981ae79d65a5ac6c5997d25f53c5795f5c14fd --- /dev/null +++ b/diffvg/apps/single_ellipse.py @@ -0,0 +1,109 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +ellipse = pydiffvg.Ellipse(radius = torch.tensor([60.0, 30.0]), + center = torch.tensor([128.0, 128.0])) +shapes = [ellipse] +ellipse_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [ellipse_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_ellipse/target.png', gamma=2.2) +target = img.clone() + +# Move the ellipse to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor([20.0 / 256.0, 40.0 / 256.0], requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +ellipse.radius = radius_n * 256 +ellipse.center = center_n * 256 +ellipse_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_ellipse/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, color], lr=1e-2) +# Run 50 Adam iterations. +for t in range(50): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + ellipse.radius = radius_n * 256 + ellipse.center = center_n * 256 + ellipse_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_ellipse/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. 
+ print('radius:', ellipse.radius) + print('center:', ellipse.center) + print('color:', ellipse_group.fill_color) + +# Render the final result. +ellipse.radius = radius_n * 256 +ellipse.center = center_n * 256 +ellipse_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 52, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_ellipse/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_ellipse/iter_%d.png", "-vb", "20M", + "results/single_ellipse/out.mp4"]) diff --git a/diffvg/apps/single_ellipse_transform.py b/diffvg/apps/single_ellipse_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..4f84cc49115438492959c3b09f8c2d1ec9ccaae4 --- /dev/null +++ b/diffvg/apps/single_ellipse_transform.py @@ -0,0 +1,112 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +ellipse = pydiffvg.Ellipse(radius = torch.tensor([60.0, 30.0]), + center = torch.tensor([128.0, 128.0])) +shapes = [ellipse] +ellipse_group = pydiffvg.ShapeGroup(\ + shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]), + shape_to_canvas = torch.eye(3, 3)) +shape_groups = [ellipse_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_ellipse_transform/target.png', gamma=2.2) +target = img.clone() + +# Affine transform the ellipse to produce initial guess +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +affine = torch.zeros(2, 3) +affine[0, 0] = 1.3 +affine[0, 1] = 0.2 +affine[0, 2] = 0.1 +affine[1, 0] = 0.2 +affine[1, 1] = 0.6 +affine[1, 2] = 0.3 +affine.requires_grad = True +shape_to_canvas = torch.cat((affine, torch.tensor([[0.0, 0.0, 1.0]])), axis=0) +ellipse_group.fill_color = color +ellipse_group.shape_to_canvas = shape_to_canvas +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_ellipse_transform/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([color, affine], lr=1e-2) +# Run 150 Adam iterations. +for t in range(150): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + ellipse_group.fill_color = color + ellipse_group.shape_to_canvas = torch.cat((affine, torch.tensor([[0.0, 0.0, 1.0]])), axis=0) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_ellipse_transform/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('color.grad:', color.grad) + print('affine.grad:', affine.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. 
+ print('color:', ellipse_group.fill_color) + print('affine:', affine) + +# Render the final result. +ellipse_group.fill_color = color +ellipse_group.shape_to_canvas = torch.cat((affine, torch.tensor([[0.0, 0.0, 1.0]])), axis=0) +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 52, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_ellipse_transform/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_ellipse_transform/iter_%d.png", "-vb", "20M", + "results/single_ellipse_transform/out.mp4"]) diff --git a/diffvg/apps/single_gradient.py b/diffvg/apps/single_gradient.py new file mode 100644 index 0000000000000000000000000000000000000000..29300a226efc081976c67b02126b5055be188275 --- /dev/null +++ b/diffvg/apps/single_gradient.py @@ -0,0 +1,131 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +color = pydiffvg.LinearGradient(\ + begin = torch.tensor([50.0, 50.0]), + end = torch.tensor([200.0, 200.0]), + offsets = torch.tensor([0.0, 1.0]), + stop_colors = torch.tensor([[0.2, 0.5, 0.7, 1.0], + [0.7, 0.2, 0.5, 1.0]])) +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), fill_color = color) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image 
+ *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_gradient/target.png', gamma=2.2) +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +begin_n = torch.tensor([100.0 / 256.0, 100.0 / 256.0], requires_grad=True) +end_n = torch.tensor([150.0 / 256.0, 150.0 / 256.0], requires_grad=True) +stop_colors = torch.tensor([[0.1, 0.9, 0.2, 1.0], + [0.5, 0.3, 0.6, 1.0]], requires_grad=True) +color.begin = begin_n * 256 +color.end = end_n * 256 +color.stop_colors = stop_colors +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +shapes = [circle] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_gradient/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, begin_n, end_n, stop_colors], lr=1e-2) +# Run 50 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + color.begin = begin_n * 256 + color.end = end_n * 256 + color.stop_colors = stop_colors + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. 
+ pydiffvg.imwrite(img.cpu(), 'results/single_gradient/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('begin.grad:', begin_n.grad) + print('end.grad:', end_n.grad) + print('stop_colors.grad:', stop_colors.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', circle.radius) + print('center:', circle.center) + print('begin:', begin_n) + print('end:', end_n) + print('stop_colors:', stop_colors) + +# Render the final result. +color.begin = begin_n * 256 +color.end = end_n * 256 +color.stop_colors = stop_colors +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 52, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_gradient/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_gradient/iter_%d.png", "-vb", "20M", + "results/single_gradient/out.mp4"]) diff --git a/diffvg/apps/single_open_curve.py b/diffvg/apps/single_open_curve.py new file mode 100644 index 0000000000000000000000000000000000000000..8ae047e61f7affb8d8ead1e817ee311ba7acb47f --- /dev/null +++ b/diffvg/apps/single_open_curve.py @@ -0,0 +1,120 @@ +import pydiffvg +import torch +import skimage + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0]]) # base +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = False, + stroke_width = torch.tensor(5.0)) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = None, + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_open_curve/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0]], # base + requires_grad = True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +stroke_width_n = torch.tensor(10.0 / 100.0, requires_grad=True) +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, stroke_color, stroke_width_n], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path.stroke_width = stroke_width_n * 100 + path_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_open_curve/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. 
+ loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('stroke_color.grad:', stroke_color.grad) + print('stroke_width.grad:', stroke_width_n.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', path.points) + print('stroke_color:', path_group.stroke_color) + print('stroke_width:', path.stroke_width) + +# Render the final result. +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_open_curve/iter_%d.png", "-vb", "20M", + "results/single_open_curve/out.mp4"]) diff --git a/diffvg/apps/single_open_curve_thickness.py b/diffvg/apps/single_open_curve_thickness.py new file mode 100644 index 0000000000000000000000000000000000000000..44b2bf15af6b42f3bfe9e2cef37e3d676c89e581 --- /dev/null +++ b/diffvg/apps/single_open_curve_thickness.py @@ -0,0 +1,124 @@ +import pydiffvg +import torch +import skimage + +pydiffvg.set_print_timing(True) + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0]]) # base +thickness = torch.tensor([10.0, 5.0, 4.0, 20.0]) +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = False, + stroke_width = thickness) +shapes = [path] +path_group = 
pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = None, + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0]], # base + requires_grad = True) +thickness_n = torch.tensor([10.0 / 100.0, 10.0 / 100.0, 10.0 / 100.0, 10.0 / 100.0], + requires_grad = True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +path.points = points_n * 256 +path.stroke_width = thickness_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, thickness_n, stroke_color], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. 
+ path.points = points_n * 256 + path.stroke_width = thickness_n * 100 + path_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('thickness_n.grad:', thickness_n.grad) + print('stroke_color.grad:', stroke_color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', path.points) + print('thickness:', path.stroke_width) + print('stroke_color:', path_group.stroke_color) + +# Render the final result. +path.points = points_n * 256 +path.stroke_width = thickness_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_open_curve_thickness/iter_%d.png", "-vb", "20M", + "results/single_open_curve_thickness/out.mp4"]) diff --git a/diffvg/apps/single_path.py b/diffvg/apps/single_path.py new file mode 100644 index 0000000000000000000000000000000000000000..f3ceb51bc3abd5f920ab5b9be8674d859c3eafce --- /dev/null +++ b/diffvg/apps/single_path.py @@ -0,0 +1,103 @@ +import pydiffvg +import torch +import skimage + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 510, 510 +# https://www.flaticon.com/free-icon/black-plane_61212#term=airplane&page=1&position=8 +shapes = pydiffvg.from_svg_path('M510,255c0-20.4-17.85-38.25-38.25-38.25H331.5L204,12.75h-51l63.75,204H76.5l-38.25-51H0L25.5,255L0,344.25h38.25l38.25-51h140.25l-63.75,204h51l127.5-204h140.25C492.15,293.25,510,275.4,510,255z') +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(510, # width + 510, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_path/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +noise = torch.FloatTensor(shapes[0].points.shape).uniform_(0.0, 1.0) +points_n = (shapes[0].points.clone() + (noise * 60 - 30)) / 510.0 +points_n.requires_grad = True +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +shapes[0].points = points_n * 510 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(510, # width + 510, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_path/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + shapes[0].points = points_n * 510 + path_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(510, # width + 510, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_path/iter_{:02}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', shapes[0].points) + print('color:', path_group.fill_color) + +# Render the final result. 
+shapes[0].points = points_n * 510 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(510, # width + 510, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_path/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "20", "-i", + "results/single_path/iter_%02d.png", "-vb", "20M", + "results/single_path/out.mp4"]) diff --git a/diffvg/apps/single_path_sdf.py b/diffvg/apps/single_path_sdf.py new file mode 100644 index 0000000000000000000000000000000000000000..d3c152cd8f4f27b2023eb0d28634eb806cd062ad --- /dev/null +++ b/diffvg/apps/single_path_sdf.py @@ -0,0 +1,109 @@ +import pydiffvg +import torch +import skimage + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 510, 510 +# https://www.flaticon.com/free-icon/black-plane_61212#term=airplane&page=1&position=8 +shapes = pydiffvg.from_svg_path('M510,255c0-20.4-17.85-38.25-38.25-38.25H331.5L204,12.75h-51l63.75,204H76.5l-38.25-51H0L25.5,255L0,344.25h38.25l38.25-51h140.25l-63.75,204h51l127.5-204h140.25C492.15,293.25,510,275.4,510,255z') +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(510, # width + 510, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +img = img / 510 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/single_path_sdf/target.png', gamma=1.0) +target = img.clone() + +# Move the path 
to produce initial guess +# normalize points for easier learning rate +noise = torch.FloatTensor(shapes[0].points.shape).uniform_(0.0, 1.0) +points_n = (shapes[0].points.clone() + (noise * 60 - 30)) / 510.0 +points_n.requires_grad = True +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +shapes[0].points = points_n * 510 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(510, # width + 510, # height + 1, # num_samples_x + 1, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +img = img / 510 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/single_path_sdf/init.png', gamma=1.0) + +# Optimize +optimizer = torch.optim.Adam([points_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + shapes[0].points = points_n * 510 + path_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + img = render(510, # width + 510, # height + 1, # num_samples_x + 1, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + img = img / 510 # Normalize SDF to [0, 1] + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_path_sdf/iter_{}.png'.format(t), gamma=1.0) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', shapes[0].points) + print('color:', path_group.fill_color) + +# Render the final result. 
+shapes[0].points = points_n * 510 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(510, # width + 510, # height + 1, # num_samples_x + 1, # num_samples_y + 102, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_path_sdf/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_path_sdf/iter_%d.png", "-vb", "20M", + "results/single_path_sdf/out.mp4"]) diff --git a/diffvg/apps/single_polygon.py b/diffvg/apps/single_polygon.py new file mode 100644 index 0000000000000000000000000000000000000000..c0fc27823a74aa953f664001e930a40f37244079 --- /dev/null +++ b/diffvg/apps/single_polygon.py @@ -0,0 +1,112 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +# https://www.w3schools.com/graphics/svg_polygon.asp +points = torch.tensor([[120.0, 30.0], + [ 60.0, 218.0], + [210.0, 98.0], + [ 30.0, 98.0], + [180.0, 218.0]]) +polygon = pydiffvg.Polygon(points = points, is_closed = True) +shapes = [polygon] +polygon_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [polygon_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_polygon/target.png', gamma=2.2) +target = img.clone() + +# Move the polygon to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[140.0 / 256.0, 20.0 / 256.0], + [ 65.0 / 256.0, 228.0 / 256.0], + [215.0 / 256.0, 100.0 / 256.0], + [ 35.0 / 256.0, 90.0 / 256.0], + [160.0 / 256.0, 208.0 / 256.0]], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +polygon.points = points_n * 256 +polygon_group.color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_polygon/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([points_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + polygon.points = points_n * 256 + polygon_group.color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_polygon/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', polygon.points) + print('color:', polygon_group.fill_color) + +# Render the final result. 
+polygon.points = points_n * 256 +polygon_group.color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_polygon/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_polygon/iter_%d.png", "-vb", "20M", + "results/single_polygon/out.mp4"]) diff --git a/diffvg/apps/single_rect.py b/diffvg/apps/single_rect.py new file mode 100644 index 0000000000000000000000000000000000000000..75bfacd351a3ac78a26f1c543ea06ff33ef8373d --- /dev/null +++ b/diffvg/apps/single_rect.py @@ -0,0 +1,106 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256 ,256 +rect = pydiffvg.Rect(p_min = torch.tensor([40.0, 40.0]), + p_max = torch.tensor([160.0, 160.0])) +shapes = [rect] +rect_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [rect_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_rect/target.png', gamma=2.2) +target = img.clone() + +# Move the rect to produce initial guess +# normalize p_min & p_max for easier learning rate +p_min_n = torch.tensor([80.0 / 256.0, 20.0 / 256.0], requires_grad=True) +p_max_n = torch.tensor([100.0 / 256.0, 60.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +rect.p_min = p_min_n * 256 +rect.p_max = p_max_n * 256 +rect_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_rect/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([p_min_n, p_max_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + rect.p_min = p_min_n * 256 + rect.p_max = p_max_n * 256 + rect_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_rect/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('p_min.grad:', p_min_n.grad) + print('p_max.grad:', p_max_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. 
+ print('p_min:', rect.p_min) + print('p_max:', rect.p_max) + print('color:', rect_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_rect/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_rect/iter_%d.png", "-vb", "20M", + "results/single_rect/out.mp4"]) diff --git a/diffvg/apps/single_stroke.py b/diffvg/apps/single_stroke.py new file mode 100644 index 0000000000000000000000000000000000000000..604fba8f55f03143a1e069d664938dd20806e249 --- /dev/null +++ b/diffvg/apps/single_stroke.py @@ -0,0 +1,121 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0]]) # base +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = False, + stroke_width = torch.tensor(5.0)) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]), + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. 
Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_stroke/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0]], # base + requires_grad = True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +stroke_width_n = torch.tensor(10.0 / 100.0, requires_grad=True) +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_stroke/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, stroke_color, stroke_width_n], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path.stroke_width = stroke_width_n * 100 + path_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_stroke/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. 
+ loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('stroke_color.grad:', stroke_color.grad) + print('stroke_width.grad:', stroke_width_n.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', path.points) + print('stroke_color:', path_group.stroke_color) + print('stroke_width:', path.stroke_width) + +# Render the final result. +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + None, # background_image + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_stroke/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_stroke/iter_%d.png", "-vb", "20M", + "results/single_stroke/out.mp4"]) diff --git a/diffvg/apps/single_stroke_tf.py b/diffvg/apps/single_stroke_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..42ad5fb657e045e132910dd5e02b7e0b5910ab70 --- /dev/null +++ b/diffvg/apps/single_stroke_tf.py @@ -0,0 +1,109 @@ +import pydiffvg_tensorflow as pydiffvg +import tensorflow as tf +import skimage +import numpy as np + +canvas_width, canvas_height = 256, 256 +num_control_points = tf.constant([2]) + +points = tf.constant([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0]]) # base +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = False, + stroke_width = tf.constant(15.0)) + +shapes = [path] +path_group = pydiffvg.ShapeGroup( shape_ids = tf.constant([0], dtype=tf.int32), + fill_color = tf.constant([0.0, 0.0, 0.0, 0.0]), + stroke_color = 
tf.constant([0.6, 0.3, 0.6, 0.8])) +shape_groups = [path_group] +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +render = pydiffvg.render +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(0), # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img, 'results/single_stroke_tf/target.png', gamma=2.2) +target = tf.identity(img) + + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = tf.Variable([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0]] # base + ) +stroke_color = tf.Variable([0.4, 0.7, 0.5, 0.5]) +stroke_width_n = tf.Variable(5.0 / 100.0) +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(1), # seed + *scene_args) +pydiffvg.imwrite(img, 'results/single_stroke_tf/init.png', gamma=2.2) + + + +optimizer = tf.compat.v1.train.AdamOptimizer(1e-2) + +for t in range(100): + print('iteration:', t) + + with tf.GradientTape() as tape: + # Forward pass: render the image. + path.points = points_n * 256 + path.stroke_width = stroke_width_n * 100 + path_group.stroke_color = stroke_color + # Important to use a different seed every iteration, otherwise the result + # would be biased. 
+ scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(t+1), # seed, + *scene_args) + loss_value = tf.reduce_sum(tf.square(img - target)) + + print(f"loss_value: {loss_value}") + pydiffvg.imwrite(img, 'results/single_stroke_tf/iter_{}.png'.format(t)) + + grads = tape.gradient(loss_value, [points_n, stroke_width_n, stroke_color]) + print(grads) + optimizer.apply_gradients(zip(grads, [points_n, stroke_width_n, stroke_color])) + + +# Render the final result. +path.points = points_n * 256 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(101), # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img, 'results/single_stroke_tf/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_stroke_tf/iter_%d.png", "-vb", "20M", + "results/single_curve_tf/out.mp4"]) diff --git a/diffvg/apps/sketch_gan.py b/diffvg/apps/sketch_gan.py new file mode 100644 index 0000000000000000000000000000000000000000..6f3da593c3b4c3da2149d9ecd571cfca24794733 --- /dev/null +++ b/diffvg/apps/sketch_gan.py @@ -0,0 +1,213 @@ +"""A simple training interface using ttools.""" +import argparse +import os +import logging +import random + +import numpy as np +import torch +from torchvision.datasets import MNIST +import torchvision.transforms as xforms +from torch.utils.data import DataLoader + +import ttools +import ttools.interfaces + +import pydiffvg + +LOG = ttools.get_logger(__name__) + +pydiffvg.render_pytorch.print_timing = False + +torch.manual_seed(123) +np.random.seed(123) +torch.backends.cudnn.deterministic = True + +latent_dim = 100 +img_size = 32 +num_paths = 8 +num_segments = 8 + +def weights_init_normal(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + torch.nn.init.normal_(m.weight.data, 0.0, 0.02) + elif classname.find("BatchNorm2d") != -1: + torch.nn.init.normal_(m.weight.data, 1.0, 0.02) + torch.nn.init.constant_(m.bias.data, 0.0) + +class VisdomImageCallback(ttools.callbacks.ImageDisplayCallback): + def visualized_image(self, batch, fwd_result): + return torch.cat([batch[0], fwd_result.cpu()], dim = 2) + +# From https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/dcgan/dcgan.py +class Generator(torch.nn.Module): + def __init__(self): + super(Generator, self).__init__() + + self.fc = torch.nn.Sequential( + torch.nn.Linear(latent_dim, 128), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Linear(128, 256), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Linear(256, 512), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Linear(512, 1024), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Linear(1024, 2 * num_paths * 
(num_segments + 1) + num_paths + num_paths), + torch.nn.Sigmoid() + ) + + def forward(self, z): + out = self.fc(z) + # construct paths + imgs = [] + for b in range(out.shape[0]): + index = 0 + shapes = [] + shape_groups = [] + for i in range(num_paths): + points = img_size * out[b, index: index + 2 * (num_segments + 1)].view(-1, 2).cpu() + index += 2 * (num_segments + 1) + stroke_width = img_size * out[b, index].view(1).cpu() + index += 1 + + num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2 + path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + stroke_width = stroke_width, + is_closed = False) + shapes.append(path) + + stroke_color = out[b, index].view(1).cpu() + index += 1 + stroke_color = torch.cat([stroke_color, torch.tensor([0.0, 0.0, 1.0])]) + path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes) - 1]), + fill_color = None, + stroke_color = stroke_color) + shape_groups.append(path_group) + scene_args = pydiffvg.RenderFunction.serialize_scene(img_size, img_size, shapes, shape_groups) + render = pydiffvg.RenderFunction.apply + img = render(img_size, # width + img_size, # height + 2, # num_samples_x + 2, # num_samples_y + random.randint(0, 1048576), # seed + None, + *scene_args) + img = img[:, :, :1] + # HWC -> NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2) # NHWC -> NCHW + imgs.append(img) + img = torch.cat(imgs, dim = 0) + return img + +class Discriminator(torch.nn.Module): + def __init__(self): + super(Discriminator, self).__init__() + + def discriminator_block(in_filters, out_filters, bn=True): + block = [torch.nn.Conv2d(in_filters, out_filters, 3, 2, 1), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Dropout2d(0.25)] + if bn: + block.append(torch.nn.BatchNorm2d(out_filters, 0.8)) + return block + + self.model = torch.nn.Sequential( + *discriminator_block(1, 16, bn=False), + *discriminator_block(16, 32), + *discriminator_block(32, 64), + *discriminator_block(64, 128), + 
) + + # The height and width of downsampled image + ds_size = img_size // 2 ** 4 + self.adv_layer = torch.nn.Sequential( + torch.nn.Linear(128 * ds_size ** 2, 1), + torch.nn.Sigmoid()) + + def forward(self, img): + out = self.model(img) + out = out.view(out.shape[0], -1) + validity = self.adv_layer(out) + + return validity + +class MNISTInterface(ttools.interfaces.SGANInterface): + """An adapter to run or train a model.""" + + def __init__(self, gen, discrim, lr=2e-4): + super(MNISTInterface, self).__init__(gen, discrim, lr, opt = 'adam') + + def forward(self, batch): + return self.gen(torch.zeros([batch[0].shape[0], latent_dim], device = self.device).normal_()) + + def _discriminator_input(self, batch, fwd_data, fake=False): + if fake: + return fwd_data + else: + return batch[0].to(self.device) + +def train(args): + """Train a MNIST classifier.""" + + # Setup train and val data + _xform = xforms.Compose([xforms.Resize([32, 32]), xforms.ToTensor()]) + data = MNIST("data/mnist", train=True, download=True, transform=_xform) + + # Initialize asynchronous dataloaders + loader = DataLoader(data, batch_size=args.bs, num_workers=2) + + # Instantiate the models + gen = Generator() + discrim = Discriminator() + + gen.apply(weights_init_normal) + discrim.apply(weights_init_normal) + + # Checkpointer to save/recall model parameters + checkpointer_gen = ttools.Checkpointer(os.path.join(args.out, "checkpoints"), model=gen, prefix="gen_") + checkpointer_discrim = ttools.Checkpointer(os.path.join(args.out, "checkpoints"), model=discrim, prefix="discrim_") + + # resume from a previous checkpoint, if any + checkpointer_gen.load_latest() + checkpointer_discrim.load_latest() + + # Setup a training interface for the model + interface = MNISTInterface(gen, discrim, lr=args.lr) + + # Create a training looper with the interface we defined + trainer = ttools.Trainer(interface) + + # Adds several callbacks, that will be called by the trainer -------------- + # A periodic checkpointing 
operation + trainer.add_callback(ttools.callbacks.CheckpointingCallback(checkpointer_gen)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback(checkpointer_discrim)) + # A simple progress bar + trainer.add_callback(ttools.callbacks.ProgressBarCallback( + keys=["loss_g", "loss_d", "loss"])) + # A volatile logging using visdom + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=["loss_g", "loss_d", "loss"], + port=8080, env="mnist_demo")) + # Image + trainer.add_callback(VisdomImageCallback(port=8080, env="mnist_demo")) + # ------------------------------------------------------------------------- + + # Start the training + LOG.info("Training started, press Ctrl-C to interrupt.") + trainer.train(loader, num_epochs=args.epochs) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # TODO: subparsers + parser.add_argument("data", help="directory where we download and store the MNIST dataset.") + parser.add_argument("out", help="directory where we write the checkpoints and visualizations.") + parser.add_argument("--lr", type=float, default=1e-4, help="learning rate for the optimizer.") + parser.add_argument("--epochs", type=int, default=500, help="number of epochs to train for.") + parser.add_argument("--bs", type=int, default=64, help="number of elements per batch.") + args = parser.parse_args() + ttools.set_logger(True) # activate debug prints + train(args) diff --git a/diffvg/apps/style_transfer.py b/diffvg/apps/style_transfer.py new file mode 100644 index 0000000000000000000000000000000000000000..43ba38c925f38fde3653d5cd4de14dbbd8cbadf2 --- /dev/null +++ b/diffvg/apps/style_transfer.py @@ -0,0 +1,291 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torchvision.transforms as transforms +import torchvision.models as models +from PIL import Image +import copy +import pydiffvg +import argparse + +def main(args): + pydiffvg.set_use_gpu(torch.cuda.is_available()) + + 
canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(args.content_file) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + # Transform to gamma space + pydiffvg.imwrite(img.cpu(), 'results/style_transfer/init.png', gamma=1.0) + # HWC -> NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2) # NHWC -> NCHW + + loader = transforms.Compose([ + transforms.ToTensor()]) # transform it into a torch tensor + + def image_loader(image_name): + image = Image.open(image_name) + # fake batch dimension required to fit network's input dimensions + image = loader(image).unsqueeze(0) + return image.to(pydiffvg.get_device(), torch.float) + + style_img = image_loader(args.style_img) + # alpha blend content with a gray background + content_img = img[:, :3, :, :] * img[:, 3, :, :] + \ + 0.5 * torch.ones([1, 3, img.shape[2], img.shape[3]]) * \ + (1 - img[:, 3, :, :]) + + assert style_img.size() == content_img.size(), \ + "we need to import style and content images of the same size" + + unloader = transforms.ToPILImage() # reconvert into PIL image + + class ContentLoss(nn.Module): + def __init__(self, target,): + super(ContentLoss, self).__init__() + # we 'detach' the target content from the tree used + # to dynamically compute the gradient: this is a stated value, + # not a variable. Otherwise the forward method of the criterion + # will throw an error. + self.target = target.detach() + + def forward(self, input): + self.loss = F.mse_loss(input, self.target) + return input + + def gram_matrix(input): + a, b, c, d = input.size() # a=batch size(=1) + # b=number of feature maps + # (c,d)=dimensions of a f. 
map (N=c*d) + + features = input.view(a * b, c * d) # resise F_XL into \hat F_XL + + G = torch.mm(features, features.t()) # compute the gram product + + # we 'normalize' the values of the gram matrix + # by dividing by the number of element in each feature maps. + return G.div(a * b * c * d) + + class StyleLoss(nn.Module): + + def __init__(self, target_feature): + super(StyleLoss, self).__init__() + self.target = gram_matrix(target_feature).detach() + + def forward(self, input): + G = gram_matrix(input) + self.loss = F.mse_loss(G, self.target) + return input + + device = pydiffvg.get_device() + cnn = models.vgg19(pretrained=True).features.to(device).eval() + + cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device) + cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device) + + # create a module to normalize input image so we can easily put it in a + # nn.Sequential + class Normalization(nn.Module): + def __init__(self, mean, std): + super(Normalization, self).__init__() + # .view the mean and std to make them [C x 1 x 1] so that they can + # directly work with image Tensor of shape [B x C x H x W]. + # B is batch size. C is number of channels. H is height and W is width. 
+ self.mean = mean.clone().view(-1, 1, 1) + self.std = std.clone().view(-1, 1, 1) + + def forward(self, img): + # normalize img + return (img - self.mean) / self.std + + # desired depth layers to compute style/content losses : + content_layers_default = ['conv_4'] + style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5'] + + def get_style_model_and_losses(cnn, normalization_mean, normalization_std, + style_img, content_img, + content_layers=content_layers_default, + style_layers=style_layers_default): + cnn = copy.deepcopy(cnn) + + # normalization module + normalization = Normalization(normalization_mean, normalization_std).to(device) + + # just in order to have an iterable access to or list of content/syle + # losses + content_losses = [] + style_losses = [] + + # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential + # to put in modules that are supposed to be activated sequentially + model = nn.Sequential(normalization) + + i = 0 # increment every time we see a conv + for layer in cnn.children(): + if isinstance(layer, nn.Conv2d): + i += 1 + name = 'conv_{}'.format(i) + elif isinstance(layer, nn.ReLU): + name = 'relu_{}'.format(i) + # The in-place version doesn't play very nicely with the ContentLoss + # and StyleLoss we insert below. So we replace with out-of-place + # ones here. 
+ layer = nn.ReLU(inplace=False) + elif isinstance(layer, nn.MaxPool2d): + name = 'pool_{}'.format(i) + elif isinstance(layer, nn.BatchNorm2d): + name = 'bn_{}'.format(i) + else: + raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__)) + + model.add_module(name, layer) + + if name in content_layers: + # add content loss: + target = model(content_img).detach() + content_loss = ContentLoss(target) + model.add_module("content_loss_{}".format(i), content_loss) + content_losses.append(content_loss) + + if name in style_layers: + # add style loss: + target_feature = model(style_img).detach() + style_loss = StyleLoss(target_feature) + model.add_module("style_loss_{}".format(i), style_loss) + style_losses.append(style_loss) + + # now we trim off the layers after the last content and style losses + for i in range(len(model) - 1, -1, -1): + if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss): + break + + model = model[:(i + 1)] + + return model, style_losses, content_losses + + def run_style_transfer(cnn, normalization_mean, normalization_std, + content_img, style_img, + canvas_width, canvas_height, + shapes, shape_groups, + num_steps=500, style_weight=5000, content_weight=1): + """Run the style transfer.""" + print('Building the style transfer model..') + model, style_losses, content_losses = get_style_model_and_losses(cnn, + normalization_mean, normalization_std, style_img, content_img) + point_params = [] + color_params = [] + stroke_width_params = [] + for shape in shapes: + if isinstance(shape, pydiffvg.Path): + point_params.append(shape.points.requires_grad_()) + stroke_width_params.append(shape.stroke_width.requires_grad_()) + for shape_group in shape_groups: + if isinstance(shape_group.fill_color, torch.Tensor): + color_params.append(shape_group.fill_color.requires_grad_()) + elif isinstance(shape_group.fill_color, pydiffvg.LinearGradient): + point_params.append(shape_group.fill_color.begin.requires_grad_()) + 
point_params.append(shape_group.fill_color.end.requires_grad_()) + color_params.append(shape_group.fill_color.stop_colors.requires_grad_()) + if isinstance(shape_group.stroke_color, torch.Tensor): + color_params.append(shape_group.stroke_color.requires_grad_()) + elif isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): + point_params.append(shape_group.stroke_color.begin.requires_grad_()) + point_params.append(shape_group.stroke_color.end.requires_grad_()) + color_params.append(shape_group.stroke_color.stop_colors.requires_grad_()) + + point_optimizer = optim.Adam(point_params, lr=1.0) + color_optimizer = optim.Adam(color_params, lr=0.01) + stroke_width_optimizers = optim.Adam(stroke_width_params, lr=0.1) + print('Optimizing..') + run = [0] + while run[0] <= num_steps: + point_optimizer.zero_grad() + color_optimizer.zero_grad() + stroke_width_optimizers.zero_grad() + + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + # alpha blend img with a gray background + img = img[:, :, :3] * img[:, :, 3:4] + \ + 0.5 * torch.ones([img.shape[0], img.shape[1], 3]) * \ + (1 - img[:, :, 3:4]) + + pydiffvg.imwrite(img.cpu(), + 'results/style_transfer/step_{}.png'.format(run[0]), + gamma=1.0) + + # HWC to NCHW + img = img.permute([2, 0, 1]).unsqueeze(0) + model(img) + style_score = 0 + content_score = 0 + + for sl in style_losses: + style_score += sl.loss + for cl in content_losses: + content_score += cl.loss + + style_score *= style_weight + content_score *= content_weight + + loss = style_score + content_score + loss.backward() + + run[0] += 1 + if run[0] % 1 == 0: + print("run {}:".format(run)) + print('Style Loss : {:4f} Content Loss: {:4f}'.format( + style_score.item(), content_score.item())) + print() + + point_optimizer.step() 
+ color_optimizer.step() + stroke_width_optimizers.step() + + for color in color_params: + color.data.clamp_(0, 1) + for w in stroke_width_params: + w.data.clamp_(0.5, 4.0) + + return shapes, shape_groups + + shapes, shape_groups = run_style_transfer(\ + cnn, cnn_normalization_mean, cnn_normalization_std, + content_img, style_img, + canvas_width, canvas_height, shapes, shape_groups) + + scene_args = pydiffvg.RenderFunction.serialize_scene(shapes, shape_groups) + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + # Transform to gamma space + pydiffvg.imwrite(img.cpu(), 'results/style_transfer/output.png', gamma=1.0) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("content_file", help="source SVG path") + parser.add_argument("style_img", help="target image path") + args = parser.parse_args() + main(args) diff --git a/diffvg/apps/svg_brush.py b/diffvg/apps/svg_brush.py new file mode 100644 index 0000000000000000000000000000000000000000..de54e48e81e1245c788d5609eefaaa03185c453c --- /dev/null +++ b/diffvg/apps/svg_brush.py @@ -0,0 +1,167 @@ +import sys +sys.path.append("../svg") +from geometry import GeometryLoss +import numpy as np +import pygame as pg +import torch +import pydiffvg +import tkinter as tk +from tkinter import filedialog + +def box_kernel(val): + return np.heaviside(-val+1,0) + +def cone_kernel(val): + return np.maximum(0,1-val) + +def nptosurf(arr): + if arr.shape[2]==1: + #greyscale + shape=arr.shape + shape=(shape[0],shape[1],3) + arr=np.broadcast_to(arr,shape) + return pg.surfarray.make_surface(arr*255) + +def brush_tensor(screen_size,coords,radius,kernel): + coordarr=np.stack(np.meshgrid(np.linspace(0,screen_size[0]-1,screen_size[0]),np.linspace(0,screen_size[1]-1,screen_size[1]),indexing='ij'),axis=2) + ctrarr = np.reshape(np.array(coords), [1, 1, 2]) + 
distarr=np.sqrt(np.sum(np.power(coordarr-ctrarr,2),axis=2)) + valarr=kernel(distarr/radius) + return torch.tensor(valarr,requires_grad=False,dtype=torch.float32) + +def checkerboard(shape, square_size=2): + xv,yv=np.meshgrid(np.floor(np.linspace(0,shape[1]-1,shape[1])/square_size),np.floor(np.linspace(0,shape[0]-1,shape[0])/square_size)) + bin=np.expand_dims(((xv+yv)%2),axis=2) + res=bin*np.array([[[1., 1., 1.,]]])+(1-bin)*np.array([[[.75, .75, .75,]]]) + return torch.tensor(res,requires_grad=False,dtype=torch.float32) + +def render(optim, viewport): + scene_args = pydiffvg.RenderFunction.serialize_scene(*optim.build_scene()) + render = pydiffvg.RenderFunction.apply + img = render(viewport[0], # width + viewport[1], # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + return img + +def optimize(optim, viewport, brush_kernel, increase=True, strength=0.1): + optim.zero_grad() + + geomLoss=torch.tensor(0.) + + for shape, gloss in zip(optim.scene[2],geometryLosses): + geomLoss+=gloss.compute(shape) + + img=render(optim,viewport) + + imalpha=img[:,:,3] + + multiplied=imalpha*brush_kernel + + loss=((1-multiplied).mean() if increase else multiplied.mean())*strength + + loss+=geomLoss + + loss.backward() + + optim.step() + + return render(optim,viewport) + +def get_infile(): + pydiffvg.set_use_gpu(False) + root = tk.Tk() + #root.withdraw() + + file_path = filedialog.askopenfilename(initialdir = ".",title = "Select graphic to optimize",filetypes = (("SVG files","*.svg"),("all files","*.*"))) + + root.destroy() + + return file_path + +def compositebg(img): + bg=checkerboard(img.shape,2) + color=img[:,:,0:3] + alpha=img[:,:,3] + composite=alpha.unsqueeze(2)*color+(1-alpha).unsqueeze(2)*bg + + return composite + +def main(): + infile=get_infile() + + settings=pydiffvg.SvgOptimizationSettings() + settings.global_override(["optimize_color"],False) + settings.global_override(["transforms","optimize_transforms"], False) + 
settings.global_override(["optimizer"], "SGD") + settings.global_override(["paths","shape_lr"], 1e-1) + + optim=pydiffvg.OptimizableSvg(infile,settings) + + global geometryLosses + geometryLosses = [] + + for shape in optim.build_scene()[2]: + geometryLosses.append(GeometryLoss(shape)) + + scaling=1 + brush_radius=100 + graphic_size=optim.canvas + screen_size=(graphic_size[1]*scaling, graphic_size[0]*scaling) + + pg.init() + + screen=pg.display.set_mode(screen_size) + screen.fill((255,255,255)) + + img=render(optim,graphic_size) + print(img.max()) + + npsurf = pg.transform.scale(nptosurf(compositebg(img).detach().permute(1,0,2).numpy()), screen_size) + + screen.blit(npsurf,(0,0)) + + pg.display.update() + clock=pg.time.Clock() + + z=0 + btn=0 + + while True: + clock.tick(60) + for event in pg.event.get(): + if event.type==pg.QUIT: + pg.quit() + sys.exit() + + y, x = pg.mouse.get_pos() + if event.type == pg.MOUSEBUTTONDOWN: + if event.button in [1,3]: + z=1 + btn=event.button + elif event.button == 4: + brush_radius*=1.1 + elif event.button == 5: + brush_radius/=1.1 + brush_radius=max(brush_radius,5) + elif event.type == pg.MOUSEBUTTONUP: + if event.button in [1,3]: + z=0 + + if z==1: + brush=brush_tensor((graphic_size[0],graphic_size[1]), (x/scaling, y/scaling), brush_radius, box_kernel) + img=optimize(optim,graphic_size,brush,btn==1) + npsurf = pg.transform.scale(nptosurf(compositebg(img).detach().permute(1,0,2).numpy()), screen_size) + + + screen.blit(npsurf,(0,0)) + pg.draw.circle(screen, (255,255,255), (y,x), int(brush_radius*scaling), 1) + pg.display.update() + + +if __name__ == '__main__': + main() + diff --git a/diffvg/apps/svg_parse_test.py b/diffvg/apps/svg_parse_test.py new file mode 100644 index 0000000000000000000000000000000000000000..abe6e35471a2b200d40035a2882b38d67011500b --- /dev/null +++ b/diffvg/apps/svg_parse_test.py @@ -0,0 +1,65 @@ +import pydiffvg +import sys +import numpy as np +import torch +sys.path.append("../pydiffvg") + +from 
optimize_svg import OptimizableSvg + +pydiffvg.set_use_gpu(False) + +""" +for x in range(100000): + inmat=np.eye(3) + inmat[0:2,:]=(np.random.rand(2,3)-0.5)*2 + decomp=OptimizableSvg.TransformTools.decompose(inmat) + outmat=OptimizableSvg.TransformTools.recompose(torch.tensor(decomp[0],dtype=torch.float32),torch.tensor(decomp[1],dtype=torch.float32),torch.tensor(decomp[2],dtype=torch.float32),torch.tensor(decomp[3],dtype=torch.float32)).numpy() + dif=np.linalg.norm(inmat-outmat) + if dif > 1e-3: + print(dif) + print(inmat) + print(outmat) + print(decomp)""" + + +infile='./imgs/note_small.svg' + + +canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(infile) +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +render = pydiffvg.RenderFunction.apply +img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'test_old.png', gamma=1.0) + +#optim=OptimizableSvg('linux.svg',verbose=True) +optim=OptimizableSvg(infile,verbose=True) + +scene=optim.build_scene() +scene_args = pydiffvg.RenderFunction.serialize_scene(*scene) +render = pydiffvg.RenderFunction.apply +img = render(scene[0], # width + scene[1], # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + + + +with open("resaved.svg","w") as f: + f.write(optim.write_xml()) + +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'test_new.png', gamma=1.0) + +print("Done!") \ No newline at end of file diff --git a/diffvg/apps/test_eval_positions.py b/diffvg/apps/test_eval_positions.py new file mode 100644 index 0000000000000000000000000000000000000000..0b0078b900a75a12689f90a6dd4e8ba0cb1ada72 --- /dev/null +++ b/diffvg/apps/test_eval_positions.py @@ -0,0 +1,113 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width = 256 +canvas_height = 256 +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/test_eval_positions/target.png') +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, # background_image + *scene_args) +img = img / 256 # 
Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/test_eval_positions/init.png') + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, color], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + # Evaluate 1000 positions + eval_positions = torch.rand(1000, 2).to(img.device) * 256 + # for grid_sample() + grid_eval_positions = (eval_positions / 256.0) * 2.0 - 1.0 + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf, + eval_positions = eval_positions) + samples = render(256, # width + 256, # height + 0, # num_samples_x + 0, # num_samples_y + t+1, # seed + None, # background_image + *scene_args) + samples = samples / 256 # Normalize SDF to [0, 1] + target_sampled = torch.nn.functional.grid_sample(\ + target.view(1, 1, target.shape[0], target.shape[1]), + grid_eval_positions.view(1, -1, 1, 2), mode='nearest') + loss = (samples - target_sampled).pow(2).mean() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', circle.radius) + print('center:', circle.center) + print('color:', circle_group.fill_color) + +# Render the final result. 
+scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, # background_image + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/test_eval_positions/final.png') diff --git a/diffvg/apps/textureSyn/1.jpg b/diffvg/apps/textureSyn/1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e35118c31771b60c14771ee8f94d4aa0adb352d Binary files /dev/null and b/diffvg/apps/textureSyn/1.jpg differ diff --git a/diffvg/apps/textureSyn/2.jpg b/diffvg/apps/textureSyn/2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2f6ea1b8fda16e1b2c39e92b1d4e1ad9561d733 Binary files /dev/null and b/diffvg/apps/textureSyn/2.jpg differ diff --git a/diffvg/apps/textureSyn/3.jpg b/diffvg/apps/textureSyn/3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3270d6b4ba5c93b533d455642e4dfa6e258093f Binary files /dev/null and b/diffvg/apps/textureSyn/3.jpg differ diff --git a/diffvg/apps/textureSyn/make_gif.py b/diffvg/apps/textureSyn/make_gif.py new file mode 100644 index 0000000000000000000000000000000000000000..906c0eebe56f8ede00e1f6021de40fe23e0e947b --- /dev/null +++ b/diffvg/apps/textureSyn/make_gif.py @@ -0,0 +1,26 @@ +# for gif making +import imageio +import numpy as np +import os +from PIL import Image +from math import floor + +def make_gif(savePath, outputPath, frame_every_X_steps=15, repeat_ending=15, total_iter=200): + number_files = len(os.listdir(savePath)) - 2 + frame_every_X_steps = frame_every_X_steps + repeat_ending = repeat_ending + steps = np.arange(floor(number_files / frame_every_X_steps)) * frame_every_X_steps + steps = steps + (number_files - np.max(steps)) + + images = [] + for f in range(total_iter-1): + # for f in steps: + filename = 
savePath + 'iter_' + str(f+1) + '.png' + images.append(imageio.imread(filename)) + + # repeat ending + for _ in range(repeat_ending): + filename = savePath + 'final.png' + # filename = savePath + 'iter_' + str(number_files) + '.png' + images.append(imageio.imread(filename)) + imageio.mimsave(outputPath, images) \ No newline at end of file diff --git a/diffvg/apps/textureSyn/patchBasedTextureSynthesis.py b/diffvg/apps/textureSyn/patchBasedTextureSynthesis.py new file mode 100644 index 0000000000000000000000000000000000000000..91ce58592a3435ab4b25c99a021d8470dcb8c5ec --- /dev/null +++ b/diffvg/apps/textureSyn/patchBasedTextureSynthesis.py @@ -0,0 +1,388 @@ +#imports +import numpy as np +import matplotlib.pyplot as plt +import os + +from math import floor, ceil +from random import randint + +from sklearn.neighbors import KDTree +from skimage.util.shape import view_as_windows +from skimage import io + +from PIL import Image, ImageDraw +from IPython.display import clear_output + +class patchBasedTextureSynthesis: + + def __init__(self, exampleMapPath, in_outputPath, in_outputSize, in_patchSize, in_overlapSize, in_windowStep = 5, in_mirror_hor = True, in_mirror_vert = True, in_shapshots = True): + self.exampleMap = self.loadExampleMap(exampleMapPath) + self.snapshots = in_shapshots + self.outputPath = in_outputPath + self.outputSize = in_outputSize + self.patchSize = in_patchSize + self.overlapSize = in_overlapSize + self.mirror_hor = in_mirror_hor + self.mirror_vert = in_mirror_vert + self.total_patches_count = 0 #excluding mirrored versions + self.windowStep = 5 + self.iter = 0 + + self.checkIfDirectoryExists() #check if output directory exists + self.examplePatches = self.prepareExamplePatches() + self.canvas, self.filledMap, self.idMap = self.initCanvas() + self.initFirstPatch() #place random block to start with + self.kdtree_topOverlap, self.kdtree_leftOverlap, self.kdtree_combined = self.initKDtrees() + + self.PARM_truncation = 0.8 + self.PARM_attenuation = 2 + + 
def checkIfDirectoryExists(self): + if not os.path.exists(self.outputPath): + os.makedirs(self.outputPath) + + def resolveAll(self): + self.saveParams() + #resolve all unresolved patches + for i in range(np.sum(1-self.filledMap).astype(int)): + self.resolveNext() + + if not self.snapshots: + img = Image.fromarray(np.uint8(self.canvas*255)) + img = img.resize((self.outputSize[0], self.outputSize[1]), resample=0, box=None) + img.save(self.outputPath + 'out.jpg') + # else: + # self.visualize([0,0], [], [], showCandidates=False) + return img + def saveParams(self): + #write + text_file = open(self.outputPath + 'params.txt', "w") + text_file.write("PatchSize: %d \nOverlapSize: %d \nMirror Vert: %d \nMirror Hor: %d" % (self.patchSize, self.overlapSize, self.mirror_vert, self.mirror_hor)) + text_file.close() + + def resolveNext(self): + #coordinate of the next one to resolve + coord = self.idCoordTo2DCoord(np.sum(self.filledMap), np.shape(self.filledMap)) #get 2D coordinate of next to resolve patch + #get overlap areas of the patch we want to resolve + overlapArea_Top = self.getOverlapAreaTop(coord) + overlapArea_Left = self.getOverlapAreaLeft(coord) + #find most similar patch from the examples + dist, ind = self.findMostSimilarPatches(overlapArea_Top, overlapArea_Left, coord) + + if self.mirror_hor or self.mirror_vert: + #check that top and left neighbours are not mirrors + dist, ind = self.checkForMirrors(dist, ind, coord) + + #choose random valid patch + probabilities = self.distances2probability(dist, self.PARM_truncation, self.PARM_attenuation) + chosenPatchId = np.random.choice(ind, 1, p=probabilities) + + #update canvas + blend_top = (overlapArea_Top is not None) + blend_left = (overlapArea_Left is not None) + self.updateCanvas(chosenPatchId, coord[0], coord[1], blend_top, blend_left) + + #update filledMap and id map ;) + self.filledMap[coord[0], coord[1]] = 1 + self.idMap[coord[0], coord[1]] = chosenPatchId + + #visualize + # self.visualize(coord, chosenPatchId, 
ind) + + self.iter += 1 + + def visualize(self, coord, chosenPatchId, nonchosenPatchId, showCandidates = True): + #full visualization includes both example and generated img + canvasSize = np.shape(self.canvas) + #insert generated image + vis = np.zeros((canvasSize[0], canvasSize[1] * 2, 3)) + 0.2 + vis[:, 0:canvasSize[1]] = self.canvas + #insert example + exampleHighlited = np.copy(self.exampleMap) + if showCandidates: + exampleHighlited = self.hightlightPatchCandidates(chosenPatchId, nonchosenPatchId) + h = floor(canvasSize[0] / 2) + w = floor(canvasSize[1] / 2) + exampleResized = self.resize(exampleHighlited, [h, w]) + offset_h = floor((canvasSize[0] - h) / 2) + offset_w = floor((canvasSize[1] - w) / 2) + + vis[offset_h:offset_h+h, canvasSize[1]+offset_w:canvasSize[1]+offset_w+w] = exampleResized + + #show live update + plt.imshow(vis) + clear_output(wait=True) + display(plt.show()) + + if self.snapshots: + img = Image.fromarray(np.uint8(vis*255)) + img = img.resize((self.outputSize[0]*2, self.outputSize[1]), resample=0, box=None) + img.save(self.outputPath + 'out' + str(self.iter) + '.jpg') + + def hightlightPatchCandidates(self, chosenPatchId, nonchosenPatchId): + + result = np.copy(self.exampleMap) + + #mod patch ID + chosenPatchId = chosenPatchId[0] % self.total_patches_count + if len(nonchosenPatchId)>0: + nonchosenPatchId = nonchosenPatchId % self.total_patches_count + #exlcude chosen from nonchosen + nonchosenPatchId = np.delete(nonchosenPatchId, np.where(nonchosenPatchId == chosenPatchId)) + #highlight non chosen candidates + c = [0.25, 0.9 ,0.45] + self.highlightPatches(result, nonchosenPatchId, color=c, highlight_width = 4, alpha = 0.5) + + #hightlight chosen + c = [1.0, 0.25, 0.15] + self.highlightPatches(result, [chosenPatchId], color=c, highlight_width = 4, alpha = 1) + + return result + + def highlightPatches(self, writeResult, patchesIDs, color, highlight_width = 2, solid = False, alpha = 0.1): + + searchWindow = self.patchSize + 
2*self.overlapSize + + #number of possible steps + row_steps = floor((np.shape(writeResult)[0] - searchWindow) / self.windowStep) + 1 + col_steps = floor((np.shape(writeResult)[1] - searchWindow) / self.windowStep) + 1 + + for i in range(len(patchesIDs)): + + chosenPatchId = patchesIDs[i] + + #patch Id to step + patch_row = floor(chosenPatchId / col_steps) + patch_col = chosenPatchId - patch_row * col_steps + + #highlight chosen patch (below are boundaries of the example patch) + row_start = self.windowStep* patch_row + row_end = self.windowStep * patch_row + searchWindow + col_start = self.windowStep * patch_col + col_end = self.windowStep * patch_col + searchWindow + + if not solid: + w = highlight_width + overlap = np.copy(writeResult[row_start:row_start+w, col_start:col_end]) + writeResult[row_start:row_start+w, col_start:col_end] = overlap * (1-alpha) + (np.zeros(np.shape(overlap))+color) * alpha #top + overlap = np.copy(writeResult[row_end-w:row_end, col_start:col_end]) + writeResult[row_end-w:row_end, col_start:col_end] = overlap * (1-alpha) + (np.zeros(np.shape(overlap))+color) * alpha #bot + overlap = np.copy( writeResult[row_start:row_end, col_start:col_start+w]) + writeResult[row_start:row_end, col_start:col_start+w] = overlap * (1-alpha) + (np.zeros(np.shape(overlap))+color) * alpha #left + overlap = np.copy(writeResult[row_start:row_end, col_end-w:col_end]) + writeResult[row_start:row_end, col_end-w:col_end] = overlap * (1-alpha) + (np.zeros(np.shape(overlap))+color) * alpha #end + else: + a = alpha + writeResult[row_start:row_end, col_start:col_end] = writeResult[row_start:row_end, col_start:col_end] * (1-a) + (np.zeros(np.shape(writeResult[row_start:row_end, col_start:col_end]))+color) * a + + + def resize(self, imgArray, targetSize): + img = Image.fromarray(np.uint8(imgArray*255)) + img = img.resize((targetSize[0], targetSize[1]), resample=0, box=None) + return np.array(img)/255 + + def findMostSimilarPatches(self, overlapArea_Top, overlapArea_Left, 
coord, in_k=5): + + #check which KD tree we need to use + if (overlapArea_Top is not None) and (overlapArea_Left is not None): + combined = self.getCombinedOverlap(overlapArea_Top.reshape(-1), overlapArea_Left.reshape(-1)) + dist, ind = self.kdtree_combined.query([combined], k=in_k) + elif overlapArea_Top is not None: + dist, ind = self.kdtree_topOverlap.query([overlapArea_Top.reshape(-1)], k=in_k) + elif overlapArea_Left is not None: + dist, ind = self.kdtree_leftOverlap.query([overlapArea_Left.reshape(-1)], k=in_k) + else: + raise Exception("ERROR: no valid overlap area is passed to -findMostSimilarPatch-") + dist = dist[0] + ind = ind[0] + + return dist, ind + + #disallow visually similar blocks to be placed next to each other + def checkForMirrors(self, dist, ind, coord, thres = 3): + remove_i = [] + #do I have a top or left neighbour + if coord[0]-1>-1: + top_neigh = int(self.idMap[coord[0]-1, coord[1]]) + for i in range(len(ind)): + if (abs(ind[i]%self.total_patches_count - top_neigh%self.total_patches_count) < thres): + remove_i.append(i) + if coord[1]-1>-1: + left_neigh = int(self.idMap[coord[0], coord[1]-1]) + for i in range(len(ind)): + if (abs(ind[i]%self.total_patches_count - left_neigh%self.total_patches_count) < thres): + remove_i.append(i) + + dist = np.delete(dist, remove_i) + ind = np.delete(ind, remove_i) + + return dist, ind + + + def distances2probability(self, distances, PARM_truncation, PARM_attenuation): + + probabilities = 1 - distances / np.max(distances) + probabilities *= (probabilities > PARM_truncation) + probabilities = pow(probabilities, PARM_attenuation) #attenuate the values + #check if we didn't truncate everything! 
+ if np.sum(probabilities) == 0: + #then just revert it + probabilities = 1 - distances / np.max(distances) + probabilities *= (probabilities > PARM_truncation*np.max(probabilities)) # truncate the values (we want top truncate%) + probabilities = pow(probabilities, PARM_attenuation) + probabilities /= np.sum(probabilities) #normalize so they add up to one + + return probabilities + + def getOverlapAreaTop(self, coord): + #do I have a top neighbour + if coord[0]-1>-1: + canvasPatch = self.patchCoord2canvasPatch(coord) + return canvasPatch[0:self.overlapSize, :, :] + else: + return None + + def getOverlapAreaLeft(self, coord): + #do I have a left neighbour + if coord[1]-1>-1: + canvasPatch = self.patchCoord2canvasPatch(coord) + return canvasPatch[:, 0:self.overlapSize, :] + else: + return None + + def initKDtrees(self): + #prepate overlap patches + topOverlap = self.examplePatches[:, 0:self.overlapSize, :, :] + leftOverlap = self.examplePatches[:, :, 0:self.overlapSize, :] + shape_top = np.shape(topOverlap) + shape_left = np.shape(leftOverlap) + + flatten_top = topOverlap.reshape(shape_top[0], -1) + flatten_left = leftOverlap.reshape(shape_left[0], -1) + flatten_combined = self.getCombinedOverlap(flatten_top, flatten_left) + + tree_top = KDTree(flatten_top) + tree_left = KDTree(flatten_left) + tree_combined = KDTree(flatten_combined) + + return tree_top, tree_left, tree_combined + + #the corner of 2 overlaps is counted double + def getCombinedOverlap(self, top, left): + shape = np.shape(top) + if len(shape) > 1: + combined = np.zeros((shape[0], shape[1]*2)) + combined[0:shape[0], 0:shape[1]] = top + combined[0:shape[0], shape[1]:shape[1]*2] = left + else: + combined = np.zeros((shape[0]*2)) + combined[0:shape[0]] = top + combined[shape[0]:shape[0]*2] = left + return combined + + def initFirstPatch(self): + #grab a random block + patchId = randint(0, np.shape(self.examplePatches)[0]) + #mark out fill map + self.filledMap[0, 0] = 1 + self.idMap[0, 0] = patchId % 
self.total_patches_count + #update canvas + self.updateCanvas(patchId, 0, 0, False, False) + #visualize + # self.visualize([0,0], [patchId], []) + + + def prepareExamplePatches(self): + + searchKernelSize = self.patchSize + 2 * self.overlapSize + + result = view_as_windows(self.exampleMap, [searchKernelSize, searchKernelSize, 3] , self.windowStep) + shape = np.shape(result) + result = result.reshape(shape[0]*shape[1], searchKernelSize, searchKernelSize, 3) + + self.total_patches_count = shape[0]*shape[1] + + if self.mirror_hor: + #flip along horizonal axis + hor_result = np.zeros(np.shape(result)) + + for i in range(self.total_patches_count): + hor_result[i] = result[i][::-1, :, :] + + result = np.concatenate((result, hor_result)) + if self.mirror_vert: + vert_result = np.zeros((shape[0]*shape[1], searchKernelSize, searchKernelSize, 3)) + + for i in range(self.total_patches_count): + vert_result[i] = result[i][:, ::-1, :] + + result = np.concatenate((result, vert_result)) + + return result + + def initCanvas(self): + + #check whether the outputSize adheres to patch+overlap size + num_patches_X = ceil((self.outputSize[0]-self.overlapSize)/(self.patchSize+self.overlapSize)) + num_patches_Y = ceil((self.outputSize[1]-self.overlapSize)/(self.patchSize+self.overlapSize)) + #calc needed output image size + required_size_X = num_patches_X*self.patchSize + (num_patches_X+1)*self.overlapSize + required_size_Y = num_patches_Y*self.patchSize + (num_patches_X+1)*self.overlapSize + + #create empty canvas + canvas = np.zeros((required_size_X, required_size_Y, 3)) + filledMap = np.zeros((num_patches_X, num_patches_Y)) #map showing which patches have been resolved + idMap = np.zeros((num_patches_X, num_patches_Y)) - 1 #stores patches id + + print("modified output size: ", np.shape(canvas)) + print("number of patches: ", np.shape(filledMap)[0]) + + return canvas, filledMap, idMap + + def idCoordTo2DCoord(self, idCoord, imgSize): + row = int(floor(idCoord / imgSize[0])) + col = 
int(idCoord - row * imgSize[1]) + return [row, col] + + def updateCanvas(self, inputPatchId, coord_X, coord_Y, blendTop = False, blendLeft = False): + #translate Patch coordinate into Canvas coordinate + x_range = self.patchCoord2canvasCoord(coord_X) + y_range = self.patchCoord2canvasCoord(coord_Y) + examplePatch = self.examplePatches[inputPatchId] + if blendLeft: + canvasOverlap = self.canvas[x_range[0]:x_range[1], y_range[0]:y_range[0]+self.overlapSize] + examplePatchOverlap = np.copy(examplePatch[0][:, 0:self.overlapSize]) + examplePatch[0][:, 0:self.overlapSize] = self.linearBlendOverlaps(canvasOverlap, examplePatchOverlap, 'left') + if blendTop: + canvasOverlap = self.canvas[x_range[0]:x_range[0]+self.overlapSize, y_range[0]:y_range[1]] + examplePatchOverlap = np.copy(examplePatch[0][0:self.overlapSize, :]) + examplePatch[0][0:self.overlapSize, :] = self.linearBlendOverlaps(canvasOverlap, examplePatchOverlap, 'top') + self.canvas[x_range[0]:x_range[1], y_range[0]:y_range[1]] = examplePatch + + def linearBlendOverlaps(self, canvasOverlap, examplePatchOverlap, mode): + if mode == 'left': + mask = np.repeat(np.arange(self.overlapSize)[np.newaxis, :], np.shape(canvasOverlap)[0], axis=0) / self.overlapSize + elif mode == 'top': + mask = np.repeat(np.arange(self.overlapSize)[:, np.newaxis], np.shape(canvasOverlap)[1], axis=1) / self.overlapSize + mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2) #cast to 3d array + return canvasOverlap * (1 - mask) + examplePatchOverlap * mask + + #def minimumBoundaryError(self, canvasOverlap, examplePatchOverlap, mode) + + def patchCoord2canvasCoord(self, coord): + return [(self.patchSize+self.overlapSize)*coord, (self.patchSize+self.overlapSize)*(coord+1) + self.overlapSize] + + def patchCoord2canvasPatch(self, coord): + x_range = self.patchCoord2canvasCoord(coord[0]) + y_range = self.patchCoord2canvasCoord(coord[1]) + return np.copy(self.canvas[x_range[0]:x_range[1], y_range[0]:y_range[1]]) + + def loadExampleMap(self, 
exampleMapPath): + exampleMap = io.imread(exampleMapPath) #returns an MxNx3 array + exampleMap = exampleMap / 255.0 #normalize + #make sure it is 3channel RGB + if (np.shape(exampleMap)[-1] > 3): + exampleMap = exampleMap[:,:,:3] #remove Alpha Channel + elif (len(np.shape(exampleMap)) == 2): + exampleMap = np.repeat(exampleMap[np.newaxis, :, :], 3, axis=0) #convert from Grayscale to RGB + return exampleMap diff --git a/diffvg/apps/textureSyn/traced_1.png b/diffvg/apps/textureSyn/traced_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f3af50ea29cc7d6258ba75732fc1d09c2014c1a8 Binary files /dev/null and b/diffvg/apps/textureSyn/traced_1.png differ diff --git a/diffvg/apps/textureSyn/traced_1.svg b/diffvg/apps/textureSyn/traced_1.svg new file mode 100644 index 0000000000000000000000000000000000000000..d811c65d82bbea34076224ad010f528f5fb5de09 --- /dev/null +++ b/diffvg/apps/textureSyn/traced_1.svg @@ -0,0 +1,1969 @@ + + + + +traced_1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/textureSyn/traced_2.png b/diffvg/apps/textureSyn/traced_2.png new file mode 100644 index 0000000000000000000000000000000000000000..614fed620b6901f1758c5912998e6ab2ba21c896 Binary files /dev/null and b/diffvg/apps/textureSyn/traced_2.png differ diff --git a/diffvg/apps/textureSyn/traced_2.svg b/diffvg/apps/textureSyn/traced_2.svg new file mode 100644 index 0000000000000000000000000000000000000000..478292ea7e48743e6b081c794c7ff0d596c1ebe6 --- /dev/null +++ b/diffvg/apps/textureSyn/traced_2.svg @@ -0,0 +1,8204 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/textureSyn/traced_3.png b/diffvg/apps/textureSyn/traced_3.png new file mode 100644 index 0000000000000000000000000000000000000000..f0fe2f25507abfb34bf528e46732a58518739383 Binary files /dev/null and b/diffvg/apps/textureSyn/traced_3.png differ diff --git a/diffvg/apps/textureSyn/traced_3.svg b/diffvg/apps/textureSyn/traced_3.svg new file mode 100644 index 0000000000000000000000000000000000000000..0403cdc2a999711a576a72c0c5d8e6476d1bf7a1 --- /dev/null +++ b/diffvg/apps/textureSyn/traced_3.svg @@ -0,0 +1,13302 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/apps/texture_synthesis.py b/diffvg/apps/texture_synthesis.py new file mode 100644 index 0000000000000000000000000000000000000000..3a7ccce083bdccfa56a61bfc27ec049742ed117f --- /dev/null +++ b/diffvg/apps/texture_synthesis.py @@ -0,0 +1,180 @@ +import os, sys +import pydiffvg +import argparse +import torch +# import torch as th +import scipy.ndimage.filters as filters +# import numba +import numpy as np +from skimage import io +sys.path.append('./textureSyn') +from patchBasedTextureSynthesis import * +from make_gif import make_gif +import random +import ttools.modules + +from svgpathtools import svg2paths2, Path, is_path_segment +""" +python texture_synthesis.py textureSyn/traced_1.png --svg-path textureSyn/traced_1.svg --case 1 +""" + +def texture_syn(img_path): + ## get the width and height first + # input_img = io.imread(img_path) # returns an MxNx3 array + # output_size = [input_img.shape[1], input_img.shape[0]] + # output_path = "textureSyn/1/" + output_path = "results/texture_synthesis/%d"%(args.case) + patch_size = 40 # size of the patch (without the overlap) + overlap_size = 10 # the width of the overlap region + output_size = [300, 300] + pbts = patchBasedTextureSynthesis(img_path, output_path, output_size, patch_size, overlap_size, in_windowStep=5, + in_mirror_hor=True, in_mirror_vert=True, in_shapshots=False) + target_img = pbts.resolveAll() + return np.array(target_img) + + +def render(canvas_width, canvas_height, shapes, shape_groups, samples=2): + _render = pydiffvg.RenderFunction.apply + scene_args = 
pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + samples, # num_samples_x + samples, # num_samples_y + 0, # seed + None, + *scene_args) + return img + +def big_bounding_box(paths_n_stuff): + """Finds a BB containing a collection of paths, Bezier path segments, and + points (given as complex numbers).""" + bbs = [] + for thing in paths_n_stuff: + if is_path_segment(thing) or isinstance(thing, Path): + bbs.append(thing.bbox()) + elif isinstance(thing, complex): + bbs.append((thing.real, thing.real, thing.imag, thing.imag)) + else: + try: + complexthing = complex(thing) + bbs.append((complexthing.real, complexthing.real, + complexthing.imag, complexthing.imag)) + except ValueError: + raise TypeError( + "paths_n_stuff can only contains Path, CubicBezier, " + "QuadraticBezier, Line, and complex objects.") + xmins, xmaxs, ymins, ymaxs = list(zip(*bbs)) + xmin = min(xmins) + xmax = max(xmaxs) + ymin = min(ymins) + ymax = max(ymaxs) + return xmin, xmax, ymin, ymax + + +def main(args): + ## set device -> use cpu now since I haven't solved the nvcc issue + pydiffvg.set_use_gpu(False) + # pydiffvg.set_device(torch.device('cuda:1')) + ## use L2 for now + # perception_loss = ttools.modules.LPIPS().to(pydiffvg.get_device()) + + ## generate a texture synthesized + target_img = texture_syn(args.target) + tar_h, tar_w = target_img.shape[1], target_img.shape[0] + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg_path) + + + ## svgpathtools for checking the bounding box + # paths, _, _ = svg2paths2(args.svg_path) + # print(len(paths)) + # xmin, xmax, ymin, ymax = big_bounding_box(paths) + # print(xmin, xmax, ymin, ymax) + # input("check") + + + print('tar h : %d tar w : %d'%(tar_h, tar_w)) + print('canvas h : %d canvas w : %d' % (canvas_height, canvas_width)) + scale_ratio = tar_h / canvas_height + print("scale ratio : ", scale_ratio) + 
# input("check") + for path in shapes: + path.points[..., 0] = path.points[..., 0] * scale_ratio + path.points[..., 1] = path.points[..., 1] * scale_ratio + + init_img = render(tar_w, tar_h, shapes, shape_groups) + pydiffvg.imwrite(init_img.cpu(), 'results/texture_synthesis/%d/init.png'%(args.case), gamma=2.2) + # input("check") + random.seed(1234) + torch.manual_seed(1234) + + points_vars = [] + for path in shapes: + path.points.requires_grad = True + points_vars.append(path.points) + color_vars = [] + for group in shape_groups: + group.fill_color.requires_grad = True + color_vars.append(group.fill_color) + # Optimize + points_optim = torch.optim.Adam(points_vars, lr=1.0) + color_optim = torch.optim.Adam(color_vars, lr=0.01) + + target = torch.from_numpy(target_img).to(torch.float32) / 255.0 + target = target.pow(2.2) + target = target.to(pydiffvg.get_device()) + target = target.unsqueeze(0) + target = target.permute(0, 3, 1, 2) # NHWC -> NCHW + canvas_width, canvas_height = target.shape[3], target.shape[2] + # print('canvas h : %d canvas w : %d' % (canvas_height, canvas_width)) + # input("check") + + for t in range(args.max_iter): + print('iteration:', t) + points_optim.zero_grad() + color_optim.zero_grad() + cur_img = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(cur_img.cpu(), 'results/texture_synthesis/%d/iter_%d.png'%(args.case, t), gamma=2.2) + cur_img = cur_img[:, :, :3] + cur_img = cur_img.unsqueeze(0) + cur_img = cur_img.permute(0, 3, 1, 2) # NHWC -> NCHW + + ## perceptual loss + # loss = perception_loss(cur_img, target) + ## l2 loss + loss = (cur_img - target).pow(2).mean() + print('render loss:', loss.item()) + loss.backward() + + points_optim.step() + color_optim.step() + + for group in shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + ## write svg + if t % 10 == 0 or t == args.max_iter - 1: + pydiffvg.save_svg('results/texture_synthesis/%d/iter_%d.svg'%(args.case, t), + canvas_width, canvas_height, shapes, 
shape_groups) + + ## render final result + final_img = render(tar_h, tar_w, shapes, shape_groups) + pydiffvg.imwrite(final_img.cpu(), 'results/texture_synthesis/%d/final.png'%(args.case), gamma=2.2) + + + from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", + "results/texture_synthesis/%d/iter_%d.png"%(args.case), "-vb", "20M", + "results/texture_synthesis/%d/out.mp4"%(args.case)]) + ## make gif + make_gif("results/texture_synthesis/%d"%(args.case), "results/texture_synthesis/%d/out.gif"%(args.case), frame_every_X_steps=1, repeat_ending=3, total_iter=args.max_iter) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + ## target image path + parser.add_argument("target", help="target image path") + parser.add_argument("--svg-path", type=str, help="the corresponding svg file path") + parser.add_argument("--max-iter", type=int, default=500, help="the max optimization iterations") + parser.add_argument("--case", type=int, default=1, help="just the case id for a separate result folder") + args = parser.parse_args() + main(args) \ No newline at end of file diff --git a/diffvg/atomic.cpp b/diffvg/atomic.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9c642b9b84357a10f2155d28324517f36d00b0cb --- /dev/null +++ b/diffvg/atomic.cpp @@ -0,0 +1,27 @@ +//A hacky solution to get around the Ellipse include + +#ifdef WIN32 +#include +#include + +float win_atomic_add(float &target, float source) { + union { int i; float f; } old_val; + union { int i; float f; } new_val; + do { + old_val.f = target; + new_val.f = old_val.f + (float)source; + } while (InterlockedCompareExchange((LONG*)&target, (LONG)new_val.i, (LONG)old_val.i) != old_val.i); + return old_val.f; +} + +double win_atomic_add(double &target, double source) { + union { int64_t i; double f; } old_val; + union { int64_t i; double f; } new_val; + do { + old_val.f = target; + new_val.f = old_val.f + (double)source; + } while 
(InterlockedCompareExchange64((LONG64*)&target, (LONG64)new_val.i, (LONG64)old_val.i) != old_val.i); + return old_val.f; +} + +#endif \ No newline at end of file diff --git a/diffvg/atomic.h b/diffvg/atomic.h new file mode 100644 index 0000000000000000000000000000000000000000..c721722df23f17097c67b79b05b57eecd12c5912 --- /dev/null +++ b/diffvg/atomic.h @@ -0,0 +1,139 @@ +#pragma once + +#include "diffvg.h" +#include "vector.h" +#include "matrix.h" + +// https://stackoverflow.com/questions/39274472/error-function-atomicadddouble-double-has-already-been-defined +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 +#else +static inline DEVICE double atomicAdd(double *address, double val) { + unsigned long long int* address_as_ull = (unsigned long long int*)address; + unsigned long long int old = *address_as_ull, assumed; + if (val == 0.0) + return __longlong_as_double(old); + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed))); + } while (assumed != old); + return __longlong_as_double(old); +} +#endif + +#ifndef WIN32 + template + DEVICE + inline T0 atomic_add_(T0 &target, T1 source) { + #ifdef __CUDA_ARCH__ + return atomicAdd(&target, (T0)source); + #else + T0 old_val; + T0 new_val; + do { + old_val = target; + new_val = old_val + source; + } while (!__atomic_compare_exchange(&target, &old_val, &new_val, true, + std::memory_order::memory_order_seq_cst, + std::memory_order::memory_order_seq_cst)); + return old_val; + #endif + } + + DEVICE + inline + float atomic_add(float &target, float source) { + return atomic_add_(target, source); + } + DEVICE + inline + double atomic_add(double &target, double source) { + return atomic_add_(target, source); + } +#else + float win_atomic_add(float &target, float source); + double win_atomic_add(double &target, double source); + DEVICE + static float atomic_add(float &target, float source) { + #ifdef __CUDA_ARCH__ + return atomicAdd(&target, source); + #else + 
return win_atomic_add(target, source); + #endif + } + DEVICE + static double atomic_add(double &target, double source) { + #ifdef __CUDA_ARCH__ + return atomicAdd(&target, (double)source); + #else + return win_atomic_add(target, source); + #endif + } +#endif + +template +DEVICE +inline T0 atomic_add(T0 *target, T1 source) { + return atomic_add(*target, (T0)source); +} + +template +DEVICE +inline TVector2 atomic_add(TVector2 &target, const TVector2 &source) { + atomic_add(target[0], source[0]); + atomic_add(target[1], source[1]); + return target; +} + +template +DEVICE +inline void atomic_add(T0 *target, const TVector2 &source) { + atomic_add(target[0], (T0)source[0]); + atomic_add(target[1], (T0)source[1]); +} + +template +DEVICE +inline TVector3 atomic_add(TVector3 &target, const TVector3 &source) { + atomic_add(target[0], source[0]); + atomic_add(target[1], source[1]); + atomic_add(target[2], source[2]); + return target; +} + +template +DEVICE +inline void atomic_add(T0 *target, const TVector3 &source) { + atomic_add(target[0], (T0)source[0]); + atomic_add(target[1], (T0)source[1]); + atomic_add(target[2], (T0)source[2]); +} + +template +DEVICE +inline TVector4 atomic_add(TVector4 &target, const TVector4 &source) { + atomic_add(target[0], source[0]); + atomic_add(target[1], source[1]); + atomic_add(target[2], source[2]); + atomic_add(target[3], source[3]); + return target; +} + +template +DEVICE +inline void atomic_add(T0 *target, const TVector4 &source) { + atomic_add(target[0], (T0)source[0]); + atomic_add(target[1], (T0)source[1]); + atomic_add(target[2], (T0)source[2]); + atomic_add(target[3], (T0)source[3]); +} + +template +DEVICE +inline void atomic_add(T0 *target, const TMatrix3x3 &source) { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + atomic_add(target[3 * i + j], (T0)source(i, j)); + } + } +} + diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so b/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so new file mode 100644 
index 0000000000000000000000000000000000000000..0c4d6399381996588883bf9d86f59a4e4aabdb6c --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93aca3d74b090dbc13313c0bfef63a2ec62c4fbdedf6dce355fb961cc1eb639d +size 3744344 diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/__init__.py b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..24f3dd14e9a7f2da7b6fa7e731b6fd698fb45821 --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/__init__.py @@ -0,0 +1,9 @@ +from .device import * +from .shape import * +from .pixel_filter import * +from .render_pytorch import * +from .image import * +from .parse_svg import * +from .color import * +from .optimize_svg import * +from .save_svg import * \ No newline at end of file diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/color.py b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/color.py new file mode 100644 index 0000000000000000000000000000000000000000..68c360f1ce1601e87b34a6fd36c70274e24dad94 --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/color.py @@ -0,0 +1,24 @@ +import pydiffvg +import torch + +class LinearGradient: + def __init__(self, + begin = torch.tensor([0.0, 0.0]), + end = torch.tensor([0.0, 0.0]), + offsets = torch.tensor([0.0]), + stop_colors = torch.tensor([0.0, 0.0, 0.0, 0.0])): + self.begin = begin + self.end = end + self.offsets = offsets + self.stop_colors = stop_colors + +class RadialGradient: + def __init__(self, + center = torch.tensor([0.0, 0.0]), + radius = torch.tensor([0.0, 0.0]), + offsets = torch.tensor([0.0]), + stop_colors = torch.tensor([0.0, 0.0, 0.0, 0.0])): + self.center = center + self.radius = radius + self.offsets = offsets + self.stop_colors = stop_colors diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/device.py 
b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/device.py new file mode 100644 index 0000000000000000000000000000000000000000..420883d60130a8f21e96bae19ba6025ffd0ed55e --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/device.py @@ -0,0 +1,25 @@ +import torch + +use_gpu = torch.cuda.is_available() +device = torch.device('cuda') if use_gpu else torch.device('cpu') + +def set_use_gpu(v): + global use_gpu + global device + use_gpu = v + if not use_gpu: + device = torch.device('cpu') + +def get_use_gpu(): + global use_gpu + return use_gpu + +def set_device(d): + global device + global use_gpu + device = d + use_gpu = device.type == 'cuda' + +def get_device(): + global device + return device diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/image.py b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/image.py new file mode 100644 index 0000000000000000000000000000000000000000..f83fea259fa25df503e67d9793f1b939a0f16177 --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/image.py @@ -0,0 +1,22 @@ +import numpy as np +import skimage +import skimage.io +import os + +def imwrite(img, filename, gamma = 2.2, normalize = False): + directory = os.path.dirname(filename) + if directory != '' and not os.path.exists(directory): + os.makedirs(directory) + + if not isinstance(img, np.ndarray): + img = img.data.numpy() + if normalize: + img_rng = np.max(img) - np.min(img) + if img_rng > 0: + img = (img - np.min(img)) / img_rng + img = np.clip(img, 0.0, 1.0) + if img.ndim==2: + #repeat along the third dimension + img=np.expand_dims(img,2) + img[:, :, :3] = np.power(img[:, :, :3], 1.0/gamma) + skimage.io.imsave(filename, (img * 255).astype(np.uint8)) \ No newline at end of file diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/optimize_svg.py b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/optimize_svg.py new file mode 100644 index 0000000000000000000000000000000000000000..ce0097f51afca413cfd6a2dcf7ef257a443002ec --- 
/dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/optimize_svg.py @@ -0,0 +1,1607 @@ +import json +import copy +import xml.etree.ElementTree as etree +from xml.dom import minidom +import warnings +import torch +import numpy as np +import re +import sys +import pydiffvg +import math +from collections import namedtuple +import cssutils + +class SvgOptimizationSettings: + + default_params = { + "optimize_color": True, + "color_lr": 2e-3, + "optimize_alpha": False, + "alpha_lr": 2e-3, + "optimizer": "Adam", + "transforms": { + "optimize_transforms":True, + "transform_mode":"rigid", + "translation_mult":1e-3, + "transform_lr":2e-3 + }, + "circles": { + "optimize_center": True, + "optimize_radius": True, + "shape_lr": 2e-1 + }, + "paths": { + "optimize_points": True, + "shape_lr": 2e-1 + }, + "gradients": { + "optimize_stops": True, + "stop_lr": 2e-3, + "optimize_color": True, + "color_lr": 2e-3, + "optimize_alpha": False, + "alpha_lr": 2e-3, + "optimize_location": True, + "location_lr": 2e-1 + } + } + + optims = { + "Adam": torch.optim.Adam, + "SGD": torch.optim.SGD, + "ASGD": torch.optim.ASGD, + } + + #region methods + def __init__(self, f=None): + self.store = {} + if f is None: + self.store["default"] = copy.deepcopy(SvgOptimizationSettings.default_params) + else: + self.store = json.load(f) + + # create default alias for root + def default_name(self, dname): + self.dname = dname + if dname not in self.store: + self.store[dname] = self.store["default"] + + def retrieve(self, node_id): + if node_id not in self.store: + return (self.store["default"], False) + else: + return (self.store[node_id], True) + + def reset_to_defaults(self, node_id): + if node_id in self.store: + del self.store[node_id] + + return self.store["default"] + + def undefault(self, node_id): + if node_id not in self.store: + self.store[node_id] = copy.deepcopy(self.store["default"]) + + return self.store[node_id] + + def override_optimizer(self, optimizer): + if optimizer is not 
None: + for v in self.store.values(): + v["optimizer"] = optimizer + + def global_override(self, path, value): + for store in self.store.values(): + d = store + for key in path[:-1]: + d = d[key] + + d[path[-1]] = value + + def save(self, file): + self.store["default"] = self.store[self.dname] + json.dump(self.store, file, indent="\t") + #endregion + +class OptimizableSvg: + + class TransformTools: + @staticmethod + def parse_matrix(vals): + assert(len(vals)==6) + return np.array([[vals[0],vals[2],vals[4]],[vals[1], vals[3], vals[5]],[0,0,1]]) + + @staticmethod + def parse_translate(vals): + assert(len(vals)>=1 and len(vals)<=2) + mat=np.eye(3) + mat[0,2]=vals[0] + if len(vals)>1: + mat[1,2]=vals[1] + return mat + + @staticmethod + def parse_rotate(vals): + assert (len(vals) == 1 or len(vals) == 3) + mat = np.eye(3) + rads=math.radians(vals[0]) + sint=math.sin(rads) + cost=math.cos(rads) + mat[0:2, 0:2] = np.array([[cost,-sint],[sint,cost]]) + if len(vals) > 1: + tr1=parse_translate(vals[1:3]) + tr2=parse_translate([-vals[1],-vals[2]]) + mat=tr1 @ mat @ tr2 + return mat + + @staticmethod + def parse_scale(vals): + assert (len(vals) >= 1 and len(vals) <= 2) + d=np.array([vals[0], vals[1] if len(vals)>1 else vals[0],1]) + return np.diag(d) + + @staticmethod + def parse_skewx(vals): + assert(len(vals)==1) + m=np.eye(3) + m[0,1]=vals[0] + return m + + @staticmethod + def parse_skewy(vals): + assert (len(vals) == 1) + m = np.eye(3) + m[1, 0] = vals[0] + return m + + @staticmethod + def transformPoints(pointsTensor, transform): + assert(transform is not None) + one=torch.ones((pointsTensor.shape[0],1),device=pointsTensor.device) + homo_points = torch.cat([pointsTensor, one], dim=1) + mult = transform.mm(homo_points.permute(1,0)).permute(1,0) + tfpoints=mult[:, 0:2].contiguous() + #print(torch.norm(mult[:,2]-one)) + assert(pointsTensor.shape == tfpoints.shape) + return tfpoints + + @staticmethod + def promote_numpy(M): + ret = np.eye(3) + ret[0:2, 0:2] = M + return ret + 
+ @staticmethod + def recompose_numpy(Theta,ScaleXY,ShearX,TXY): + cost=math.cos(Theta) + sint=math.sin(Theta) + Rot=np.array([[cost, -sint],[sint, cost]]) + Scale=np.diag(ScaleXY) + Shear=np.eye(2) + Shear[0,1]=ShearX + + Translate=np.eye(3) + Translate[0:2,2]=TXY + + M=OptimizableSvg.TransformTools.promote_numpy(Rot @ Scale @ Shear) @ Translate + return M + + @staticmethod + def promote(m): + M=torch.eye(3).to(m.device) + M[0:2,0:2]=m + return M + + @staticmethod + def make_rot(Theta): + sint=Theta.sin().squeeze() + cost=Theta.cos().squeeze() + #m=torch.tensor([[cost, -sint],[sint, cost]]) + Rot=torch.stack((torch.stack((cost,-sint)),torch.stack((sint,cost)))) + return Rot + + @staticmethod + def make_scale(ScaleXY): + if ScaleXY.squeeze().dim()==0: + ScaleXY=ScaleXY.squeeze() + #uniform scale + return torch.diag(torch.stack([ScaleXY,ScaleXY])).to(ScaleXY.device) + else: + return torch.diag(ScaleXY).to(ScaleXY.device) + + @staticmethod + def make_shear(ShearX): + m=torch.eye(2).to(ShearX.device) + m[0,1]=ShearX + return m + + @staticmethod + def make_translate(TXY): + m=torch.eye(3).to(TXY.device) + m[0:2,2]=TXY + return m + + @staticmethod + def recompose(Theta,ScaleXY,ShearX,TXY): + Rot=OptimizableSvg.TransformTools.make_rot(Theta) + Scale=OptimizableSvg.TransformTools.make_scale(ScaleXY) + Shear=OptimizableSvg.TransformTools.make_shear(ShearX) + Translate=OptimizableSvg.TransformTools.make_translate(TXY) + + return OptimizableSvg.TransformTools.promote(Rot.mm(Scale).mm(Shear)).mm(Translate) + + TransformDecomposition=namedtuple("TransformDecomposition","theta scale shear translate") + TransformProperties=namedtuple("TransformProperties", "has_rotation has_scale has_mirror scale_uniform has_shear has_translation") + + @staticmethod + def make_named(decomp): + if not isinstance(decomp,OptimizableSvg.TransformTools.TransformDecomposition): + 
decomp=OptimizableSvg.TransformTools.TransformDecomposition(theta=decomp[0],scale=decomp[1],shear=decomp[2],translate=decomp[3]) + return decomp + + @staticmethod + def analyze_transform(decomp): + decomp=OptimizableSvg.TransformTools.make_named(decomp) + epsilon=1e-3 + has_rotation=abs(decomp.theta)>epsilon + has_scale=abs((abs(decomp.scale)-1)).max()>epsilon + scale_len=decomp.scale.squeeze().ndim>0 if isinstance(decomp.scale,np.ndarray) else decomp.scale.squeeze().dim() > 0 + has_mirror=scale_len and decomp.scale[0]*decomp.scale[1] < 0 + scale_uniform=not scale_len or abs(abs(decomp.scale[0])-abs(decomp.scale[1]))epsilon + has_translate=max(abs(decomp.translate[0]),abs(decomp.translate[1]))>epsilon + + return OptimizableSvg.TransformTools.TransformProperties(has_rotation=has_rotation,has_scale=has_scale,has_mirror=has_mirror,scale_uniform=scale_uniform,has_shear=has_shear,has_translation=has_translate) + + @staticmethod + def check_and_decomp(M): + decomp=OptimizableSvg.TransformTools.decompose(M) if M is not None else OptimizableSvg.TransformTools.TransformDecomposition(theta=0,scale=(1,1),shear=0,translate=(0,0)) + props=OptimizableSvg.TransformTools.analyze_transform(decomp) + return (decomp, props) + + @staticmethod + def tf_to_string(M): + tfstring = "matrix({} {} {} {} {} {})".format(M[0, 0], M[1, 0], M[0, 1], M[1, 1], M[0, 2], M[1, 2]) + return tfstring + + @staticmethod + def decomp_to_string(decomp): + decomp = OptimizableSvg.TransformTools.make_named(decomp) + ret="" + props=OptimizableSvg.TransformTools.analyze_transform(decomp) + if props.has_rotation: + ret+="rotate({}) ".format(math.degrees(decomp.theta.item())) + if props.has_scale: + if decomp.scale.dim()==0: + ret += "scale({}) ".format(decomp.scale.item()) + else: + ret+="scale({} {}) ".format(decomp.scale[0], decomp.scale[1]) + if props.has_shear: + ret+="skewX({}) ".format(decomp.shear.item()) + if props.has_translation: + ret+="translate({} {}) 
".format(decomp.translate[0],decomp.translate[1]) + + return ret + + @staticmethod + def decompose(M): + m = M[0:2, 0:2] + t0=M[0:2, 2] + #get translation so that we can post-multiply with it + TXY=np.linalg.solve(m,t0) + + T=np.eye(3) + T[0:2,2]=TXY + + q, r = np.linalg.qr(m) + + ref = np.array([[1, 0], [0, np.sign(np.linalg.det(q))]]) + + Rot = np.dot(q, ref) + + ref2 = np.array([[1, 0], [0, np.sign(np.linalg.det(r))]]) + + r2 = np.dot(ref2, r) + + Ref = np.dot(ref, ref2) + + sc = np.diag(r2) + Scale = np.diagflat(sc) + + Shear = np.eye(2) + Shear[0, 1] = r2[0, 1] / sc[0] + #the actual shear coefficient + ShearX=r2[0, 1] / sc[0] + + if np.sum(sc) < 0: + # both scales are negative, flip this and add a 180 rotation + Rot = np.dot(Rot, -np.eye(2)) + Scale = -Scale + + Theta = math.atan2(Rot[1, 0], Rot[0, 0]) + ScaleXY = np.array([Scale[0,0],Scale[1,1]*Ref[1,1]]) + + return OptimizableSvg.TransformTools.TransformDecomposition(theta=Theta, scale=ScaleXY, shear=ShearX, translate=TXY) + + #region suboptimizers + + #optimizes color, but really any tensor that needs to stay between 0 and 1 per-entry + class ColorOptimizer: + def __init__(self,tensor,optim_type,lr): + self.tensor=tensor + self.optim=optim_type([tensor],lr=lr) + + def zero_grad(self): + self.optim.zero_grad() + + def step(self): + self.optim.step() + self.tensor.data.clamp_(min=1e-4,max=1.) + + #optimizes gradient stop positions + class StopOptimizer: + def __init__(self,stops,optim_type,lr): + self.stops=stops + self.optim=optim_type([stops],lr=lr) + + def zero_grad(self): + self.optim.zero_grad() + + def step(self): + self.optim.step() + self.stops.data.clamp_(min=0., max=1.) + self.stops.data, _ = self.stops.sort() + self.stops.data[0] = 0. + self.stops.data[-1]=1. 
    #optimizes gradient: stop, positions, colors+opacities, locations
    class GradientOptimizer:
        def __init__(self, begin, end, offsets, stops, optim_params):
            # Work on detached copies; grads are enabled per component below
            # only for the pieces optim_params says to optimize.
            # NOTE(review): stops appears to be an (N,4) RGBA tensor — colors in
            # columns 0:3, alpha in column 3 — TODO confirm against the parser.
            self.begin=begin.clone().detach() if begin is not None else None
            self.end=end.clone().detach() if end is not None else None
            self.offsets=offsets.clone().detach() if offsets is not None else None
            self.stop_colors=stops[:,0:3].clone().detach() if stops is not None else None
            self.stop_alphas=stops[:,3].clone().detach() if stops is not None else None
            self.optimizers=[]

            if optim_params["gradients"]["optimize_stops"] and self.offsets is not None:
                self.offsets.requires_grad_(True)
                self.optimizers.append(OptimizableSvg.StopOptimizer(self.offsets,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["stop_lr"]))
            if optim_params["gradients"]["optimize_color"] and self.stop_colors is not None:
                self.stop_colors.requires_grad_(True)
                self.optimizers.append(OptimizableSvg.ColorOptimizer(self.stop_colors,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["color_lr"]))
            if optim_params["gradients"]["optimize_alpha"] and self.stop_alphas is not None:
                self.stop_alphas.requires_grad_(True)
                self.optimizers.append(OptimizableSvg.ColorOptimizer(self.stop_alphas,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["alpha_lr"]))
            if optim_params["gradients"]["optimize_location"] and self.begin is not None and self.end is not None:
                # endpoints share one plain optimizer (no clamping needed)
                self.begin.requires_grad_(True)
                self.end.requires_grad_(True)
                self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.begin,self.end],lr=optim_params["gradients"]["location_lr"]))


        def get_vals(self):
            # Return (begin, end, offsets, stops) with colors+alphas re-joined
            # into a single (N,4) tensor (or None if either part is missing).
            return self.begin, self.end, self.offsets, torch.cat((self.stop_colors,self.stop_alphas.unsqueeze(1)),1) if self.stop_colors is not None and self.stop_alphas is not None else None

        def zero_grad(self):
            for optim in self.optimizers:
                optim.zero_grad()

        def step(self):
            for optim in self.optimizers:
                optim.step()

    class TransformOptimizer:
        # Optimizes a node transform, exposing only the degrees of freedom
        # allowed by the configured transform_mode (move/rigid/similarity/affine).
        # When the input transform has components outside the allowed set, the
        # full input is kept as a fixed 'residual' and the free parameters start
        # from identity.
        def __init__(self,transform,optim_params):
            self.transform=transform
            self.optimizes=optim_params["transforms"]["optimize_transforms"] and transform is not None
            self.params=copy.deepcopy(optim_params)
            self.transform_mode=optim_params["transforms"]["transform_mode"]

            if self.optimizes:
                optimvars=[]
                self.residual=None
                lr=optim_params["transforms"]["transform_lr"]
                # translation gets its own lr multiplier (it lives on a
                # different scale than angles/scales)
                tmult=optim_params["transforms"]["translation_mult"]
                decomp,props=OptimizableSvg.TransformTools.check_and_decomp(transform.cpu().numpy())
                if self.transform_mode=="move":
                    #only translation and rotation should be set
                    if props.has_scale or props.has_shear or props.has_mirror:
                        print("Warning: set to optimize move only, but input transform has residual scale or shear")
                        self.residual=self.transform.clone().detach().requires_grad_(False)
                        self.Theta=torch.tensor(0,dtype=torch.float32,requires_grad=True,device=transform.device)
                        self.translation=torch.tensor([0, 0],dtype=torch.float32,requires_grad=True,device=transform.device)
                    else:
                        self.residual=None
                        self.Theta=torch.tensor(decomp.theta,dtype=torch.float32,requires_grad=True,device=transform.device)
                        self.translation=torch.tensor(decomp.translate,dtype=torch.float32,requires_grad=True,device=transform.device)
                    optimvars+=[{'params':x,'lr':lr} for x in [self.Theta]]+[{'params':self.translation,'lr':lr*tmult}]
                elif self.transform_mode=="rigid":
                    #only translation, rotation, and uniform scale should be set
                    if props.has_shear or props.has_mirror or not props.scale_uniform:
                        print("Warning: set to optimize rigid transform only, but input transform has residual shear, mirror or non-uniform scale")
                        self.residual = self.transform.clone().detach().requires_grad_(False)
                        self.Theta = torch.tensor(0, dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.translation = torch.tensor([0, 0],
                                                        dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.scale=torch.tensor(1, dtype=torch.float32, requires_grad=True,device=transform.device)
                    else:
                        self.residual = None
                        self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.scale = torch.tensor(decomp.scale[0], dtype=torch.float32, requires_grad=True,device=transform.device)
                    optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale]]+[{'params':self.translation,'lr':lr*tmult}]
                elif self.transform_mode=="similarity":
                    # similarity: rotation + uniform scale + translation; a
                    # mirror component is preserved via the fixed scale_sign
                    if props.has_shear or not props.scale_uniform:
                        print("Warning: set to optimize rigid transform only, but input transform has residual shear or non-uniform scale")
                        self.residual = self.transform.clone().detach().requires_grad_(False)
                        self.Theta = torch.tensor(0, dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.translation = torch.tensor([0, 0], dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.scale=torch.tensor(1, dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.scale_sign=torch.tensor(1,dtype=torch.float32,requires_grad=False,device=transform.device)
                    else:
                        self.residual = None
                        self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.scale = torch.tensor(decomp.scale[0], dtype=torch.float32, requires_grad=True,device=transform.device)
                        self.scale_sign = torch.tensor(np.sign(decomp.scale[0]*decomp.scale[1]), dtype=torch.float32, requires_grad=False,device=transform.device)
                    optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale]]+[{'params':self.translation,'lr':lr*tmult}]
                elif self.transform_mode=="affine":
                    # affine: all decomposed components are free; no residual needed
                    self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device)
                    self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device)
                    self.scale = torch.tensor(decomp.scale, dtype=torch.float32, requires_grad=True,device=transform.device)
                    self.shear = torch.tensor(decomp.shear, dtype=torch.float32, requires_grad=True,device=transform.device)
                    optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale, self.shear]]+[{'params':self.translation,'lr':lr*tmult}]
                else:
                    raise ValueError("Unrecognized transform mode '{}'".format(self.transform_mode))
                self.optimizer=SvgOptimizationSettings.optims[optim_params["optimizer"]](optimvars)

        def get_transform(self):
            # Rebuild the full transform matrix from the optimized parameters
            # (pre-multiplied by the fixed residual when one was split off).
            if not self.optimizes:
                return self.transform
            else:
                if self.transform_mode == "move":
                    composed=OptimizableSvg.TransformTools.recompose(self.Theta,torch.tensor([1.],device=self.Theta.device),torch.tensor(0.,device=self.Theta.device),self.translation)
                    return self.residual.mm(composed) if self.residual is not None else composed
                elif self.transform_mode == "rigid":
                    composed = OptimizableSvg.TransformTools.recompose(self.Theta, self.scale, torch.tensor(0.,device=self.Theta.device),
                                                                       self.translation)
                    return self.residual.mm(composed) if self.residual is not None else composed
                elif self.transform_mode == "similarity":
                    composed=OptimizableSvg.TransformTools.recompose(self.Theta, torch.cat((self.scale,self.scale*self.scale_sign)),torch.tensor(0.,device=self.Theta.device),self.translation)
                    return self.residual.mm(composed) if self.residual is not None else composed
                elif self.transform_mode == "affine":
                    composed = OptimizableSvg.TransformTools.recompose(self.Theta, self.scale, self.shear, self.translation)
                    return composed
                else:
                    raise ValueError("Unrecognized transform mode '{}'".format(self.transform_mode))

        def tfToString(self):
            # Serialize the current (possibly optimized) transform for SVG output.
            if self.transform is None:
                return None
            elif not
self.optimizes: + return OptimizableSvg.TransformTools.tf_to_string(self.transform) + else: + if self.transform_mode == "move": + str=OptimizableSvg.TransformTools.decomp_to_string((self.Theta,torch.tensor([1.]),torch.tensor(0.),self.translation)) + return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str + elif self.transform_mode == "rigid": + str = OptimizableSvg.TransformTools.decomp_to_string((self.Theta, self.scale, torch.tensor(0.), + self.translation)) + return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str + elif self.transform_mode == "similarity": + str=OptimizableSvg.TransformTools.decomp_to_string((self.Theta, torch.cat((self.scale,self.scale*self.scale_sign)),torch.tensor(0.),self.translation)) + return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str + elif self.transform_mode == "affine": + str = OptimizableSvg.TransformTools.decomp_to_string((self.Theta, self.scale, self.shear, self.translation)) + return composed + + def zero_grad(self): + if self.optimizes: + self.optimizer.zero_grad() + + def step(self): + if self.optimizes: + self.optimizer.step() + + #endregion + + #region Nodes + class SvgNode: + def __init__(self,id,transform,appearance,settings): + self.id=id + self.children=[] + self.optimizers=[] + self.device = settings.device + self.transform=torch.tensor(transform,dtype=torch.float32,device=self.device) if transform is not None else None + self.transform_optim=OptimizableSvg.TransformOptimizer(self.transform,settings.retrieve(self.id)[0]) + self.optimizers.append(self.transform_optim) + self.proc_appearance(appearance,settings.retrieve(self.id)[0]) + + def tftostring(self): + return self.transform_optim.tfToString() + + def appearanceToString(self): + appstring="" + for key,value in self.appearance.items(): + if key in ["fill", "stroke"]: + #a paint-type value + if 
value[0] == "none": + appstring+="{}:none;".format(key) + elif value[0] == "solid": + appstring += "{}:{};".format(key,OptimizableSvg.rgb_to_string(value[1])) + elif value[0] == "url": + appstring += "{}:url(#{});".format(key,value[1].id) + #appstring += "{}:{};".format(key,"#ff00ff") + elif key in ["opacity", "fill-opacity", "stroke-opacity", "stroke-width", "fill-rule"]: + appstring+="{}:{};".format(key,value) + else: + raise ValueError("Don't know how to write appearance parameter '{}'".format(key)) + return appstring + + + def write_xml_common_attrib(self,node,tfname="transform"): + if self.transform is not None: + node.set(tfname,self.tftostring()) + if len(self.appearance)>0: + node.set('style',self.appearanceToString()) + if self.id is not None: + node.set('id',self.id) + + + def proc_appearance(self,appearance,optim_params): + self.appearance=appearance + for key, value in appearance.items(): + if key == "fill" or key == "stroke": + if optim_params["optimize_color"] and value[0]=="solid": + value[1].requires_grad_(True) + self.optimizers.append(OptimizableSvg.ColorOptimizer(value[1],SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["color_lr"])) + elif key == "fill-opacity" or key == "stroke-opacity" or key == "opacity": + if optim_params["optimize_alpha"]: + value[1].requires_grad_(True) + self.optimizers.append(OptimizableSvg.ColorOptimizer(value[1], optim_params["optimizer"], + optim_params["alpha_lr"])) + elif key == "fill-rule" or key == "stroke-width": + pass + else: + raise RuntimeError("Unrecognized appearance key '{}'".format(key)) + + def prop_transform(self,intform): + return intform.matmul(self.transform_optim.get_transform()) if self.transform is not None else intform + + def prop_appearance(self,inappearance): + outappearance=copy.copy(inappearance) + for key,value in self.appearance.items(): + if key == "fill": + #gets replaced + outappearance[key]=value + elif key == "fill-opacity": + #gets multiplied + 
outappearance[key] = outappearance[key]*value + elif key == "fill-rule": + #gets replaced + outappearance[key] = value + elif key =="opacity": + # gets multiplied + outappearance[key] = outappearance[key]*value + elif key == "stroke": + # gets replaced + outappearance[key] = value + elif key == "stroke-opacity": + # gets multiplied + outappearance[key] = outappearance[key]*value + elif key =="stroke-width": + # gets replaced + outappearance[key] = value + else: + raise RuntimeError("Unrecognized appearance key '{}'".format(key)) + return outappearance + + def zero_grad(self): + for optim in self.optimizers: + optim.zero_grad() + for child in self.children: + child.zero_grad() + + def step(self): + for optim in self.optimizers: + optim.step() + for child in self.children: + child.step() + + def get_type(self): + return "Generic node" + + def is_shape(self): + return False + + def build_scene(self,shapes,shape_groups,transform,appearance): + raise NotImplementedError("Abstract SvgNode cannot recurse") + + class GroupNode(SvgNode): + def __init__(self, id, transform, appearance,settings): + super().__init__(id, transform, appearance,settings) + + def get_type(self): + return "Group node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + outtf=self.prop_transform(transform) + outapp=self.prop_appearance(appearance) + for child in self.children: + child.build_scene(shapes,shape_groups,outtf,outapp) + + def write_xml(self, parent): + elm=etree.SubElement(parent,"g") + self.write_xml_common_attrib(elm) + + for child in self.children: + child.write_xml(elm) + + class RootNode(SvgNode): + def __init__(self, id, transform, appearance,settings): + super().__init__(id, transform, appearance,settings) + + def write_xml(self,document): + elm=etree.Element('svg') + self.write_xml_common_attrib(elm) + elm.set("version","2.0") + elm.set("width",str(document.canvas[0])) + elm.set("height", str(document.canvas[1])) + elm.set("xmlns","http://www.w3.org/2000/svg") + 
elm.set("xmlns:xlink","http://www.w3.org/1999/xlink") + #write definitions before we write any children + document.write_defs(elm) + + #write the children + for child in self.children: + child.write_xml(elm) + + return elm + + def get_type(self): + return "Root node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + outtf = self.prop_transform(transform).to(self.device) + for child in self.children: + child.build_scene(shapes,shape_groups,outtf,appearance) + + @staticmethod + def get_default_appearance(device): + default_appearance = {"fill": ("solid", torch.tensor([0., 0., 0.],device=device)), + "fill-opacity": torch.tensor([1.],device=device), + "fill-rule": "nonzero", + "opacity": torch.tensor([1.],device=device), + "stroke": ("none", None), + "stroke-opacity": torch.tensor([1.],device=device), + "stroke-width": torch.tensor([0.],device=device)} + return default_appearance + + @staticmethod + def get_default_transform(): + return torch.eye(3) + + + + class ShapeNode(SvgNode): + def __init__(self, id, transform, appearance,settings): + super().__init__(id, transform, appearance,settings) + + def get_type(self): + return "Generic shape node" + + def is_shape(self): + return True + + def construct_paint(self,value,combined_opacity,transform): + if value[0] == "none": + return None + elif value[0] == "solid": + return torch.cat([value[1],combined_opacity]).to(self.device) + elif value[0] == "url": + #get the gradient object from this node + return value[1].getGrad(combined_opacity,transform) + else: + raise ValueError("Unknown paint value type '{}'".format(value[0])) + + def make_shape_group(self,appearance,transform,num_shapes,num_subobjects): + fill=self.construct_paint(appearance["fill"],appearance["opacity"]*appearance["fill-opacity"],transform) + stroke=self.construct_paint(appearance["stroke"],appearance["opacity"]*appearance["stroke-opacity"],transform) + sg = pydiffvg.ShapeGroup(shape_ids=torch.tensor(range(num_shapes, num_shapes + 
num_subobjects)), + fill_color=fill, + use_even_odd_rule=appearance["fill-rule"]=="evenodd", + stroke_color=stroke, + shape_to_canvas=transform, + id=self.id) + return sg + + class PathNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, paths): + super().__init__(id, transform, appearance,settings) + self.proc_paths(paths,settings.retrieve(self.id)[0]) + + def proc_paths(self,paths,optim_params): + self.paths=paths + if optim_params["paths"]["optimize_points"]: + ptlist=[] + for path in paths: + ptlist.append(path.points.requires_grad_(True)) + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]](ptlist,lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Path node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + sg=self.make_shape_group(applyapp,applytf,len(shapes),len(self.paths)) + for path in self.paths: + disp_path=pydiffvg.Path(path.num_control_points,path.points,path.is_closed,applyapp["stroke-width"],path.id) + shapes.append(disp_path) + shape_groups.append(sg) + + def path_to_string(self,path): + path_string = "M {},{} ".format(path.points[0][0].item(), path.points[0][1].item()) + idx = 1 + numpoints = path.points.shape[0] + for type in path.num_control_points: + toproc = type + 1 + if type == 0: + # add line + path_string += "L " + elif type == 1: + # add quadric + path_string += "Q " + elif type == 2: + # add cubic + path_string += "C " + while toproc > 0: + path_string += "{},{} ".format(path.points[idx % numpoints][0].item(), + path.points[idx % numpoints][1].item()) + idx += 1 + toproc -= 1 + if path.is_closed: + path_string += "Z " + + return path_string + + def paths_string(self): + pstr="" + for path in self.paths: + pstr+=self.path_to_string(path) + return pstr + + def write_xml(self, parent): + elm = etree.SubElement(parent, "path") + self.write_xml_common_attrib(elm) + 
elm.set("d",self.paths_string()) + + for child in self.children: + child.write_xml(elm) + + class RectNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, rect): + super().__init__(id, transform, appearance,settings) + self.rect=torch.tensor(rect,dtype=torch.float,device=settings.device) + optim_params=settings.retrieve(self.id)[0] + #borrowing path settings for this + if optim_params["paths"]["optimize_points"]: + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.rect],lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Rect node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + sg=self.make_shape_group(applyapp,applytf,len(shapes),1) + shapes.append(pydiffvg.Rect(self.rect[0:2],self.rect[0:2]+self.rect[2:4],applyapp["stroke-width"],self.id)) + shape_groups.append(sg) + + def write_xml(self, parent): + elm = etree.SubElement(parent, "rect") + self.write_xml_common_attrib(elm) + elm.set("x",str(self.rect[0])) + elm.set("y", str(self.rect[1])) + elm.set("width", str(self.rect[2])) + elm.set("height", str(self.rect[3])) + + for child in self.children: + child.write_xml(elm) + + class CircleNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, rect): + super().__init__(id, transform, appearance,settings) + self.circle=torch.tensor(rect,dtype=torch.float,device=settings.device) + optim_params=settings.retrieve(self.id)[0] + #borrowing path settings for this + if optim_params["paths"]["optimize_points"]: + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.circle],lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Circle node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + 
sg=self.make_shape_group(applyapp,applytf,len(shapes),1) + shapes.append(pydiffvg.Circle(self.circle[2],self.circle[0:2],applyapp["stroke-width"],self.id)) + shape_groups.append(sg) + + def write_xml(self, parent): + elm = etree.SubElement(parent, "circle") + self.write_xml_common_attrib(elm) + elm.set("cx",str(self.circle[0])) + elm.set("cy", str(self.circle[1])) + elm.set("r", str(self.circle[2])) + + for child in self.children: + child.write_xml(elm) + + + class EllipseNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, ellipse): + super().__init__(id, transform, appearance,settings) + self.ellipse=torch.tensor(ellipse,dtype=torch.float,device=settings.device) + optim_params=settings.retrieve(self.id)[0] + #borrowing path settings for this + if optim_params["paths"]["optimize_points"]: + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.ellipse],lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Ellipse node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + sg=self.make_shape_group(applyapp,applytf,len(shapes),1) + shapes.append(pydiffvg.Ellipse(self.ellipse[2:4],self.ellipse[0:2],applyapp["stroke-width"],self.id)) + shape_groups.append(sg) + + def write_xml(self, parent): + elm = etree.SubElement(parent, "ellipse") + self.write_xml_common_attrib(elm) + elm.set("cx", str(self.ellipse[0])) + elm.set("cy", str(self.ellipse[1])) + elm.set("rx", str(self.ellipse[2])) + elm.set("ry", str(self.ellipse[3])) + + for child in self.children: + child.write_xml(elm) + + class PolygonNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, points): + super().__init__(id, transform, appearance,settings) + self.points=points + optim_params=settings.retrieve(self.id)[0] + #borrowing path settings for this + if optim_params["paths"]["optimize_points"]: + 
self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.points],lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Polygon node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + sg=self.make_shape_group(applyapp,applytf,len(shapes),1) + shapes.append(pydiffvg.Polygon(self.points,True,applyapp["stroke-width"],self.id)) + shape_groups.append(sg) + + def point_string(self): + ret="" + for i in range(self.points.shape[0]): + pt=self.points[i,:] + #assert pt.shape == (1,2) + ret+= str(pt[0])+","+str(pt[1])+" " + return ret + + def write_xml(self, parent): + elm = etree.SubElement(parent, "polygon") + self.write_xml_common_attrib(elm) + elm.set("points",self.point_string()) + + for child in self.children: + child.write_xml(elm) + + class GradientNode(SvgNode): + def __init__(self, id, transform,settings,begin,end,offsets,stops,href): + super().__init__(id, transform, {},settings) + self.optim=OptimizableSvg.GradientOptimizer(begin, end, offsets, stops, settings.retrieve(id)[0]) + self.optimizers.append(self.optim) + self.href=href + + def is_ref(self): + return self.href is not None + + def get_type(self): + return "Gradient node" + + def get_stops(self): + _, _, offsets, stops=self.optim.get_vals() + return offsets, stops + + def get_points(self): + begin, end, _, _ =self.optim.get_vals() + return begin, end + + def write_xml(self, parent): + elm = etree.SubElement(parent, "linearGradient") + self.write_xml_common_attrib(elm,tfname="gradientTransform") + + begin, end, offsets, stops = self.optim.get_vals() + + if self.href is None: + #we have stops + for idx, offset in enumerate(offsets): + stop=etree.SubElement(elm,"stop") + stop.set("offset",str(offset.item())) + stop.set("stop-color",OptimizableSvg.rgb_to_string(stops[idx,0:3])) + stop.set("stop-opacity",str(stops[idx,3].item())) + else: + 
elm.set('xlink:href', "#{}".format(self.href.id)) + + if begin is not None and end is not None: + #no stops + elm.set('x1', str(begin[0].item())) + elm.set('y1', str(begin[1].item())) + elm.set('x2', str(end[0].item())) + elm.set('y2', str(end[1].item())) + + # magic value to make this work + elm.set("gradientUnits", "userSpaceOnUse") + + for child in self.children: + child.write_xml(elm) + + def getGrad(self,combined_opacity,transform): + if self.is_ref(): + offsets, stops=self.href.get_stops() + else: + offsets, stops=self.get_stops() + + stops=stops.clone() + stops[:,3]*=combined_opacity + + begin,end = self.get_points() + + applytf=self.prop_transform(transform) + begin=OptimizableSvg.TransformTools.transformPoints(begin.unsqueeze(0),applytf).squeeze() + end = OptimizableSvg.TransformTools.transformPoints(end.unsqueeze(0), applytf).squeeze() + + return pydiffvg.LinearGradient(begin, end, offsets, stops) + #endregion + + def __init__(self, filename, settings=SvgOptimizationSettings(),optimize_background=False, verbose=False, device=torch.device("cpu")): + self.settings=settings + self.verbose=verbose + self.device=device + self.settings.device=device + + tree = etree.parse(filename) + root = tree.getroot() + + #in case we need global optimization + self.optimizers=[] + self.background=torch.tensor([1.,1.,1.],dtype=torch.float32,requires_grad=optimize_background,device=self.device) + + if optimize_background: + p=settings.retrieve("default")[0] + self.optimizers.append(OptimizableSvg.ColorOptimizer(self.background,SvgOptimizationSettings.optims[p["optimizer"]],p["color_lr"])) + + self.defs={} + + self.depth=0 + + self.dirty=True + self.scene=None + + self.parseRoot(root) + + recognised_shapes=["path","circle","rect","ellipse","polygon"] + + #region core functionality + def build_scene(self): + if self.dirty: + shape_groups=[] + shapes=[] + 
self.root.build_scene(shapes,shape_groups,OptimizableSvg.RootNode.get_default_transform().to(self.device),OptimizableSvg.RootNode.get_default_appearance(self.device)) + self.scene=(self.canvas[0],self.canvas[1],shapes,shape_groups) + self.dirty=False + return self.scene + + def zero_grad(self): + self.root.zero_grad() + for optim in self.optimizers: + optim.zero_grad() + for item in self.defs.values(): + if issubclass(item.__class__,OptimizableSvg.SvgNode): + item.zero_grad() + + def render(self,scale=None,seed=0): + #render at native resolution + scene = self.build_scene() + scene_args = pydiffvg.RenderFunction.serialize_scene(*scene) + render = pydiffvg.RenderFunction.apply + out_size=(scene[0],scene[1]) if scale is None else (int(scene[0]*scale),int(scene[1]*scale)) + img = render(out_size[0], # width + out_size[1], # height + 2, # num_samples_x + 2, # num_samples_y + seed, # seed + None, # background_image + *scene_args) + return img + + def step(self): + self.dirty=True + self.root.step() + for optim in self.optimizers: + optim.step() + for item in self.defs.values(): + if issubclass(item.__class__, OptimizableSvg.SvgNode): + item.step() + #endregion + + #region reporting + + def offset_str(self,s): + return ("\t"*self.depth)+s + + def reportSkippedAttribs(self, node, non_skipped=[]): + skipped=set([k for k in node.attrib.keys() if not OptimizableSvg.is_namespace(k)])-set(non_skipped) + if len(skipped)>0: + tag=OptimizableSvg.remove_namespace(node.tag) if "id" not in node.attrib else "{}#{}".format(OptimizableSvg.remove_namespace(node.tag),node.attrib["id"]) + print(self.offset_str("Warning: Skipping the following attributes of node '{}': {}".format(tag,", ".join(["'{}'".format(atr) for atr in skipped])))) + + def reportSkippedChildren(self,node,skipped): + skipped_names=["{}#{}".format(elm.tag,elm.attrib["id"]) if "id" in elm.attrib else elm.tag for elm in skipped] + if len(skipped)>0: + tag = OptimizableSvg.remove_namespace(node.tag) if "id" not in 
node.attrib else "{}#{}".format(OptimizableSvg.remove_namespace(node.tag), + node.attrib["id"]) + print(self.offset_str("Warning: Skipping the following children of node '{}': {}".format(tag,", ".join(["'{}'".format(name) for name in skipped_names])))) + + #endregion + + #region parsing + @staticmethod + def remove_namespace(s): + """ + {...} ... -> ... + """ + return re.sub('{.*}', '', s) + + @staticmethod + def is_namespace(s): + return re.match('{.*}', s) is not None + + @staticmethod + def parseTransform(node): + if "transform" not in node.attrib and "gradientTransform" not in node.attrib: + return None + + tf_string=node.attrib["transform"] if "transform" in node.attrib else node.attrib["gradientTransform"] + tforms=tf_string.split(")")[:-1] + mat=np.eye(3) + for tform in tforms: + type = tform.split("(")[0] + args = [float(val) for val in re.split("[, ]+",tform.split("(")[1])] + if type == "matrix": + mat=mat @ OptimizableSvg.TransformTools.parse_matrix(args) + elif type == "translate": + mat = mat @ OptimizableSvg.TransformTools.parse_translate(args) + elif type == "rotate": + mat = mat @ OptimizableSvg.TransformTools.parse_rotate(args) + elif type == "scale": + mat = mat @ OptimizableSvg.TransformTools.parse_scale(args) + elif type == "skewX": + mat = mat @ OptimizableSvg.TransformTools.parse_skewx(args) + elif type == "skewY": + mat = mat @ OptimizableSvg.TransformTools.parse_skewy(args) + else: + raise ValueError("Unknown transform type '{}'".format(type)) + return mat + + #dictionary that defines what constant do we need to multiply different units to get the value in pixels + #gleaned from the CSS definition + unit_dict = {"px":1, + "mm":4, + "cm":40, + "in":25.4*4, + "pt":25.4*4/72, + "pc":25.4*4/6 + } + + @staticmethod + def parseLength(s): + #length is a number followed possibly by a unit definition + #we assume that default unit is the pixel (px) equal to 0.25mm + #last two characters might be unit + val=None + for i in range(len(s)): + try: + 
val=float(s[:len(s)-i]) + unit=s[len(s)-i:] + break + except ValueError: + continue + if len(unit)>0 and unit not in OptimizableSvg.unit_dict: + raise ValueError("Unknown or unsupported unit '{}' encountered while parsing".format(unit)) + if unit != "": + val*=OptimizableSvg.unit_dict[unit] + return val + + @staticmethod + def parseOpacity(s): + is_percent=s.endswith("%") + s=s.rstrip("%") + val=float(s) + if is_percent: + val=val/100 + return np.clip(val,0.,1.) + + @staticmethod + def parse_color(s): + """ + Hex to tuple + """ + if s[0] != '#': + raise ValueError("Color argument `{}` not supported".format(s)) + s = s.lstrip('#') + if len(s)==6: + rgb = tuple(int(s[i:i + 2], 16) for i in (0, 2, 4)) + return torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]) + elif len(s)==3: + rgb = tuple((int(s[i:i + 1], 16)) for i in (0, 1, 2)) + return torch.tensor([rgb[0] / 15.0, rgb[1] / 15.0, rgb[2] / 15.0]) + else: + raise ValueError("Color argument `{}` not supported".format(s)) + # sRGB to RGB + # return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 2.2) + + + @staticmethod + def rgb_to_string(val): + byte_rgb=(val.clone().detach()*255).type(torch.int) + byte_rgb.clamp_(min=0,max=255) + s="#{:02x}{:02x}{:02x}".format(*byte_rgb) + return s + + #parses a "paint" string for use in fill and stroke definitions + @staticmethod + def parsePaint(paintStr,defs,device): + paintStr=paintStr.strip() + if paintStr=="none": + return ("none", None) + elif paintStr[0]=="#": + return ("solid",OptimizableSvg.parse_color(paintStr).to(device)) + elif paintStr.startswith("url"): + url=paintStr.lstrip("url(").rstrip(")").strip("\'\"").lstrip("#") + if url not in defs: + raise ValueError("Paint-type attribute referencing an unknown object with ID '#{}'".format(url)) + return ("url",defs[url]) + else: + raise ValueError("Unrecognized paint string: '{}'".format(paintStr)) + + 
appearance_keys=["fill","fill-opacity","fill-rule","opacity","stroke","stroke-opacity","stroke-width"] + + @staticmethod + def parseAppearance(node, defs, device): + ret={} + parse_keys = OptimizableSvg.appearance_keys + local_dict={key:value for key,value in node.attrib.items() if key in parse_keys} + css_dict={} + style_dict={} + appearance_dict={} + if "class" in node.attrib: + cls=node.attrib["class"] + if "."+cls in defs: + css_string=defs["."+cls] + css_dict={item.split(":")[0]:item.split(":")[1] for item in css_string.split(";") if len(item)>0 and item.split(":")[0] in parse_keys} + if "style" in node.attrib: + style_string=node.attrib["style"] + style_dict={item.split(":")[0]:item.split(":")[1] for item in style_string.split(";") if len(item)>0 and item.split(":")[0] in parse_keys} + appearance_dict.update(css_dict) + appearance_dict.update(style_dict) + appearance_dict.update(local_dict) + for key,value in appearance_dict.items(): + if key=="fill": + ret[key]=OptimizableSvg.parsePaint(value,defs,device) + elif key == "fill-opacity": + ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device) + elif key == "fill-rule": + ret[key]=value + elif key == "opacity": + ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device) + elif key == "stroke": + ret[key]=OptimizableSvg.parsePaint(value,defs,device) + elif key == "stroke-opacity": + ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device) + elif key == "stroke-width": + ret[key]=torch.tensor(OptimizableSvg.parseLength(value),device=device) + else: + raise ValueError("Error while parsing appearance attributes: key '{}' should not be here".format(key)) + + return ret + + def parseRoot(self,root): + if self.verbose: + print(self.offset_str("Parsing root")) + self.depth += 1 + + # get document canvas dimensions + self.parseViewport(root) + canvmax=np.max(self.canvas) + self.settings.global_override(["transforms","translation_mult"],canvmax) + id=root.attrib["id"] if "id" 
in root.attrib else None + + transform=OptimizableSvg.parseTransform(root) + appearance=OptimizableSvg.parseAppearance(root,self.defs,self.device) + + version=root.attrib["version"] if "version" in root.attrib else "" + if version != "2.0": + print(self.offset_str("Warning: Version {} is not 2.0, strange things may happen".format(version))) + + self.root=OptimizableSvg.RootNode(id,transform,appearance,self.settings) + + if self.verbose: + self.reportSkippedAttribs(root, ["width", "height", "id", "transform","version", "style"]+OptimizableSvg.appearance_keys) + + #go through the root children and parse them appropriately + skipped=[] + for child in root: + if OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes: + self.parseShape(child,self.root) + elif OptimizableSvg.remove_namespace(child.tag) == "defs": + self.parseDefs(child) + elif OptimizableSvg.remove_namespace(child.tag) == "style": + self.parseStyle(child) + elif OptimizableSvg.remove_namespace(child.tag) == "g": + self.parseGroup(child,self.root) + else: + skipped.append(child) + + if self.verbose: + self.reportSkippedChildren(root,skipped) + + self.depth-=1 + + def parseShape(self,shape,parent): + tag=OptimizableSvg.remove_namespace(shape.tag) + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag,shape.attrib["id"] if "id" in shape.attrib else ""))) + + self.depth+=1 + if tag == "path": + self.parsePath(shape,parent) + elif tag == "circle": + self.parseCircle(shape,parent) + elif tag == "rect": + self.parseRect(shape,parent) + elif tag == "ellipse": + self.parseEllipse(shape,parent) + elif tag == "polygon": + self.parsePolygon(shape,parent) + else: + raise ValueError("Encountered unknown shape type '{}'".format(tag)) + self.depth -= 1 + + def parsePath(self,shape,parent): + path_string=shape.attrib['d'] + name = '' + if 'id' in shape.attrib: + name = shape.attrib['id'] + paths = pydiffvg.from_svg_path(path_string) + for idx, path in enumerate(paths): + 
path.stroke_width = torch.tensor([0.],device=self.device) + path.num_control_points=path.num_control_points.to(self.device) + path.points=path.points.to(self.device) + path.source_id = name + path.id = "{}-{}".format(name,idx) if len(paths)>1 else name + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape,self.defs,self.device) + node=OptimizableSvg.PathNode(name,transform,appearance,self.settings,paths) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id","d","transform","style"]+OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape,list(shape)) + + def parseEllipse(self, shape, parent): + cx = float(shape.attrib["cx"]) if "cx" in shape.attrib else 0. + cy = float(shape.attrib["cy"]) if "cy" in shape.attrib else 0. + rx = float(shape.attrib["rx"]) + ry = float(shape.attrib["ry"]) + name = '' + if 'id' in shape.attrib: + name = shape.attrib['id'] + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) + node = OptimizableSvg.EllipseNode(name, transform, appearance, self.settings, (cx, cy, rx, ry)) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id", "x", "y", "r", "transform", + "style"] + OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape, list(shape)) + + def parsePolygon(self, shape, parent): + points_string = shape.attrib['points'] + name = '' + points=[] + for point_string in points_string.split(" "): + if len(point_string) == 0: + continue + coord_strings=point_string.split(",") + assert len(coord_strings)==2 + points.append([float(coord_strings[0]),float(coord_strings[1])]) + points=torch.tensor(points,dtype=torch.float,device=self.device) + if 'id' in shape.attrib: + name = shape.attrib['id'] + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) + node = 
OptimizableSvg.PolygonNode(name, transform, appearance, self.settings, points) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id", "points", "transform", "style"] + OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape, list(shape)) + + def parseCircle(self,shape,parent): + cx = float(shape.attrib["cx"]) if "cx" in shape.attrib else 0. + cy = float(shape.attrib["cy"]) if "cy" in shape.attrib else 0. + r = float(shape.attrib["r"]) + name = '' + if 'id' in shape.attrib: + name = shape.attrib['id'] + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) + node = OptimizableSvg.CircleNode(name, transform, appearance, self.settings, (cx, cy, r)) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id", "x", "y", "r", "transform", + "style"] + OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape, list(shape)) + + def parseRect(self,shape,parent): + x = float(shape.attrib["x"]) if "x" in shape.attrib else 0. + y = float(shape.attrib["y"]) if "y" in shape.attrib else 0. 
+ width = float(shape.attrib["width"]) + height = float(shape.attrib["height"]) + name = '' + if 'id' in shape.attrib: + name = shape.attrib['id'] + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) + node = OptimizableSvg.RectNode(name, transform, appearance, self.settings, (x,y,width,height)) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id", "x", "y", "width", "height", "transform", "style"] + OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape, list(shape)) + + def parseGroup(self,group,parent): + tag = OptimizableSvg.remove_namespace(group.tag) + id = group.attrib["id"] if "id" in group.attrib else "" + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag, id))) + + self.depth+=1 + + transform=self.parseTransform(group) + + #todo process more attributes + appearance=OptimizableSvg.parseAppearance(group,self.defs,self.device) + node=OptimizableSvg.GroupNode(id,transform,appearance,self.settings) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(group,["id","transform","style"]+OptimizableSvg.appearance_keys) + + skipped_children=[] + for child in group: + if OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes: + self.parseShape(child,node) + elif OptimizableSvg.remove_namespace(child.tag) == "defs": + self.parseDefs(child) + elif OptimizableSvg.remove_namespace(child.tag) == "style": + self.parseStyle(child) + elif OptimizableSvg.remove_namespace(child.tag) == "g": + self.parseGroup(child,node) + else: + skipped_children.append(child) + + if self.verbose: + self.reportSkippedChildren(group,skipped_children) + + self.depth-=1 + + def parseStyle(self,style_node): + tag = OptimizableSvg.remove_namespace(style_node.tag) + id = style_node.attrib["id"] if "id" in style_node.attrib else "" + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag, id))) 
+ + if style_node.attrib["type"] != "text/css": + raise ValueError("Only text/css style recognized, got {}".format(style_node.attrib["type"])) + + self.depth += 1 + + # creating only a dummy node + node = OptimizableSvg.SvgNode(id, None, {}, self.settings) + + if self.verbose: + self.reportSkippedAttribs(def_node, ["id"]) + + if len(style_node)>0: + raise ValueError("Style node should not have children (has {})".format(len(style_node))) + + # collect CSS classes + sheet = cssutils.parseString(style_node.text) + for rule in sheet: + if hasattr(rule, 'selectorText') and hasattr(rule, 'style'): + name = rule.selectorText + if len(name) >= 2 and name[0] == '.': + self.defs[name] = rule.style.getCssText().replace("\n","") + else: + raise ValueError("Unrecognized CSS selector {}".format(name)) + else: + raise ValueError("No style or selector text in CSS rule") + + if self.verbose: + self.reportSkippedChildren(def_node, skipped_children) + + self.depth -= 1 + + def parseDefs(self,def_node): + #only linear gradients are currently supported + tag = OptimizableSvg.remove_namespace(def_node.tag) + id = def_node.attrib["id"] if "id" in def_node.attrib else "" + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag, id))) + + self.depth += 1 + + + # creating only a dummy node + node = OptimizableSvg.SvgNode(id, None, {},self.settings) + + if self.verbose: + self.reportSkippedAttribs(def_node, ["id"]) + + skipped_children = [] + for child in def_node: + if OptimizableSvg.remove_namespace(child.tag) == "linearGradient": + self.parseGradient(child,node) + elif OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes: + raise NotImplementedError("Definition/instantiation of shapes not supported") + elif OptimizableSvg.remove_namespace(child.tag) == "defs": + raise NotImplementedError("Definition within definition not supported") + elif OptimizableSvg.remove_namespace(child.tag) == "g": + raise NotImplementedError("Groups within definition not 
supported") + else: + skipped_children.append(child) + + if len(node.children)>0: + #take this node out and enter it into defs + self.defs[node.children[0].id]=node.children[0] + node.children.pop() + + + if self.verbose: + self.reportSkippedChildren(def_node, skipped_children) + + self.depth -= 1 + + def parseGradientStop(self,stop): + param_dict={key:value for key,value in stop.attrib.items() if key in ["id","offset","stop-color","stop-opacity"]} + style_dict={} + if "style" in stop.attrib: + style_dict={item.split(":")[0]:item.split(":")[1] for item in stop.attrib["style"].split(";") if len(item)>0} + param_dict.update(style_dict) + + offset=OptimizableSvg.parseOpacity(param_dict["offset"]) + color=OptimizableSvg.parse_color(param_dict["stop-color"]) + opacity=OptimizableSvg.parseOpacity(param_dict["stop-opacity"]) if "stop-opacity" in param_dict else 1. + + return offset, color, opacity + + def parseGradient(self, gradient_node, parent): + tag = OptimizableSvg.remove_namespace(gradient_node.tag) + id = gradient_node.attrib["id"] if "id" in gradient_node.attrib else "" + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag, id))) + + self.depth += 1 + if "stop" not in [OptimizableSvg.remove_namespace(child.tag) for child in gradient_node]\ + and "href" not in [OptimizableSvg.remove_namespace(key) for key in gradient_node.attrib.keys()]: + raise ValueError("Gradient {} has neither stops nor a href link to them".format(id)) + + transform=self.parseTransform(gradient_node) + begin=None + end = None + offsets=[] + stops=[] + href=None + + if "x1" in gradient_node.attrib or "y1" in gradient_node.attrib: + begin=np.array([0.,0.]) + if "x1" in gradient_node.attrib: + begin[0] = float(gradient_node.attrib["x1"]) + if "y1" in gradient_node.attrib: + begin[1] = float(gradient_node.attrib["y1"]) + begin = torch.tensor(begin.transpose(),dtype=torch.float32) + + if "x2" in gradient_node.attrib or "y2" in gradient_node.attrib: + end=np.array([0.,0.]) + if "x2" 
in gradient_node.attrib: + end[0] = float(gradient_node.attrib["x2"]) + if "y2" in gradient_node.attrib: + end[1] = float(gradient_node.attrib["y2"]) + end=torch.tensor(end.transpose(),dtype=torch.float32) + + stop_nodes=[node for node in list(gradient_node) if OptimizableSvg.remove_namespace(node.tag)=="stop"] + if len(stop_nodes)>0: + stop_nodes=sorted(stop_nodes,key=lambda n: float(n.attrib["offset"])) + + for stop in stop_nodes: + offset, color, opacity = self.parseGradientStop(stop) + offsets.append(offset) + stops.append(np.concatenate((color,np.array([opacity])))) + + hkey=next((value for key,value in gradient_node.attrib.items() if OptimizableSvg.remove_namespace(key)=="href"),None) + if hkey is not None: + href=self.defs[hkey.lstrip("#")] + + parent.children.append(OptimizableSvg.GradientNode(id,transform,self.settings,begin.to(self.device) if begin is not None else begin,end.to(self.device) if end is not None else end,torch.tensor(offsets,dtype=torch.float32,device=self.device) if len(offsets)>0 else None,torch.tensor(np.array(stops),dtype=torch.float32,device=self.device) if len(stops)>0 else None,href)) + + self.depth -= 1 + + def parseViewport(self, root): + if "width" in root.attrib and "height" in root.attrib: + self.canvas = np.array([int(math.ceil(float(root.attrib["width"]))), int(math.ceil(float(root.attrib["height"])))]) + elif "viewBox" in root.attrib: + s=root.attrib["viewBox"].split(" ") + w=s[2] + h=s[3] + self.canvas = np.array( + [int(math.ceil(float(w))), int(math.ceil(float(h)))]) + else: + raise ValueError("Size information is missing from document definition") + #endregion + + #region writing + def write_xml(self): + tree=self.root.write_xml(self) + + return minidom.parseString(etree.tostring(tree, 'utf-8')).toprettyxml(indent=" ") + + def write_defs(self,root): + if len(self.defs)==0: + return + + defnode = etree.SubElement(root, 'defs') + stylenode = etree.SubElement(root,'style') + stylenode.set('type','text/css') + 
stylenode.text="" + + defcpy=copy.copy(self.defs) + while len(defcpy)>0: + torem=[] + for key,value in defcpy.items(): + if issubclass(value.__class__,OptimizableSvg.SvgNode): + if value.href is None or value.href not in defcpy: + value.write_xml(defnode) + torem.append(key) + else: + continue + else: + #this is a string, and hence a CSS attribute + stylenode.text+=key+" {"+value+"}\n" + torem.append(key) + + for key in torem: + del defcpy[key] + #endregion + + diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/parse_svg.py b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/parse_svg.py new file mode 100644 index 0000000000000000000000000000000000000000..84caf4d938051723ac670604506d51c53d44708d --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/parse_svg.py @@ -0,0 +1,586 @@ +import torch +import xml.etree.ElementTree as etree +import numpy as np +import diffvg +import os +import pydiffvg +import svgpathtools +import svgpathtools.parser +import re +import warnings +import cssutils +import logging +import matplotlib.colors +cssutils.log.setLevel(logging.ERROR) + +def remove_namespaces(s): + """ + {...} ... -> ... 
+ """ + return re.sub('{.*}', '', s) + +def parse_style(s, defs): + style_dict = {} + for e in s.split(';'): + key_value = e.split(':') + if len(key_value) == 2: + key = key_value[0].strip() + value = key_value[1].strip() + if key == 'fill' or key == 'stroke': + # Special case: convert colors into tensor in definitions so + # that different shapes can share the same color + value = parse_color(value, defs) + style_dict[key] = value + return style_dict + +def parse_hex(s): + """ + Hex to tuple + """ + s = s.lstrip('#') + if len(s) == 3: + s = s[0] + s[0] + s[1] + s[1] + s[2] + s[2] + rgb = tuple(int(s[i:i+2], 16) for i in (0, 2, 4)) + # sRGB to RGB + # return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 2.2) + return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 1.0) + +def parse_int(s): + """ + trim alphabets + """ + return int(float(''.join(i for i in s if (not i.isalpha())))) + +def parse_color(s, defs): + if s is None: + return None + if isinstance(s, torch.Tensor): + return s + s = s.lstrip(' ') + color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + if s[0] == '#': + color[:3] = parse_hex(s) + elif s[:3] == 'url': + # url(#id) + color = defs[s[4:-1].lstrip('#')] + elif s == 'none': + color = None + elif s[:4] == 'rgb(': + rgb = s[4:-1].split(',') + color = torch.tensor([int(rgb[0]) / 255.0, int(rgb[1]) / 255.0, int(rgb[2]) / 255.0, 1.0]) + elif s == 'none': + return None + else: + try : + rgba = matplotlib.colors.to_rgba(s) + color = torch.tensor(rgba) + except ValueError : + warnings.warn('Unknown color command ' + s) + return color + +# https://github.com/mathandy/svgpathtools/blob/7ebc56a831357379ff22216bec07e2c12e8c5bc6/svgpathtools/parser.py +def _parse_transform_substr(transform_substr): + type_str, value_str = transform_substr.split('(') + value_str = value_str.replace(',', ' ') + values = list(map(float, filter(None, value_str.split(' ')))) + + transform = np.identity(3) + if 'matrix' in type_str: + 
transform[0:2, 0:3] = np.array([values[0:6:2], values[1:6:2]]) + elif 'translate' in transform_substr: + transform[0, 2] = values[0] + if len(values) > 1: + transform[1, 2] = values[1] + elif 'scale' in transform_substr: + x_scale = values[0] + y_scale = values[1] if (len(values) > 1) else x_scale + transform[0, 0] = x_scale + transform[1, 1] = y_scale + elif 'rotate' in transform_substr: + angle = values[0] * np.pi / 180.0 + if len(values) == 3: + offset = values[1:3] + else: + offset = (0, 0) + tf_offset = np.identity(3) + tf_offset[0:2, 2:3] = np.array([[offset[0]], [offset[1]]]) + tf_rotate = np.identity(3) + tf_rotate[0:2, 0:2] = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) + tf_offset_neg = np.identity(3) + tf_offset_neg[0:2, 2:3] = np.array([[-offset[0]], [-offset[1]]]) + + transform = tf_offset.dot(tf_rotate).dot(tf_offset_neg) + elif 'skewX' in transform_substr: + transform[0, 1] = np.tan(values[0] * np.pi / 180.0) + elif 'skewY' in transform_substr: + transform[1, 0] = np.tan(values[0] * np.pi / 180.0) + else: + # Return an identity matrix if the type of transform is unknown, and warn the user + warnings.warn('Unknown SVG transform type: {0}'.format(type_str)) + return transform + +def parse_transform(transform_str): + """ + Converts a valid SVG transformation string into a 3x3 matrix. 
+ If the string is empty or null, this returns a 3x3 identity matrix + """ + if not transform_str: + return np.identity(3) + elif not isinstance(transform_str, str): + raise TypeError('Must provide a string to parse') + + total_transform = np.identity(3) + transform_substrs = transform_str.split(')')[:-1] # Skip the last element, because it should be empty + for substr in transform_substrs: + total_transform = total_transform.dot(_parse_transform_substr(substr)) + + return torch.from_numpy(total_transform).type(torch.float32) + +def parse_linear_gradient(node, transform, defs): + begin = torch.tensor([0.0, 0.0]) + end = torch.tensor([0.0, 0.0]) + offsets = [] + stop_colors = [] + # Inherit from parent + for key in node.attrib: + if remove_namespaces(key) == 'href': + value = node.attrib[key] + parent = defs[value.lstrip('#')] + begin = parent.begin + end = parent.end + offsets = parent.offsets + stop_colors = parent.stop_colors + + for attrib in node.attrib: + attrib = remove_namespaces(attrib) + if attrib == 'x1': + begin[0] = float(node.attrib['x1']) + elif attrib == 'y1': + begin[1] = float(node.attrib['y1']) + elif attrib == 'x2': + end[0] = float(node.attrib['x2']) + elif attrib == 'y2': + end[1] = float(node.attrib['y2']) + elif attrib == 'gradientTransform': + transform = transform @ parse_transform(node.attrib['gradientTransform']) + + begin = transform @ torch.cat((begin, torch.ones([1]))) + begin = begin / begin[2] + begin = begin[:2] + end = transform @ torch.cat((end, torch.ones([1]))) + end = end / end[2] + end = end[:2] + + for child in node: + tag = remove_namespaces(child.tag) + if tag == 'stop': + offset = float(child.attrib['offset']) + color = [0.0, 0.0, 0.0, 1.0] + if 'stop-color' in child.attrib: + c = parse_color(child.attrib['stop-color'], defs) + color[:3] = [c[0], c[1], c[2]] + if 'stop-opacity' in child.attrib: + color[3] = float(child.attrib['stop-opacity']) + if 'style' in child.attrib: + style = parse_style(child.attrib['style'], defs) 
+ if 'stop-color' in style: + c = parse_color(style['stop-color'], defs) + color[:3] = [c[0], c[1], c[2]] + if 'stop-opacity' in style: + color[3] = float(style['stop-opacity']) + offsets.append(offset) + stop_colors.append(color) + if isinstance(offsets, list): + offsets = torch.tensor(offsets) + if isinstance(stop_colors, list): + stop_colors = torch.tensor(stop_colors) + + return pydiffvg.LinearGradient(begin, end, offsets, stop_colors) + + +def parse_radial_gradient(node, transform, defs): + begin = torch.tensor([0.0, 0.0]) + end = torch.tensor([0.0, 0.0]) + center = torch.tensor([0.0, 0.0]) + radius = torch.tensor([0.0, 0.0]) + offsets = [] + stop_colors = [] + # Inherit from parent + for key in node.attrib: + if remove_namespaces(key) == 'href': + value = node.attrib[key] + parent = defs[value.lstrip('#')] + begin = parent.begin + end = parent.end + offsets = parent.offsets + stop_colors = parent.stop_colors + + for attrib in node.attrib: + attrib = remove_namespaces(attrib) + if attrib == 'cx': + center[0] = float(node.attrib['cx']) + elif attrib == 'cy': + center[1] = float(node.attrib['cy']) + elif attrib == 'fx': + radius[0] = float(node.attrib['fx']) + elif attrib == 'fy': + radius[1] = float(node.attrib['fy']) + elif attrib == 'fr': + radius[0] = float(node.attrib['fr']) + radius[1] = float(node.attrib['fr']) + elif attrib == 'gradientTransform': + transform = transform @ parse_transform(node.attrib['gradientTransform']) + + # TODO: this is incorrect + center = transform @ torch.cat((center, torch.ones([1]))) + center = center / center[2] + center = center[:2] + + for child in node: + tag = remove_namespaces(child.tag) + if tag == 'stop': + offset = float(child.attrib['offset']) + color = [0.0, 0.0, 0.0, 1.0] + if 'stop-color' in child.attrib: + c = parse_color(child.attrib['stop-color'], defs) + color[:3] = [c[0], c[1], c[2]] + if 'stop-opacity' in child.attrib: + color[3] = float(child.attrib['stop-opacity']) + if 'style' in child.attrib: + style = 
parse_style(child.attrib['style'], defs) + if 'stop-color' in style: + c = parse_color(style['stop-color'], defs) + color[:3] = [c[0], c[1], c[2]] + if 'stop-opacity' in style: + color[3] = float(style['stop-opacity']) + offsets.append(offset) + stop_colors.append(color) + if isinstance(offsets, list): + offsets = torch.tensor(offsets) + if isinstance(stop_colors, list): + stop_colors = torch.tensor(stop_colors) + + return pydiffvg.RadialGradient(begin, end, offsets, stop_colors) + +def parse_stylesheet(node, transform, defs): + # collect CSS classes + sheet = cssutils.parseString(node.text) + for rule in sheet: + if hasattr(rule, 'selectorText') and hasattr(rule, 'style'): + name = rule.selectorText + if len(name) >= 2 and name[0] == '.': + defs[name[1:]] = parse_style(rule.style.getCssText(), defs) + return defs + +def parse_defs(node, transform, defs): + for child in node: + tag = remove_namespaces(child.tag) + if tag == 'linearGradient': + if 'id' in child.attrib: + defs[child.attrib['id']] = parse_linear_gradient(child, transform, defs) + elif tag == 'radialGradient': + if 'id' in child.attrib: + defs[child.attrib['id']] = parse_radial_gradient(child, transform, defs) + elif tag == 'style': + defs = parse_stylesheet(child, transform, defs) + return defs + +def parse_common_attrib(node, transform, fill_color, defs): + attribs = {} + if 'class' in node.attrib: + attribs.update(defs[node.attrib['class']]) + attribs.update(node.attrib) + + name = '' + if 'id' in node.attrib: + name = node.attrib['id'] + + stroke_color = None + stroke_width = torch.tensor(0.5) + use_even_odd_rule = False + + new_transform = transform + if 'transform' in attribs: + new_transform = transform @ parse_transform(attribs['transform']) + if 'fill' in attribs: + fill_color = parse_color(attribs['fill'], defs) + fill_opacity = 1.0 + if 'fill-opacity' in attribs: + fill_opacity *= float(attribs['fill-opacity']) + if 'opacity' in attribs: + fill_opacity *= float(attribs['opacity']) + # Ignore 
opacity if the color is a gradient + if isinstance(fill_color, torch.Tensor): + fill_color[3] = fill_opacity + + if 'fill-rule' in attribs: + if attribs['fill-rule'] == "evenodd": + use_even_odd_rule = True + elif attribs['fill-rule'] == "nonzero": + use_even_odd_rule = False + else: + warnings.warn('Unknown fill-rule: {}'.format(attribs['fill-rule'])) + + if 'stroke' in attribs: + stroke_color = parse_color(attribs['stroke'], defs) + + if 'stroke-width' in attribs: + stroke_width = attribs['stroke-width'] + if stroke_width[-2:] == 'px': + stroke_width = stroke_width[:-2] + stroke_width = torch.tensor(float(stroke_width) / 2.0) + + if 'stroke-opacity' in attribs: + stroke_color[3] = torch.tensor(float(attribs['stroke-opacity'])) + + if 'style' in attribs: + style = parse_style(attribs['style'], defs) + if 'fill' in style: + fill_color = parse_color(style['fill'], defs) + fill_opacity = 1.0 + if 'fill-opacity' in style: + fill_opacity *= float(style['fill-opacity']) + if 'opacity' in style: + fill_opacity *= float(style['opacity']) + if 'fill-rule' in style: + if style['fill-rule'] == "evenodd": + use_even_odd_rule = True + elif style['fill-rule'] == "nonzero": + use_even_odd_rule = False + else: + warnings.warn('Unknown fill-rule: {}'.format(style['fill-rule'])) + # Ignore opacity if the color is a gradient + if isinstance(fill_color, torch.Tensor): + fill_color[3] = fill_opacity + if 'stroke' in style: + if style['stroke'] != 'none': + stroke_color = parse_color(style['stroke'], defs) + # Ignore opacity if the color is a gradient + if isinstance(stroke_color, torch.Tensor): + if 'stroke-opacity' in style: + stroke_color[3] = float(style['stroke-opacity']) + if 'opacity' in style: + stroke_color[3] *= float(style['opacity']) + if 'stroke-width' in style: + stroke_width = style['stroke-width'] + if stroke_width[-2:] == 'px': + stroke_width = stroke_width[:-2] + stroke_width = torch.tensor(float(stroke_width) / 2.0) + + if isinstance(fill_color, 
pydiffvg.LinearGradient): + fill_color.begin = new_transform @ torch.cat((fill_color.begin, torch.ones([1]))) + fill_color.begin = fill_color.begin / fill_color.begin[2] + fill_color.begin = fill_color.begin[:2] + fill_color.end = new_transform @ torch.cat((fill_color.end, torch.ones([1]))) + fill_color.end = fill_color.end / fill_color.end[2] + fill_color.end = fill_color.end[:2] + if isinstance(stroke_color, pydiffvg.LinearGradient): + stroke_color.begin = new_transform @ torch.cat((stroke_color.begin, torch.ones([1]))) + stroke_color.begin = stroke_color.begin / stroke_color.begin[2] + stroke_color.begin = stroke_color.begin[:2] + stroke_color.end = new_transform @ torch.cat((stroke_color.end, torch.ones([1]))) + stroke_color.end = stroke_color.end / stroke_color.end[2] + stroke_color.end = stroke_color.end[:2] + if 'filter' in style: + print('*** WARNING ***: Ignoring filter for path with id "{}"'.format(name)) + + return new_transform, fill_color, stroke_color, stroke_width, use_even_odd_rule + +def is_shape(tag): + return tag == 'path' or tag == 'polygon' or tag == 'line' or tag == 'circle' or tag == 'rect' + +def parse_shape(node, transform, fill_color, shapes, shape_groups, defs): + tag = remove_namespaces(node.tag) + new_transform, new_fill_color, stroke_color, stroke_width, use_even_odd_rule = \ + parse_common_attrib(node, transform, fill_color, defs) + if tag == 'path': + d = node.attrib['d'] + name = '' + if 'id' in node.attrib: + name = node.attrib['id'] + force_closing = new_fill_color is not None + paths = pydiffvg.from_svg_path(d, new_transform, force_closing) + for idx, path in enumerate(paths): + assert(path.points.shape[1] == 2) + path.stroke_width = stroke_width + path.source_id = name + path.id = "{}-{}".format(name,idx) if len(paths)>1 else name + prev_shapes_size = len(shapes) + shapes = shapes + paths + shape_ids = torch.tensor(list(range(prev_shapes_size, len(shapes)))) + shape_groups.append(pydiffvg.ShapeGroup(\ + shape_ids = shape_ids, + 
fill_color = new_fill_color, + stroke_color = stroke_color, + use_even_odd_rule = use_even_odd_rule, + id = name)) + elif tag == 'polygon': + name = '' + if 'id' in node.attrib: + name = node.attrib['id'] + force_closing = new_fill_color is not None + pts = node.attrib['points'].strip() + pts = pts.split(' ') + # import ipdb; ipdb.set_trace() + pts = [[float(y) for y in re.split(',| ', x)] for x in pts if x] + pts = torch.tensor(pts, dtype=torch.float32).view(-1, 2) + polygon = pydiffvg.Polygon(pts, force_closing) + polygon.stroke_width = stroke_width + shape_ids = torch.tensor([len(shapes)]) + shapes.append(polygon) + shape_groups.append(pydiffvg.ShapeGroup(\ + shape_ids = shape_ids, + fill_color = new_fill_color, + stroke_color = stroke_color, + use_even_odd_rule = use_even_odd_rule, + shape_to_canvas = new_transform, + id = name)) + elif tag == 'line': + x1 = float(node.attrib['x1']) + y1 = float(node.attrib['y1']) + x2 = float(node.attrib['x2']) + y2 = float(node.attrib['y2']) + p1 = torch.tensor([x1, y1]) + p2 = torch.tensor([x2, y2]) + points = torch.stack((p1, p2)) + line = pydiffvg.Polygon(points, False) + line.stroke_width = stroke_width + shape_ids = torch.tensor([len(shapes)]) + shapes.append(line) + shape_groups.append(pydiffvg.ShapeGroup(\ + shape_ids = shape_ids, + fill_color = new_fill_color, + stroke_color = stroke_color, + use_even_odd_rule = use_even_odd_rule, + shape_to_canvas = new_transform)) + elif tag == 'circle': + radius = float(node.attrib['r']) + cx = float(node.attrib['cx']) + cy = float(node.attrib['cy']) + name = '' + if 'id' in node.attrib: + name = node.attrib['id'] + center = torch.tensor([cx, cy]) + circle = pydiffvg.Circle(radius = torch.tensor(radius), + center = center) + circle.stroke_width = stroke_width + shape_ids = torch.tensor([len(shapes)]) + shapes.append(circle) + shape_groups.append(pydiffvg.ShapeGroup(\ + shape_ids = shape_ids, + fill_color = new_fill_color, + stroke_color = stroke_color, + use_even_odd_rule = 
use_even_odd_rule, + shape_to_canvas = new_transform)) + elif tag == 'ellipse': + rx = float(node.attrib['rx']) + ry = float(node.attrib['ry']) + cx = float(node.attrib['cx']) + cy = float(node.attrib['cy']) + name = '' + if 'id' in node.attrib: + name = node.attrib['id'] + center = torch.tensor([cx, cy]) + circle = pydiffvg.Circle(radius = torch.tensor(radius), + center = center) + circle.stroke_width = stroke_width + shape_ids = torch.tensor([len(shapes)]) + shapes.append(circle) + shape_groups.append(pydiffvg.ShapeGroup(\ + shape_ids = shape_ids, + fill_color = new_fill_color, + stroke_color = stroke_color, + use_even_odd_rule = use_even_odd_rule, + shape_to_canvas = new_transform)) + elif tag == 'rect': + x = 0.0 + y = 0.0 + if x in node.attrib: + x = float(node.attrib['x']) + if y in node.attrib: + y = float(node.attrib['y']) + w = float(node.attrib['width']) + h = float(node.attrib['height']) + p_min = torch.tensor([x, y]) + p_max = torch.tensor([x + w, x + h]) + rect = pydiffvg.Rect(p_min = p_min, p_max = p_max) + rect.stroke_width = stroke_width + shape_ids = torch.tensor([len(shapes)]) + shapes.append(rect) + shape_groups.append(pydiffvg.ShapeGroup(\ + shape_ids = shape_ids, + fill_color = new_fill_color, + stroke_color = stroke_color, + use_even_odd_rule = use_even_odd_rule, + shape_to_canvas = new_transform)) + return shapes, shape_groups + +def parse_group(node, transform, fill_color, shapes, shape_groups, defs): + if 'transform' in node.attrib: + transform = transform @ parse_transform(node.attrib['transform']) + if 'fill' in node.attrib: + fill_color = parse_color(node.attrib['fill'], defs) + for child in node: + tag = remove_namespaces(child.tag) + if is_shape(tag): + shapes, shape_groups = parse_shape(\ + child, transform, fill_color, shapes, shape_groups, defs) + elif tag == 'g': + shapes, shape_groups = parse_group(\ + child, transform, fill_color, shapes, shape_groups, defs) + return shapes, shape_groups + +def parse_scene(node): + canvas_width = 
-1 + canvas_height = -1 + defs = {} + shapes = [] + shape_groups = [] + fill_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + transform = torch.eye(3) + if 'viewBox' in node.attrib: + view_box_array = node.attrib['viewBox'].split() + canvas_width = parse_int(view_box_array[2]) + canvas_height = parse_int(view_box_array[3]) + else: + if 'width' in node.attrib: + canvas_width = parse_int(node.attrib['width']) + else: + print('Warning: Can\'t find canvas width.') + if 'height' in node.attrib: + canvas_height = parse_int(node.attrib['height']) + else: + print('Warning: Can\'t find canvas height.') + for child in node: + tag = remove_namespaces(child.tag) + if tag == 'defs': + defs = parse_defs(child, transform, defs) + elif tag == 'style': + defs = parse_stylesheet(child, transform, defs) + elif tag == 'linearGradient': + if 'id' in child.attrib: + defs[child.attrib['id']] = parse_linear_gradient(child, transform, defs) + elif tag == 'radialGradient': + if 'id' in child.attrib: + defs[child.attrib['id']] = parse_radial_gradient(child, transform, defs) + elif is_shape(tag): + shapes, shape_groups = parse_shape(\ + child, transform, fill_color, shapes, shape_groups, defs) + elif tag == 'g': + shapes, shape_groups = parse_group(\ + child, transform, fill_color, shapes, shape_groups, defs) + return canvas_width, canvas_height, shapes, shape_groups + +def svg_to_scene(filename): + """ + Load from a SVG file and convert to PyTorch tensors. 
+ """ + + tree = etree.parse(filename) + root = tree.getroot() + cwd = os.getcwd() + if (os.path.dirname(filename) != ''): + os.chdir(os.path.dirname(filename)) + ret = parse_scene(root) + os.chdir(cwd) + return ret diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/pixel_filter.py b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/pixel_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..9b0ff22507613e01a0fb9ac9701d1c49c68266e8 --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/pixel_filter.py @@ -0,0 +1,9 @@ +import torch +import pydiffvg + +class PixelFilter: + def __init__(self, + type, + radius = torch.tensor(0.5)): + self.type = type + self.radius = radius diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/render_pytorch.py b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/render_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..a686fb1ab51356910722b2d96a0c8fedc6cc1090 --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/render_pytorch.py @@ -0,0 +1,868 @@ +import torch +import diffvg +import pydiffvg +import time +from enum import IntEnum +import warnings + +print_timing = False + +def set_print_timing(val): + global print_timing + print_timing=val + +class OutputType(IntEnum): + color = 1 + sdf = 2 + +class RenderFunction(torch.autograd.Function): + """ + The PyTorch interface of diffvg. + """ + @staticmethod + def serialize_scene(canvas_width, + canvas_height, + shapes, + shape_groups, + filter = pydiffvg.PixelFilter(type = diffvg.FilterType.box, + radius = torch.tensor(0.5)), + output_type = OutputType.color, + use_prefiltering = False, + eval_positions = torch.tensor([])): + """ + Given a list of shapes, convert them to a linear list of argument, + so that we can use it in PyTorch. 
+ """ + num_shapes = len(shapes) + num_shape_groups = len(shape_groups) + args = [] + args.append(canvas_width) + args.append(canvas_height) + args.append(num_shapes) + args.append(num_shape_groups) + args.append(output_type) + args.append(use_prefiltering) + args.append(eval_positions.to(pydiffvg.get_device())) + for shape in shapes: + use_thickness = False + if isinstance(shape, pydiffvg.Circle): + assert(shape.center.is_contiguous()) + args.append(diffvg.ShapeType.circle) + args.append(shape.radius.cpu()) + args.append(shape.center.cpu()) + elif isinstance(shape, pydiffvg.Ellipse): + assert(shape.radius.is_contiguous()) + assert(shape.center.is_contiguous()) + args.append(diffvg.ShapeType.ellipse) + args.append(shape.radius.cpu()) + args.append(shape.center.cpu()) + elif isinstance(shape, pydiffvg.Path): + assert(shape.num_control_points.is_contiguous()) + assert(shape.points.is_contiguous()) + assert(shape.points.shape[1] == 2) + assert(torch.isfinite(shape.points).all()) + args.append(diffvg.ShapeType.path) + args.append(shape.num_control_points.to(torch.int32).cpu()) + args.append(shape.points.cpu()) + if len(shape.stroke_width.shape) > 0 and shape.stroke_width.shape[0] > 1: + assert(torch.isfinite(shape.stroke_width).all()) + use_thickness = True + args.append(shape.stroke_width.cpu()) + else: + args.append(None) + args.append(shape.is_closed) + args.append(shape.use_distance_approx) + elif isinstance(shape, pydiffvg.Polygon): + assert(shape.points.is_contiguous()) + assert(shape.points.shape[1] == 2) + args.append(diffvg.ShapeType.path) + if shape.is_closed: + args.append(torch.zeros(shape.points.shape[0], dtype = torch.int32)) + else: + args.append(torch.zeros(shape.points.shape[0] - 1, dtype = torch.int32)) + args.append(shape.points.cpu()) + args.append(None) + args.append(shape.is_closed) + args.append(False) # use_distance_approx + elif isinstance(shape, pydiffvg.Rect): + assert(shape.p_min.is_contiguous()) + assert(shape.p_max.is_contiguous()) + 
args.append(diffvg.ShapeType.rect) + args.append(shape.p_min.cpu()) + args.append(shape.p_max.cpu()) + else: + assert(False) + if use_thickness: + args.append(torch.tensor(0.0)) + else: + args.append(shape.stroke_width.cpu()) + + for shape_group in shape_groups: + assert(shape_group.shape_ids.is_contiguous()) + args.append(shape_group.shape_ids.to(torch.int32).cpu()) + # Fill color + if shape_group.fill_color is None: + args.append(None) + elif isinstance(shape_group.fill_color, torch.Tensor): + assert(shape_group.fill_color.is_contiguous()) + args.append(diffvg.ColorType.constant) + args.append(shape_group.fill_color.cpu()) + elif isinstance(shape_group.fill_color, pydiffvg.LinearGradient): + assert(shape_group.fill_color.begin.is_contiguous()) + assert(shape_group.fill_color.end.is_contiguous()) + assert(shape_group.fill_color.offsets.is_contiguous()) + assert(shape_group.fill_color.stop_colors.is_contiguous()) + args.append(diffvg.ColorType.linear_gradient) + args.append(shape_group.fill_color.begin.cpu()) + args.append(shape_group.fill_color.end.cpu()) + args.append(shape_group.fill_color.offsets.cpu()) + args.append(shape_group.fill_color.stop_colors.cpu()) + elif isinstance(shape_group.fill_color, pydiffvg.RadialGradient): + assert(shape_group.fill_color.center.is_contiguous()) + assert(shape_group.fill_color.radius.is_contiguous()) + assert(shape_group.fill_color.offsets.is_contiguous()) + assert(shape_group.fill_color.stop_colors.is_contiguous()) + args.append(diffvg.ColorType.radial_gradient) + args.append(shape_group.fill_color.center.cpu()) + args.append(shape_group.fill_color.radius.cpu()) + args.append(shape_group.fill_color.offsets.cpu()) + args.append(shape_group.fill_color.stop_colors.cpu()) + + if shape_group.fill_color is not None: + # go through the underlying shapes and check if they are all closed + for shape_id in shape_group.shape_ids: + if isinstance(shapes[shape_id], pydiffvg.Path): + if not shapes[shape_id].is_closed: + 
    @staticmethod
    def forward(ctx,
                width,
                height,
                num_samples_x,
                num_samples_y,
                seed,
                background_image,
                *args):
        """
        Forward rendering pass.

        `args` is the flat list produced by serialize_scene(); it is unpacked
        positionally, so the read order here must mirror the append order
        there.  Returns the rendered image: (height, width, 4) for color
        output, (height, width, 1) or (num_eval_positions, 1) for SDF output.
        """
        # Unpack arguments
        current_index = 0
        canvas_width = args[current_index]
        current_index += 1
        canvas_height = args[current_index]
        current_index += 1
        num_shapes = args[current_index]
        current_index += 1
        num_shape_groups = args[current_index]
        current_index += 1
        output_type = args[current_index]
        current_index += 1
        use_prefiltering = args[current_index]
        current_index += 1
        eval_positions = args[current_index]
        current_index += 1
        shapes = []
        shape_groups = []
        shape_contents = [] # Important to avoid GC deleting the shapes
        color_contents = [] # Same as above
        for shape_id in range(num_shapes):
            shape_type = args[current_index]
            current_index += 1
            if shape_type == diffvg.ShapeType.circle:
                radius = args[current_index]
                current_index += 1
                center = args[current_index]
                current_index += 1
                shape = diffvg.Circle(radius, diffvg.Vector2f(center[0], center[1]))
            elif shape_type == diffvg.ShapeType.ellipse:
                radius = args[current_index]
                current_index += 1
                center = args[current_index]
                current_index += 1
                shape = diffvg.Ellipse(diffvg.Vector2f(radius[0], radius[1]),
                                       diffvg.Vector2f(center[0], center[1]))
            elif shape_type == diffvg.ShapeType.path:
                num_control_points = args[current_index]
                current_index += 1
                points = args[current_index]
                current_index += 1
                thickness = args[current_index]
                current_index += 1
                is_closed = args[current_index]
                current_index += 1
                use_distance_approx = args[current_index]
                current_index += 1
                # Raw data_ptr()s are handed to the C++ side; the Python
                # tensors must stay alive for as long as the Scene does
                # (hence shape_contents below).
                shape = diffvg.Path(diffvg.int_ptr(num_control_points.data_ptr()),
                                    diffvg.float_ptr(points.data_ptr()),
                                    diffvg.float_ptr(thickness.data_ptr() if thickness is not None else 0),
                                    num_control_points.shape[0],
                                    points.shape[0],
                                    is_closed,
                                    use_distance_approx)
            elif shape_type == diffvg.ShapeType.rect:
                p_min = args[current_index]
                current_index += 1
                p_max = args[current_index]
                current_index += 1
                shape = diffvg.Rect(diffvg.Vector2f(p_min[0], p_min[1]),
                                    diffvg.Vector2f(p_max[0], p_max[1]))
            else:
                assert(False)
            stroke_width = args[current_index]
            current_index += 1
            shapes.append(diffvg.Shape(\
                shape_type, shape.get_ptr(), stroke_width.item()))
            shape_contents.append(shape)

        for shape_group_id in range(num_shape_groups):
            shape_ids = args[current_index]
            current_index += 1
            fill_color_type = args[current_index]
            current_index += 1
            if fill_color_type == diffvg.ColorType.constant:
                color = args[current_index]
                current_index += 1
                fill_color = diffvg.Constant(\
                    diffvg.Vector4f(color[0], color[1], color[2], color[3]))
            elif fill_color_type == diffvg.ColorType.linear_gradient:
                beg = args[current_index]
                current_index += 1
                end = args[current_index]
                current_index += 1
                offsets = args[current_index]
                current_index += 1
                stop_colors = args[current_index]
                current_index += 1
                assert(offsets.shape[0] == stop_colors.shape[0])
                fill_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]),
                                                   diffvg.Vector2f(end[0], end[1]),
                                                   offsets.shape[0],
                                                   diffvg.float_ptr(offsets.data_ptr()),
                                                   diffvg.float_ptr(stop_colors.data_ptr()))
            elif fill_color_type == diffvg.ColorType.radial_gradient:
                center = args[current_index]
                current_index += 1
                radius = args[current_index]
                current_index += 1
                offsets = args[current_index]
                current_index += 1
                stop_colors = args[current_index]
                current_index += 1
                assert(offsets.shape[0] == stop_colors.shape[0])
                fill_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]),
                                                   diffvg.Vector2f(radius[0], radius[1]),
                                                   offsets.shape[0],
                                                   diffvg.float_ptr(offsets.data_ptr()),
                                                   diffvg.float_ptr(stop_colors.data_ptr()))
            elif fill_color_type is None:
                fill_color = None
            else:
                assert(False)
            stroke_color_type = args[current_index]
            current_index += 1
            if stroke_color_type == diffvg.ColorType.constant:
                color = args[current_index]
                current_index += 1
                stroke_color = diffvg.Constant(\
                    diffvg.Vector4f(color[0], color[1], color[2], color[3]))
            elif stroke_color_type == diffvg.ColorType.linear_gradient:
                beg = args[current_index]
                current_index += 1
                end = args[current_index]
                current_index += 1
                offsets = args[current_index]
                current_index += 1
                stop_colors = args[current_index]
                current_index += 1
                assert(offsets.shape[0] == stop_colors.shape[0])
                stroke_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]),
                                                     diffvg.Vector2f(end[0], end[1]),
                                                     offsets.shape[0],
                                                     diffvg.float_ptr(offsets.data_ptr()),
                                                     diffvg.float_ptr(stop_colors.data_ptr()))
            elif stroke_color_type == diffvg.ColorType.radial_gradient:
                center = args[current_index]
                current_index += 1
                radius = args[current_index]
                current_index += 1
                offsets = args[current_index]
                current_index += 1
                stop_colors = args[current_index]
                current_index += 1
                assert(offsets.shape[0] == stop_colors.shape[0])
                stroke_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]),
                                                     diffvg.Vector2f(radius[0], radius[1]),
                                                     offsets.shape[0],
                                                     diffvg.float_ptr(offsets.data_ptr()),
                                                     diffvg.float_ptr(stop_colors.data_ptr()))
            elif stroke_color_type is None:
                stroke_color = None
            else:
                assert(False)
            use_even_odd_rule = args[current_index]
            current_index += 1
            shape_to_canvas = args[current_index]
            current_index += 1

            if fill_color is not None:
                color_contents.append(fill_color)
            if stroke_color is not None:
                color_contents.append(stroke_color)
            # ColorType.constant is used as a harmless placeholder tag when
            # the corresponding color is absent (the pointer is null then).
            shape_groups.append(diffvg.ShapeGroup(\
                diffvg.int_ptr(shape_ids.data_ptr()),
                shape_ids.shape[0],
                diffvg.ColorType.constant if fill_color_type is None else fill_color_type,
                diffvg.void_ptr(0) if fill_color is None else fill_color.get_ptr(),
                diffvg.ColorType.constant if stroke_color_type is None else stroke_color_type,
                diffvg.void_ptr(0) if stroke_color is None else stroke_color.get_ptr(),
                use_even_odd_rule,
                diffvg.float_ptr(shape_to_canvas.data_ptr())))

        filter_type = args[current_index]
        current_index += 1
        filter_radius = args[current_index]
        current_index += 1
        filt = diffvg.Filter(filter_type, filter_radius)

        start = time.time()
        scene = diffvg.Scene(canvas_width, canvas_height,
            shapes, shape_groups, filt, pydiffvg.get_use_gpu(),
            pydiffvg.get_device().index if pydiffvg.get_device().index is not None else -1)
        time_elapsed = time.time() - start
        global print_timing
        if print_timing:
            print('Scene construction, time: %.5f s' % time_elapsed)

        if output_type == OutputType.color:
            assert(eval_positions.shape[0] == 0)
            rendered_image = torch.zeros(height, width, 4, device = pydiffvg.get_device())
        else:
            assert(output_type == OutputType.sdf)
            if eval_positions.shape[0] == 0:
                rendered_image = torch.zeros(height, width, 1, device = pydiffvg.get_device())
            else:
                rendered_image = torch.zeros(eval_positions.shape[0], 1, device = pydiffvg.get_device())

        if background_image is not None:
            background_image = background_image.to(pydiffvg.get_device())
            if background_image.shape[2] == 3:
                raise NotImplementedError('Background image must have 4 channels, not 3. Add a fourth channel with all ones via torch.ones().')
            background_image = background_image.contiguous()
            assert(background_image.shape[0] == rendered_image.shape[0])
            assert(background_image.shape[1] == rendered_image.shape[1])
            assert(background_image.shape[2] == 4)

        start = time.time()
        diffvg.render(scene,
                      diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0),
                      diffvg.float_ptr(rendered_image.data_ptr() if output_type == OutputType.color else 0),
                      diffvg.float_ptr(rendered_image.data_ptr() if output_type == OutputType.sdf else 0),
                      width,
                      height,
                      num_samples_x,
                      num_samples_y,
                      seed,
                      diffvg.float_ptr(0), # d_background_image
                      diffvg.float_ptr(0), # d_render_image
                      diffvg.float_ptr(0), # d_render_sdf
                      diffvg.float_ptr(0), # d_translation
                      use_prefiltering,
                      diffvg.float_ptr(eval_positions.data_ptr()),
                      eval_positions.shape[0])
        assert(torch.isfinite(rendered_image).all())
        time_elapsed = time.time() - start
        if print_timing:
            print('Forward pass, time: %.5f s' % time_elapsed)

        # Stash everything backward() needs; shape_contents/color_contents
        # keep the C++-referenced tensors and wrappers alive.
        ctx.scene = scene
        ctx.background_image = background_image
        ctx.shape_contents = shape_contents
        ctx.color_contents = color_contents
        ctx.filter = filt
        ctx.width = width
        ctx.height = height
        ctx.num_samples_x = num_samples_x
        ctx.num_samples_y = num_samples_y
        ctx.seed = seed
        ctx.output_type = output_type
        ctx.use_prefiltering = use_prefiltering
        ctx.eval_positions = eval_positions
        return rendered_image
Add a fourth channel with all ones via torch.ones().') + background_image = background_image.contiguous() + assert(background_image.shape[0] == rendered_image.shape[0]) + assert(background_image.shape[1] == rendered_image.shape[1]) + assert(background_image.shape[2] == 4) + + start = time.time() + diffvg.render(scene, + diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0), + diffvg.float_ptr(rendered_image.data_ptr() if output_type == OutputType.color else 0), + diffvg.float_ptr(rendered_image.data_ptr() if output_type == OutputType.sdf else 0), + width, + height, + num_samples_x, + num_samples_y, + seed, + diffvg.float_ptr(0), # d_background_image + diffvg.float_ptr(0), # d_render_image + diffvg.float_ptr(0), # d_render_sdf + diffvg.float_ptr(0), # d_translation + use_prefiltering, + diffvg.float_ptr(eval_positions.data_ptr()), + eval_positions.shape[0]) + assert(torch.isfinite(rendered_image).all()) + time_elapsed = time.time() - start + if print_timing: + print('Forward pass, time: %.5f s' % time_elapsed) + + ctx.scene = scene + ctx.background_image = background_image + ctx.shape_contents = shape_contents + ctx.color_contents = color_contents + ctx.filter = filt + ctx.width = width + ctx.height = height + ctx.num_samples_x = num_samples_x + ctx.num_samples_y = num_samples_y + ctx.seed = seed + ctx.output_type = output_type + ctx.use_prefiltering = use_prefiltering + ctx.eval_positions = eval_positions + return rendered_image + + @staticmethod + def render_grad(grad_img, + width, + height, + num_samples_x, + num_samples_y, + seed, + background_image, + *args): + if not grad_img.is_contiguous(): + grad_img = grad_img.contiguous() + assert(torch.isfinite(grad_img).all()) + + # Unpack arguments + current_index = 0 + canvas_width = args[current_index] + current_index += 1 + canvas_height = args[current_index] + current_index += 1 + num_shapes = args[current_index] + current_index += 1 + num_shape_groups = args[current_index] + 
current_index += 1 + output_type = args[current_index] + current_index += 1 + use_prefiltering = args[current_index] + current_index += 1 + eval_positions = args[current_index] + current_index += 1 + shapes = [] + shape_groups = [] + shape_contents = [] # Important to avoid GC deleting the shapes + color_contents = [] # Same as above + for shape_id in range(num_shapes): + shape_type = args[current_index] + current_index += 1 + if shape_type == diffvg.ShapeType.circle: + radius = args[current_index] + current_index += 1 + center = args[current_index] + current_index += 1 + shape = diffvg.Circle(radius, diffvg.Vector2f(center[0], center[1])) + elif shape_type == diffvg.ShapeType.ellipse: + radius = args[current_index] + current_index += 1 + center = args[current_index] + current_index += 1 + shape = diffvg.Ellipse(diffvg.Vector2f(radius[0], radius[1]), + diffvg.Vector2f(center[0], center[1])) + elif shape_type == diffvg.ShapeType.path: + num_control_points = args[current_index] + current_index += 1 + points = args[current_index] + current_index += 1 + thickness = args[current_index] + current_index += 1 + is_closed = args[current_index] + current_index += 1 + use_distance_approx = args[current_index] + current_index += 1 + shape = diffvg.Path(diffvg.int_ptr(num_control_points.data_ptr()), + diffvg.float_ptr(points.data_ptr()), + diffvg.float_ptr(thickness.data_ptr() if thickness is not None else 0), + num_control_points.shape[0], + points.shape[0], + is_closed, + use_distance_approx) + elif shape_type == diffvg.ShapeType.rect: + p_min = args[current_index] + current_index += 1 + p_max = args[current_index] + current_index += 1 + shape = diffvg.Rect(diffvg.Vector2f(p_min[0], p_min[1]), + diffvg.Vector2f(p_max[0], p_max[1])) + else: + assert(False) + stroke_width = args[current_index] + current_index += 1 + shapes.append(diffvg.Shape(\ + shape_type, shape.get_ptr(), stroke_width.item())) + shape_contents.append(shape) + + for shape_group_id in range(num_shape_groups): 
+ shape_ids = args[current_index] + current_index += 1 + fill_color_type = args[current_index] + current_index += 1 + if fill_color_type == diffvg.ColorType.constant: + color = args[current_index] + current_index += 1 + fill_color = diffvg.Constant(\ + diffvg.Vector4f(color[0], color[1], color[2], color[3])) + elif fill_color_type == diffvg.ColorType.linear_gradient: + beg = args[current_index] + current_index += 1 + end = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + fill_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), + diffvg.Vector2f(end[0], end[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif fill_color_type == diffvg.ColorType.radial_gradient: + center = args[current_index] + current_index += 1 + radius = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + fill_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), + diffvg.Vector2f(radius[0], radius[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif fill_color_type is None: + fill_color = None + else: + assert(False) + stroke_color_type = args[current_index] + current_index += 1 + if stroke_color_type == diffvg.ColorType.constant: + color = args[current_index] + current_index += 1 + stroke_color = diffvg.Constant(\ + diffvg.Vector4f(color[0], color[1], color[2], color[3])) + elif stroke_color_type == diffvg.ColorType.linear_gradient: + beg = args[current_index] + current_index += 1 + end = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + 
assert(offsets.shape[0] == stop_colors.shape[0]) + stroke_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), + diffvg.Vector2f(end[0], end[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif stroke_color_type == diffvg.ColorType.radial_gradient: + center = args[current_index] + current_index += 1 + radius = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + stroke_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), + diffvg.Vector2f(radius[0], radius[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif stroke_color_type is None: + stroke_color = None + else: + assert(False) + use_even_odd_rule = args[current_index] + current_index += 1 + shape_to_canvas = args[current_index] + current_index += 1 + + if fill_color is not None: + color_contents.append(fill_color) + if stroke_color is not None: + color_contents.append(stroke_color) + shape_groups.append(diffvg.ShapeGroup(\ + diffvg.int_ptr(shape_ids.data_ptr()), + shape_ids.shape[0], + diffvg.ColorType.constant if fill_color_type is None else fill_color_type, + diffvg.void_ptr(0) if fill_color is None else fill_color.get_ptr(), + diffvg.ColorType.constant if stroke_color_type is None else stroke_color_type, + diffvg.void_ptr(0) if stroke_color is None else stroke_color.get_ptr(), + use_even_odd_rule, + diffvg.float_ptr(shape_to_canvas.data_ptr()))) + + filter_type = args[current_index] + current_index += 1 + filter_radius = args[current_index] + current_index += 1 + filt = diffvg.Filter(filter_type, filter_radius) + + scene = diffvg.Scene(canvas_width, canvas_height, + shapes, shape_groups, filt, pydiffvg.get_use_gpu(), + pydiffvg.get_device().index if pydiffvg.get_device().index is not None else -1) + + if 
output_type == OutputType.color: + assert(grad_img.shape[2] == 4) + else: + assert(grad_img.shape[2] == 1) + + if background_image is not None: + background_image = background_image.to(pydiffvg.get_device()) + if background_image.shape[2] == 3: + background_image = torch.cat((\ + background_image, torch.ones(background_image.shape[0], background_image.shape[1], 1, + device = background_image.device)), dim = 2) + background_image = background_image.contiguous() + assert(background_image.shape[0] == rendered_image.shape[0]) + assert(background_image.shape[1] == rendered_image.shape[1]) + assert(background_image.shape[2] == 4) + + translation_grad_image = \ + torch.zeros(height, width, 2, device = pydiffvg.get_device()) + start = time.time() + diffvg.render(scene, + diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0), + diffvg.float_ptr(0), # render_image + diffvg.float_ptr(0), # render_sdf + width, + height, + num_samples_x, + num_samples_y, + seed, + diffvg.float_ptr(0), # d_background_image + diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.color else 0), + diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.sdf else 0), + diffvg.float_ptr(translation_grad_image.data_ptr()), + use_prefiltering, + diffvg.float_ptr(eval_positions.data_ptr()), + eval_positions.shape[0]) + time_elapsed = time.time() - start + if print_timing: + print('Gradient pass, time: %.5f s' % time_elapsed) + assert(torch.isfinite(translation_grad_image).all()) + + return translation_grad_image + + @staticmethod + def backward(ctx, + grad_img): + if not grad_img.is_contiguous(): + grad_img = grad_img.contiguous() + assert(torch.isfinite(grad_img).all()) + + scene = ctx.scene + width = ctx.width + height = ctx.height + num_samples_x = ctx.num_samples_x + num_samples_y = ctx.num_samples_y + seed = ctx.seed + output_type = ctx.output_type + use_prefiltering = ctx.use_prefiltering + eval_positions = ctx.eval_positions + background_image = 
ctx.background_image + + if background_image is not None: + d_background_image = torch.zeros_like(background_image) + else: + d_background_image = None + + start = time.time() + diffvg.render(scene, + diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0), + diffvg.float_ptr(0), # render_image + diffvg.float_ptr(0), # render_sdf + width, + height, + num_samples_x, + num_samples_y, + seed, + diffvg.float_ptr(d_background_image.data_ptr() if background_image is not None else 0), + diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.color else 0), + diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.sdf else 0), + diffvg.float_ptr(0), # d_translation + use_prefiltering, + diffvg.float_ptr(eval_positions.data_ptr()), + eval_positions.shape[0]) + time_elapsed = time.time() - start + global print_timing + if print_timing: + print('Backward pass, time: %.5f s' % time_elapsed) + + d_args = [] + d_args.append(None) # width + d_args.append(None) # height + d_args.append(None) # num_samples_x + d_args.append(None) # num_samples_y + d_args.append(None) # seed + d_args.append(d_background_image) + d_args.append(None) # canvas_width + d_args.append(None) # canvas_height + d_args.append(None) # num_shapes + d_args.append(None) # num_shape_groups + d_args.append(None) # output_type + d_args.append(None) # use_prefiltering + d_args.append(None) # eval_positions + for shape_id in range(scene.num_shapes): + d_args.append(None) # type + d_shape = scene.get_d_shape(shape_id) + use_thickness = False + if d_shape.type == diffvg.ShapeType.circle: + d_circle = d_shape.as_circle() + radius = torch.tensor(d_circle.radius) + assert(torch.isfinite(radius).all()) + d_args.append(radius) + c = d_circle.center + c = torch.tensor((c.x, c.y)) + assert(torch.isfinite(c).all()) + d_args.append(c) + elif d_shape.type == diffvg.ShapeType.ellipse: + d_ellipse = d_shape.as_ellipse() + r = d_ellipse.radius + r = torch.tensor((d_ellipse.radius.x, 
d_ellipse.radius.y)) + assert(torch.isfinite(r).all()) + d_args.append(r) + c = d_ellipse.center + c = torch.tensor((c.x, c.y)) + assert(torch.isfinite(c).all()) + d_args.append(c) + elif d_shape.type == diffvg.ShapeType.path: + d_path = d_shape.as_path() + points = torch.zeros((d_path.num_points, 2)) + thickness = None + if d_path.has_thickness(): + use_thickness = True + thickness = torch.zeros(d_path.num_points) + d_path.copy_to(diffvg.float_ptr(points.data_ptr()), diffvg.float_ptr(thickness.data_ptr())) + else: + d_path.copy_to(diffvg.float_ptr(points.data_ptr()), diffvg.float_ptr(0)) + assert(torch.isfinite(points).all()) + if thickness is not None: + assert(torch.isfinite(thickness).all()) + d_args.append(None) # num_control_points + d_args.append(points) + d_args.append(thickness) + d_args.append(None) # is_closed + d_args.append(None) # use_distance_approx + elif d_shape.type == diffvg.ShapeType.rect: + d_rect = d_shape.as_rect() + p_min = torch.tensor((d_rect.p_min.x, d_rect.p_min.y)) + p_max = torch.tensor((d_rect.p_max.x, d_rect.p_max.y)) + assert(torch.isfinite(p_min).all()) + assert(torch.isfinite(p_max).all()) + d_args.append(p_min) + d_args.append(p_max) + else: + assert(False) + if use_thickness: + d_args.append(None) + else: + w = torch.tensor((d_shape.stroke_width)) + assert(torch.isfinite(w).all()) + d_args.append(w) + + for group_id in range(scene.num_shape_groups): + d_shape_group = scene.get_d_shape_group(group_id) + d_args.append(None) # shape_ids + d_args.append(None) # fill_color_type + if d_shape_group.has_fill_color(): + if d_shape_group.fill_color_type == diffvg.ColorType.constant: + d_constant = d_shape_group.fill_color_as_constant() + c = d_constant.color + d_args.append(torch.tensor((c.x, c.y, c.z, c.w))) + elif d_shape_group.fill_color_type == diffvg.ColorType.linear_gradient: + d_linear_gradient = d_shape_group.fill_color_as_linear_gradient() + beg = d_linear_gradient.begin + d_args.append(torch.tensor((beg.x, beg.y))) + end = 
d_linear_gradient.end + d_args.append(torch.tensor((end.x, end.y))) + offsets = torch.zeros((d_linear_gradient.num_stops)) + stop_colors = torch.zeros((d_linear_gradient.num_stops, 4)) + d_linear_gradient.copy_to(\ + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + assert(torch.isfinite(stop_colors).all()) + d_args.append(offsets) + d_args.append(stop_colors) + elif d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: + d_radial_gradient = d_shape_group.fill_color_as_radial_gradient() + center = d_radial_gradient.center + d_args.append(torch.tensor((center.x, center.y))) + radius = d_radial_gradient.radius + d_args.append(torch.tensor((radius.x, radius.y))) + offsets = torch.zeros((d_radial_gradient.num_stops)) + stop_colors = torch.zeros((d_radial_gradient.num_stops, 4)) + d_radial_gradient.copy_to(\ + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + assert(torch.isfinite(stop_colors).all()) + d_args.append(offsets) + d_args.append(stop_colors) + else: + assert(False) + d_args.append(None) # stroke_color_type + if d_shape_group.has_stroke_color(): + if d_shape_group.stroke_color_type == diffvg.ColorType.constant: + d_constant = d_shape_group.stroke_color_as_constant() + c = d_constant.color + d_args.append(torch.tensor((c.x, c.y, c.z, c.w))) + elif d_shape_group.stroke_color_type == diffvg.ColorType.linear_gradient: + d_linear_gradient = d_shape_group.stroke_color_as_linear_gradient() + beg = d_linear_gradient.begin + d_args.append(torch.tensor((beg.x, beg.y))) + end = d_linear_gradient.end + d_args.append(torch.tensor((end.x, end.y))) + offsets = torch.zeros((d_linear_gradient.num_stops)) + stop_colors = torch.zeros((d_linear_gradient.num_stops, 4)) + d_linear_gradient.copy_to(\ + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + assert(torch.isfinite(stop_colors).all()) + d_args.append(offsets) + d_args.append(stop_colors) + elif 
d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: + d_radial_gradient = d_shape_group.stroke_color_as_radial_gradient() + center = d_radial_gradient.center + d_args.append(torch.tensor((center.x, center.y))) + radius = d_radial_gradient.radius + d_args.append(torch.tensor((radius.x, radius.y))) + offsets = torch.zeros((d_radial_gradient.num_stops)) + stop_colors = torch.zeros((d_radial_gradient.num_stops, 4)) + d_radial_gradient.copy_to(\ + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + assert(torch.isfinite(stop_colors).all()) + d_args.append(offsets) + d_args.append(stop_colors) + else: + assert(False) + d_args.append(None) # use_even_odd_rule + d_shape_to_canvas = torch.zeros((3, 3)) + d_shape_group.copy_to(diffvg.float_ptr(d_shape_to_canvas.data_ptr())) + assert(torch.isfinite(d_shape_to_canvas).all()) + d_args.append(d_shape_to_canvas) + d_args.append(None) # filter_type + d_args.append(torch.tensor(scene.get_d_filter_radius())) + + return tuple(d_args) diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/save_svg.py b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/save_svg.py new file mode 100644 index 0000000000000000000000000000000000000000..fd803ca3c799a895350ca25b1314115ebb438be9 --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/save_svg.py @@ -0,0 +1,156 @@ +import torch +import pydiffvg +import xml.etree.ElementTree as etree +from xml.dom import minidom + +def prettify(elem): + """Return a pretty-printed XML string for the Element. 
+ """ + rough_string = etree.tostring(elem, 'utf-8') + reparsed = minidom.parseString(rough_string) + return reparsed.toprettyxml(indent=" ") + +def save_svg(filename, width, height, shapes, shape_groups, use_gamma = False): + root = etree.Element('svg') + root.set('version', '1.1') + root.set('xmlns', 'http://www.w3.org/2000/svg') + root.set('width', str(width)) + root.set('height', str(height)) + defs = etree.SubElement(root, 'defs') + g = etree.SubElement(root, 'g') + if use_gamma: + f = etree.SubElement(defs, 'filter') + f.set('id', 'gamma') + f.set('x', '0') + f.set('y', '0') + f.set('width', '100%') + f.set('height', '100%') + gamma = etree.SubElement(f, 'feComponentTransfer') + gamma.set('color-interpolation-filters', 'sRGB') + feFuncR = etree.SubElement(gamma, 'feFuncR') + feFuncR.set('type', 'gamma') + feFuncR.set('amplitude', str(1)) + feFuncR.set('exponent', str(1/2.2)) + feFuncG = etree.SubElement(gamma, 'feFuncG') + feFuncG.set('type', 'gamma') + feFuncG.set('amplitude', str(1)) + feFuncG.set('exponent', str(1/2.2)) + feFuncB = etree.SubElement(gamma, 'feFuncB') + feFuncB.set('type', 'gamma') + feFuncB.set('amplitude', str(1)) + feFuncB.set('exponent', str(1/2.2)) + feFuncA = etree.SubElement(gamma, 'feFuncA') + feFuncA.set('type', 'gamma') + feFuncA.set('amplitude', str(1)) + feFuncA.set('exponent', str(1/2.2)) + g.set('style', 'filter:url(#gamma)') + + # Store color + for i, shape_group in enumerate(shape_groups): + def add_color(shape_color, name): + if isinstance(shape_color, pydiffvg.LinearGradient): + lg = shape_color + color = etree.SubElement(defs, 'linearGradient') + color.set('id', name) + color.set('x1', str(lg.begin[0].item())) + color.set('y1', str(lg.begin[1].item())) + color.set('x2', str(lg.end[0].item())) + color.set('y2', str(lg.end[1].item())) + offsets = lg.offsets.data.cpu().numpy() + stop_colors = lg.stop_colors.data.cpu().numpy() + for j in range(offsets.shape[0]): + stop = etree.SubElement(color, 'stop') + stop.set('offset', 
str(offsets[j])) + c = lg.stop_colors[j, :] + stop.set('stop-color', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + stop.set('stop-opacity', '{}'.format(c[3])) + + if shape_group.fill_color is not None: + add_color(shape_group.fill_color, 'shape_{}_fill'.format(i)) + if shape_group.stroke_color is not None: + add_color(shape_group.stroke_color, 'shape_{}_stroke'.format(i)) + + for i, shape_group in enumerate(shape_groups): + shape = shapes[shape_group.shape_ids[0]] + if isinstance(shape, pydiffvg.Circle): + shape_node = etree.SubElement(g, 'circle') + shape_node.set('r', str(shape.radius.item())) + shape_node.set('cx', str(shape.center[0].item())) + shape_node.set('cy', str(shape.center[1].item())) + elif isinstance(shape, pydiffvg.Polygon): + shape_node = etree.SubElement(g, 'polygon') + points = shape.points.data.cpu().numpy() + path_str = '' + for j in range(0, shape.points.shape[0]): + path_str += '{} {}'.format(points[j, 0], points[j, 1]) + if j != shape.points.shape[0] - 1: + path_str += ' ' + shape_node.set('points', path_str) + elif isinstance(shape, pydiffvg.Path): + shape_node = etree.SubElement(g, 'path') + num_segments = shape.num_control_points.shape[0] + num_control_points = shape.num_control_points.data.cpu().numpy() + points = shape.points.data.cpu().numpy() + num_points = shape.points.shape[0] + path_str = 'M {} {}'.format(points[0, 0], points[0, 1]) + point_id = 1 + for j in range(0, num_segments): + if num_control_points[j] == 0: + p = point_id % num_points + path_str += ' L {} {}'.format(\ + points[p, 0], points[p, 1]) + point_id += 1 + elif num_control_points[j] == 1: + p1 = (point_id + 1) % num_points + path_str += ' Q {} {} {} {}'.format(\ + points[point_id, 0], points[point_id, 1], + points[p1, 0], points[p1, 1]) + point_id += 2 + elif num_control_points[j] == 2: + p2 = (point_id + 2) % num_points + path_str += ' C {} {} {} {} {} {}'.format(\ + points[point_id, 0], points[point_id, 1], + points[point_id + 
1, 0], points[point_id + 1, 1], + points[p2, 0], points[p2, 1]) + point_id += 3 + shape_node.set('d', path_str) + elif isinstance(shape, pydiffvg.Rect): + shape_node = etree.SubElement(g, 'rect') + shape_node.set('x', str(shape.p_min[0].item())) + shape_node.set('y', str(shape.p_min[1].item())) + shape_node.set('width', str(shape.p_max[0].item() - shape.p_min[0].item())) + shape_node.set('height', str(shape.p_max[1].item() - shape.p_min[1].item())) + elif isinstance(shape, pydiffvg.Ellipse): + shape_node = etree.SubElement(g, 'ellipse') + shape_node.set('cx', str(shape.center[0].item())) + shape_node.set('cy', str(shape.center[1].item())) + shape_node.set('rx', str(shape.radius[0].item())) + shape_node.set('ry', str(shape.radius[1].item())) + else: + assert(False) + + shape_node.set('stroke-width', str(2 * shape.stroke_width.data.cpu().item())) + if shape_group.fill_color is not None: + if isinstance(shape_group.fill_color, pydiffvg.LinearGradient): + shape_node.set('fill', 'url(#shape_{}_fill)'.format(i)) + else: + c = shape_group.fill_color.data.cpu().numpy() + shape_node.set('fill', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + shape_node.set('opacity', str(c[3])) + else: + shape_node.set('fill', 'none') + if shape_group.stroke_color is not None: + if isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): + shape_node.set('stroke', 'url(#shape_{}_stroke)'.format(i)) + else: + c = shape_group.stroke_color.data.cpu().numpy() + shape_node.set('stroke', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + shape_node.set('stroke-opacity', str(c[3])) + shape_node.set('stroke-linecap', 'round') + shape_node.set('stroke-linejoin', 'round') + + with open(filename, "w") as f: + f.write(prettify(root)) diff --git a/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/shape.py b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/shape.py new file mode 100644 index 
0000000000000000000000000000000000000000..707a6ed69609210b28e028d3fcbd0b7f0312d442 --- /dev/null +++ b/diffvg/build/lib.linux-x86_64-cpython-38/pydiffvg/shape.py @@ -0,0 +1,172 @@ +import torch +import svgpathtools +import math + +class Circle: + def __init__(self, radius, center, stroke_width = torch.tensor(1.0), id = ''): + self.radius = radius + self.center = center + self.stroke_width = stroke_width + self.id = id + +class Ellipse: + def __init__(self, radius, center, stroke_width = torch.tensor(1.0), id = ''): + self.radius = radius + self.center = center + self.stroke_width = stroke_width + self.id = id + +class Path: + def __init__(self, + num_control_points, + points, + is_closed, + stroke_width = torch.tensor(1.0), + id = '', + use_distance_approx = False): + self.num_control_points = num_control_points + self.points = points + self.is_closed = is_closed + self.stroke_width = stroke_width + self.id = id + self.use_distance_approx = use_distance_approx + +class Polygon: + def __init__(self, points, is_closed, stroke_width = torch.tensor(1.0), id = ''): + self.points = points + self.is_closed = is_closed + self.stroke_width = stroke_width + self.id = id + +class Rect: + def __init__(self, p_min, p_max, stroke_width = torch.tensor(1.0), id = ''): + self.p_min = p_min + self.p_max = p_max + self.stroke_width = stroke_width + self.id = id + +class ShapeGroup: + def __init__(self, + shape_ids, + fill_color, + use_even_odd_rule = True, + stroke_color = None, + shape_to_canvas = torch.eye(3), + id = ''): + self.shape_ids = shape_ids + self.fill_color = fill_color + self.use_even_odd_rule = use_even_odd_rule + self.stroke_color = stroke_color + self.shape_to_canvas = shape_to_canvas + self.id = id + +def from_svg_path(path_str, shape_to_canvas = torch.eye(3), force_close = False): + path = svgpathtools.parse_path(path_str) + if len(path) == 0: + return [] + ret_paths = [] + subpaths = path.continuous_subpaths() + for subpath in subpaths: + if subpath.isclosed(): + 
if len(subpath) > 1 and isinstance(subpath[-1], svgpathtools.Line) and subpath[-1].length() < 1e-5: + subpath.remove(subpath[-1]) + subpath[-1].end = subpath[0].start # Force closing the path + subpath.end = subpath[-1].end + assert(subpath.isclosed()) + else: + beg = subpath[0].start + end = subpath[-1].end + if abs(end - beg) < 1e-5: + subpath[-1].end = beg # Force closing the path + subpath.end = subpath[-1].end + assert(subpath.isclosed()) + elif force_close: + subpath.append(svgpathtools.Line(end, beg)) + subpath.end = subpath[-1].end + assert(subpath.isclosed()) + + num_control_points = [] + points = [] + + for i, e in enumerate(subpath): + if i == 0: + points.append((e.start.real, e.start.imag)) + else: + # Must begin from the end of previous segment + assert(e.start.real == points[-1][0]) + assert(e.start.imag == points[-1][1]) + if isinstance(e, svgpathtools.Line): + num_control_points.append(0) + elif isinstance(e, svgpathtools.QuadraticBezier): + num_control_points.append(1) + points.append((e.control.real, e.control.imag)) + elif isinstance(e, svgpathtools.CubicBezier): + num_control_points.append(2) + points.append((e.control1.real, e.control1.imag)) + points.append((e.control2.real, e.control2.imag)) + elif isinstance(e, svgpathtools.Arc): + # Convert to Cubic curves + # https://www.joecridge.me/content/pdf/bezier-arcs.pdf + start = e.theta * math.pi / 180.0 + stop = (e.theta + e.delta) * math.pi / 180.0 + + sign = 1.0 + if stop < start: + sign = -1.0 + + epsilon = 0.00001 + debug = abs(e.delta) >= 90.0 + while (sign * (stop - start) > epsilon): + arc_to_draw = stop - start + if arc_to_draw > 0.0: + arc_to_draw = min(arc_to_draw, 0.5 * math.pi) + else: + arc_to_draw = max(arc_to_draw, -0.5 * math.pi) + alpha = arc_to_draw / 2.0 + cos_alpha = math.cos(alpha) + sin_alpha = math.sin(alpha) + cot_alpha = 1.0 / math.tan(alpha) + phi = start + alpha + cos_phi = math.cos(phi) + sin_phi = math.sin(phi) + lambda_ = (4.0 - cos_alpha) / 3.0 + mu = sin_alpha + 
(cos_alpha - lambda_) * cot_alpha + last = sign * (stop - (start + arc_to_draw)) <= epsilon + num_control_points.append(2) + rx = e.radius.real + ry = e.radius.imag + cx = e.center.real + cy = e.center.imag + rot = e.phi * math.pi / 180.0 + cos_rot = math.cos(rot) + sin_rot = math.sin(rot) + x = lambda_ * cos_phi + mu * sin_phi + y = lambda_ * sin_phi - mu * cos_phi + xx = x * cos_rot - y * sin_rot + yy = x * sin_rot + y * cos_rot + points.append((cx + rx * xx, cy + ry * yy)) + x = lambda_ * cos_phi - mu * sin_phi + y = lambda_ * sin_phi + mu * cos_phi + xx = x * cos_rot - y * sin_rot + yy = x * sin_rot + y * cos_rot + points.append((cx + rx * xx, cy + ry * yy)) + if not last: + points.append((cx + rx * math.cos(rot + start + arc_to_draw), + cy + ry * math.sin(rot + start + arc_to_draw))) + start += arc_to_draw + first = False + if i != len(subpath) - 1: + points.append((e.end.real, e.end.imag)) + else: + if subpath.isclosed(): + # Must end at the beginning of first segment + assert(e.end.real == points[0][0]) + assert(e.end.imag == points[0][1]) + else: + points.append((e.end.real, e.end.imag)) + points = torch.tensor(points, dtype=torch.float) + points = torch.cat((points, torch.ones([points.shape[0], 1])), dim = 1) @ torch.transpose(shape_to_canvas, 0, 1) + points = points / points[:, 2:3] + points = points[:, :2].contiguous() + ret_paths.append(Path(torch.tensor(num_control_points), points, subpath.isclosed())) + return ret_paths diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeCache.txt b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeCache.txt new file mode 100644 index 0000000000000000000000000000000000000000..5720a1a7230f77f39ee332394a50fd9d50186948 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeCache.txt @@ -0,0 +1,792 @@ +# This is the CMakeCache file. 
+# For build in directory: /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 +# It was generated by CMake: /usr/local/envs/word/bin/cmake +# You can edit this file to change values found and used by cmake. +# If you do not want to change any of the values, simply exit the editor. +# If you do want to change a value, simply edit, save, and exit the editor. +# The syntax for the file is as follows: +# KEY:TYPE=VALUE +# KEY is the name of a variable in the cache. +# TYPE is a hint to GUIs for the type of VALUE, DO NOT EDIT TYPE!. +# VALUE is the current value for the KEY. + +######################## +# EXTERNAL cache entries +######################## + +//Path to a program. +CMAKE_ADDR2LINE:FILEPATH=/usr/bin/addr2line + +//Path to a program. +CMAKE_AR:FILEPATH=/usr/bin/ar + +//Choose the type of build, options are: None Debug Release RelWithDebInfo +// MinSizeRel ... +CMAKE_BUILD_TYPE:STRING=Release + +//Enable/Disable color output during build. +CMAKE_COLOR_MAKEFILE:BOOL=ON + +//CXX compiler +CMAKE_CXX_COMPILER:FILEPATH=/usr/bin/c++ + +//A wrapper around 'ar' adding the appropriate '--plugin' option +// for the GCC compiler +CMAKE_CXX_COMPILER_AR:FILEPATH=/usr/bin/gcc-ar-11 + +//A wrapper around 'ranlib' adding the appropriate '--plugin' option +// for the GCC compiler +CMAKE_CXX_COMPILER_RANLIB:FILEPATH=/usr/bin/gcc-ranlib-11 + +//Flags used by the CXX compiler during all build types. +CMAKE_CXX_FLAGS:STRING=-DVERSION_INFO=\"0.0.1\" + +//Flags used by the CXX compiler during DEBUG builds. +CMAKE_CXX_FLAGS_DEBUG:STRING=-g + +//Flags used by the CXX compiler during MINSIZEREL builds. +CMAKE_CXX_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG + +//Flags used by the CXX compiler during RELEASE builds. +CMAKE_CXX_FLAGS_RELEASE:STRING=-O3 -DNDEBUG + +//Flags used by the CXX compiler during RELWITHDEBINFO builds. 
+CMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG + +//C compiler +CMAKE_C_COMPILER:FILEPATH=/usr/bin/cc + +//A wrapper around 'ar' adding the appropriate '--plugin' option +// for the GCC compiler +CMAKE_C_COMPILER_AR:FILEPATH=/usr/bin/gcc-ar-11 + +//A wrapper around 'ranlib' adding the appropriate '--plugin' option +// for the GCC compiler +CMAKE_C_COMPILER_RANLIB:FILEPATH=/usr/bin/gcc-ranlib-11 + +//Flags used by the C compiler during all build types. +CMAKE_C_FLAGS:STRING= + +//Flags used by the C compiler during DEBUG builds. +CMAKE_C_FLAGS_DEBUG:STRING=-g + +//Flags used by the C compiler during MINSIZEREL builds. +CMAKE_C_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG + +//Flags used by the C compiler during RELEASE builds. +CMAKE_C_FLAGS_RELEASE:STRING=-O3 -DNDEBUG + +//Flags used by the C compiler during RELWITHDEBINFO builds. +CMAKE_C_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG + +//Path to a program. +CMAKE_DLLTOOL:FILEPATH=CMAKE_DLLTOOL-NOTFOUND + +//Flags used by the linker during all build types. +CMAKE_EXE_LINKER_FLAGS:STRING= + +//Flags used by the linker during DEBUG builds. +CMAKE_EXE_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during MINSIZEREL builds. +CMAKE_EXE_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during RELEASE builds. +CMAKE_EXE_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during RELWITHDEBINFO builds. +CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Enable/Disable output of compile commands during generation. +CMAKE_EXPORT_COMPILE_COMMANDS:BOOL= + +//Value Computed by CMake. 
+CMAKE_FIND_PACKAGE_REDIRECTS_DIR:STATIC=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/pkgRedirects + +//User executables (bin) +CMAKE_INSTALL_BINDIR:PATH=bin + +//Read-only architecture-independent data (DATAROOTDIR) +CMAKE_INSTALL_DATADIR:PATH= + +//Read-only architecture-independent data root (share) +CMAKE_INSTALL_DATAROOTDIR:PATH=share + +//Documentation root (DATAROOTDIR/doc/PROJECT_NAME) +CMAKE_INSTALL_DOCDIR:PATH= + +//C header files (include) +CMAKE_INSTALL_INCLUDEDIR:PATH=include + +//Info documentation (DATAROOTDIR/info) +CMAKE_INSTALL_INFODIR:PATH= + +//Object code libraries (lib) +CMAKE_INSTALL_LIBDIR:PATH=lib + +//Program executables (libexec) +CMAKE_INSTALL_LIBEXECDIR:PATH=libexec + +//Locale-dependent data (DATAROOTDIR/locale) +CMAKE_INSTALL_LOCALEDIR:PATH= + +//Modifiable single-machine data (var) +CMAKE_INSTALL_LOCALSTATEDIR:PATH=var + +//Man documentation (DATAROOTDIR/man) +CMAKE_INSTALL_MANDIR:PATH= + +//C header files for non-gcc (/usr/include) +CMAKE_INSTALL_OLDINCLUDEDIR:PATH=/usr/include + +//Install path prefix, prepended onto install directories. +CMAKE_INSTALL_PREFIX:PATH=/usr/local + +//Run-time variable data (LOCALSTATEDIR/run) +CMAKE_INSTALL_RUNSTATEDIR:PATH= + +//System admin executables (sbin) +CMAKE_INSTALL_SBINDIR:PATH=sbin + +//Modifiable architecture-independent data (com) +CMAKE_INSTALL_SHAREDSTATEDIR:PATH=com + +//Read-only single-machine data (etc) +CMAKE_INSTALL_SYSCONFDIR:PATH=etc + +//No help, variable specified on the command line. +CMAKE_LIBRARY_OUTPUT_DIRECTORY:UNINITIALIZED=/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38 + +//Path to a program. +CMAKE_LINKER:FILEPATH=/usr/bin/ld + +//Path to a program. +CMAKE_MAKE_PROGRAM:FILEPATH=/usr/bin/gmake + +//Flags used by the linker during the creation of modules during +// all build types. +CMAKE_MODULE_LINKER_FLAGS:STRING= + +//Flags used by the linker during the creation of modules during +// DEBUG builds. 
+CMAKE_MODULE_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during the creation of modules during +// MINSIZEREL builds. +CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during the creation of modules during +// RELEASE builds. +CMAKE_MODULE_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during the creation of modules during +// RELWITHDEBINFO builds. +CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Path to a program. +CMAKE_NM:FILEPATH=/usr/bin/nm + +//Path to a program. +CMAKE_OBJCOPY:FILEPATH=/usr/bin/objcopy + +//Path to a program. +CMAKE_OBJDUMP:FILEPATH=/usr/bin/objdump + +//Value Computed by CMake +CMAKE_PROJECT_DESCRIPTION:STATIC=Differentiable Vector Graphics + +//Value Computed by CMake +CMAKE_PROJECT_HOMEPAGE_URL:STATIC= + +//Value Computed by CMake +CMAKE_PROJECT_NAME:STATIC=diffvg + +//Value Computed by CMake +CMAKE_PROJECT_VERSION:STATIC=0.0.1 + +//Value Computed by CMake +CMAKE_PROJECT_VERSION_MAJOR:STATIC=0 + +//Value Computed by CMake +CMAKE_PROJECT_VERSION_MINOR:STATIC=0 + +//Value Computed by CMake +CMAKE_PROJECT_VERSION_PATCH:STATIC=1 + +//Value Computed by CMake +CMAKE_PROJECT_VERSION_TWEAK:STATIC= + +//Path to a program. +CMAKE_RANLIB:FILEPATH=/usr/bin/ranlib + +//Path to a program. +CMAKE_READELF:FILEPATH=/usr/bin/readelf + +//Flags used by the linker during the creation of shared libraries +// during all build types. +CMAKE_SHARED_LINKER_FLAGS:STRING= + +//Flags used by the linker during the creation of shared libraries +// during DEBUG builds. +CMAKE_SHARED_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during the creation of shared libraries +// during MINSIZEREL builds. +CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during the creation of shared libraries +// during RELEASE builds. +CMAKE_SHARED_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during the creation of shared libraries +// during RELWITHDEBINFO builds. 
+CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//If set, runtime paths are not added when installing shared libraries, +// but are added when building. +CMAKE_SKIP_INSTALL_RPATH:BOOL=NO + +//If set, runtime paths are not added when using shared libraries. +CMAKE_SKIP_RPATH:BOOL=NO + +//Flags used by the linker during the creation of static libraries +// during all build types. +CMAKE_STATIC_LINKER_FLAGS:STRING= + +//Flags used by the linker during the creation of static libraries +// during DEBUG builds. +CMAKE_STATIC_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during the creation of static libraries +// during MINSIZEREL builds. +CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during the creation of static libraries +// during RELEASE builds. +CMAKE_STATIC_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during the creation of static libraries +// during RELWITHDEBINFO builds. +CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Path to a program. +CMAKE_STRIP:FILEPATH=/usr/bin/strip + +//If this value is on, makefiles will be generated without the +// .SILENT directive, and all commands will be echoed to the console +// during the make. This is useful for debugging only. With Visual +// Studio IDE projects all commands are done without /nologo. +CMAKE_VERBOSE_MAKEFILE:BOOL=FALSE + +//Compile device code in 64 bit mode +CUDA_64_BIT_DEVICE_CODE:BOOL=ON + +//Attach the build rule to the CUDA source file. Enable only when +// the CUDA source file is added to at most one target. +CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE:BOOL=ON + +//Generate and parse .cubin files in Device mode. +CUDA_BUILD_CUBIN:BOOL=OFF + +//Build in Emulation mode +CUDA_BUILD_EMULATION:BOOL=OFF + +//"cudart" library +CUDA_CUDART_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libcudart.so + +//"cuda" library (older versions only). +CUDA_CUDA_LIBRARY:FILEPATH=CUDA_CUDA_LIBRARY-NOTFOUND + +//Directory to put all the output files. 
If blank it will default +// to the CMAKE_CURRENT_BINARY_DIR +CUDA_GENERATED_OUTPUT_DIR:PATH= + +//Generated file extension +CUDA_HOST_COMPILATION_CPP:BOOL=ON + +//Host side compiler used by NVCC +CUDA_HOST_COMPILER:FILEPATH=/usr/bin/cc + +//Path to a program. +CUDA_NVCC_EXECUTABLE:FILEPATH=/usr/local/cuda/bin/nvcc + +//Semi-colon delimit multiple arguments. during all build types. +CUDA_NVCC_FLAGS:STRING= + +//Semi-colon delimit multiple arguments. during DEBUG builds. +CUDA_NVCC_FLAGS_DEBUG:STRING= + +//Semi-colon delimit multiple arguments. during MINSIZEREL builds. +CUDA_NVCC_FLAGS_MINSIZEREL:STRING= + +//Semi-colon delimit multiple arguments. during RELEASE builds. +CUDA_NVCC_FLAGS_RELEASE:STRING= + +//Semi-colon delimit multiple arguments. during RELWITHDEBINFO +// builds. +CUDA_NVCC_FLAGS_RELWITHDEBINFO:STRING= + +//"OpenCL" library +CUDA_OpenCL_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libOpenCL.so + +//Propagate C/CXX_FLAGS and friends to the host compiler via -Xcompile +CUDA_PROPAGATE_HOST_FLAGS:BOOL=ON + +//Path to a file. +CUDA_SDK_ROOT_DIR:PATH=CUDA_SDK_ROOT_DIR-NOTFOUND + +//Compile CUDA objects with separable compilation enabled. Requires +// CUDA 5.0+ +CUDA_SEPARABLE_COMPILATION:BOOL=OFF + +//Path to a file. +CUDA_TOOLKIT_INCLUDE:PATH=/usr/local/cuda/include + +//Toolkit location. +CUDA_TOOLKIT_ROOT_DIR:PATH=/usr/local/cuda + +//Use the static version of the CUDA runtime library if available +CUDA_USE_STATIC_CUDA_RUNTIME:BOOL=ON + +//Print out the commands run while compiling the CUDA source file. +// With the Makefile generator this defaults to VERBOSE variable +// specified on the command line, but can be forced on with this +// option. +CUDA_VERBOSE_BUILD:BOOL=OFF + +//Version of CUDA as computed from nvcc. 
+CUDA_VERSION:STRING=12.2 + +//"cublas" library +CUDA_cublas_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libcublas.so + +//"cudadevrt" library +CUDA_cudadevrt_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libcudadevrt.a + +//static CUDA runtime library +CUDA_cudart_static_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libcudart_static.a + +//"cufft" library +CUDA_cufft_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libcufft.so + +//"cupti" library +CUDA_cupti_LIBRARY:FILEPATH=CUDA_cupti_LIBRARY-NOTFOUND + +//"curand" library +CUDA_curand_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libcurand.so + +//"cusolver" library +CUDA_cusolver_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libcusolver.so + +//"cusparse" library +CUDA_cusparse_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libcusparse.so + +//"nppc" library +CUDA_nppc_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppc.so + +//"nppial" library +CUDA_nppial_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppial.so + +//"nppicc" library +CUDA_nppicc_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppicc.so + +//"nppidei" library +CUDA_nppidei_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppidei.so + +//"nppif" library +CUDA_nppif_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppif.so + +//"nppig" library +CUDA_nppig_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppig.so + +//"nppim" library +CUDA_nppim_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppim.so + +//"nppist" library +CUDA_nppist_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppist.so + +//"nppisu" library +CUDA_nppisu_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppisu.so + +//"nppitc" library +CUDA_nppitc_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnppitc.so + +//"npps" library +CUDA_npps_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnpps.so + +//"nvToolsExt" library +CUDA_nvToolsExt_LIBRARY:FILEPATH=/usr/local/cuda/lib64/libnvToolsExt.so + +//Path to a library. +CUDA_rt_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/librt.a + +//Build diffvg with GPU code path? 
+DIFFVG_CUDA:BOOL=1 + +//Force new FindPython +PYBIND11_FINDPYTHON:BOOL=OFF + +//Install pybind11 header files? +PYBIND11_INSTALL:BOOL=OFF + +//Disable search for Python +PYBIND11_NOPYTHON:BOOL=OFF + +//Build pybind11 test suite? +PYBIND11_TEST:BOOL=OFF + +//Path to a file. +PYTHON_INCLUDE_DIR:PATH=/usr/local/include/python3.10 + +//No help, variable specified on the command line. +PYTHON_INCLUDE_PATH:UNINITIALIZED=/usr/local/envs/word/include/python3.8 + +//Path to a library. +PYTHON_LIBRARY:FILEPATH=/usr/local/envs/word/lib + +//Path to a library. +PYTHON_LIBRARY_DEBUG:FILEPATH=PYTHON_LIBRARY_DEBUG-NOTFOUND + +//Value Computed by CMake +diffvg_BINARY_DIR:STATIC=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 + +//Value Computed by CMake +diffvg_IS_TOP_LEVEL:STATIC=ON + +//Value Computed by CMake +diffvg_SOURCE_DIR:STATIC=/content/Word-As-Image/diffvg + +//Value Computed by CMake +pybind11_BINARY_DIR:STATIC=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/pybind11 + +//Value Computed by CMake +pybind11_IS_TOP_LEVEL:STATIC=OFF + +//Value Computed by CMake +pybind11_SOURCE_DIR:STATIC=/content/Word-As-Image/diffvg/pybind11 + + +######################## +# INTERNAL cache entries +######################## + +//ADVANCED property for variable: CMAKE_ADDR2LINE +CMAKE_ADDR2LINE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_AR +CMAKE_AR-ADVANCED:INTERNAL=1 +//This is the directory where this CMakeCache.txt was created +CMAKE_CACHEFILE_DIR:INTERNAL=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 +//Major version of cmake used to create the current loaded cache +CMAKE_CACHE_MAJOR_VERSION:INTERNAL=3 +//Minor version of cmake used to create the current loaded cache +CMAKE_CACHE_MINOR_VERSION:INTERNAL=26 +//Patch version of cmake used to create the current loaded cache +CMAKE_CACHE_PATCH_VERSION:INTERNAL=4 +//ADVANCED property for variable: CMAKE_COLOR_MAKEFILE +CMAKE_COLOR_MAKEFILE-ADVANCED:INTERNAL=1 +//Path to 
CMake executable. +CMAKE_COMMAND:INTERNAL=/usr/local/envs/word/bin/cmake +//Path to cpack program executable. +CMAKE_CPACK_COMMAND:INTERNAL=/usr/local/envs/word/bin/cpack +//Path to ctest program executable. +CMAKE_CTEST_COMMAND:INTERNAL=/usr/local/envs/word/bin/ctest +//ADVANCED property for variable: CMAKE_CXX_COMPILER +CMAKE_CXX_COMPILER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_COMPILER_AR +CMAKE_CXX_COMPILER_AR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_COMPILER_RANLIB +CMAKE_CXX_COMPILER_RANLIB-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS +CMAKE_CXX_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_DEBUG +CMAKE_CXX_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_MINSIZEREL +CMAKE_CXX_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELEASE +CMAKE_CXX_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELWITHDEBINFO +CMAKE_CXX_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_COMPILER +CMAKE_C_COMPILER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_COMPILER_AR +CMAKE_C_COMPILER_AR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_COMPILER_RANLIB +CMAKE_C_COMPILER_RANLIB-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS +CMAKE_C_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_DEBUG +CMAKE_C_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_MINSIZEREL +CMAKE_C_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_RELEASE +CMAKE_C_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_RELWITHDEBINFO +CMAKE_C_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_DLLTOOL +CMAKE_DLLTOOL-ADVANCED:INTERNAL=1 +//Path to cache edit program 
executable. +CMAKE_EDIT_COMMAND:INTERNAL=/usr/local/envs/word/bin/ccmake +//Executable file format +CMAKE_EXECUTABLE_FORMAT:INTERNAL=ELF +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS +CMAKE_EXE_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_DEBUG +CMAKE_EXE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_MINSIZEREL +CMAKE_EXE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELEASE +CMAKE_EXE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXPORT_COMPILE_COMMANDS +CMAKE_EXPORT_COMPILE_COMMANDS-ADVANCED:INTERNAL=1 +//Name of external makefile project generator. +CMAKE_EXTRA_GENERATOR:INTERNAL= +//Name of generator. +CMAKE_GENERATOR:INTERNAL=Unix Makefiles +//Generator instance identifier. +CMAKE_GENERATOR_INSTANCE:INTERNAL= +//Name of generator platform. +CMAKE_GENERATOR_PLATFORM:INTERNAL= +//Name of generator toolset. 
+CMAKE_GENERATOR_TOOLSET:INTERNAL= +//Test CMAKE_HAVE_LIBC_PTHREAD +CMAKE_HAVE_LIBC_PTHREAD:INTERNAL=1 +//Source directory with the top level CMakeLists.txt file for this +// project +CMAKE_HOME_DIRECTORY:INTERNAL=/content/Word-As-Image/diffvg +//ADVANCED property for variable: CMAKE_INSTALL_BINDIR +CMAKE_INSTALL_BINDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_DATADIR +CMAKE_INSTALL_DATADIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_DATAROOTDIR +CMAKE_INSTALL_DATAROOTDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_DOCDIR +CMAKE_INSTALL_DOCDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_INCLUDEDIR +CMAKE_INSTALL_INCLUDEDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_INFODIR +CMAKE_INSTALL_INFODIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_LIBDIR +CMAKE_INSTALL_LIBDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_LIBEXECDIR +CMAKE_INSTALL_LIBEXECDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_LOCALEDIR +CMAKE_INSTALL_LOCALEDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_LOCALSTATEDIR +CMAKE_INSTALL_LOCALSTATEDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_MANDIR +CMAKE_INSTALL_MANDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_OLDINCLUDEDIR +CMAKE_INSTALL_OLDINCLUDEDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_RUNSTATEDIR +CMAKE_INSTALL_RUNSTATEDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_SBINDIR +CMAKE_INSTALL_SBINDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_SHAREDSTATEDIR +CMAKE_INSTALL_SHAREDSTATEDIR-ADVANCED:INTERNAL=1 +//Install .so files without execute permission. 
+CMAKE_INSTALL_SO_NO_EXE:INTERNAL=1 +//ADVANCED property for variable: CMAKE_INSTALL_SYSCONFDIR +CMAKE_INSTALL_SYSCONFDIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_LINKER +CMAKE_LINKER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MAKE_PROGRAM +CMAKE_MAKE_PROGRAM-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS +CMAKE_MODULE_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_DEBUG +CMAKE_MODULE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL +CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELEASE +CMAKE_MODULE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_NM +CMAKE_NM-ADVANCED:INTERNAL=1 +//number of local generators +CMAKE_NUMBER_OF_MAKEFILES:INTERNAL=2 +//ADVANCED property for variable: CMAKE_OBJCOPY +CMAKE_OBJCOPY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_OBJDUMP +CMAKE_OBJDUMP-ADVANCED:INTERNAL=1 +//Platform information initialized +CMAKE_PLATFORM_INFO_INITIALIZED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_RANLIB +CMAKE_RANLIB-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_READELF +CMAKE_READELF-ADVANCED:INTERNAL=1 +//Path to CMake installation. 
+CMAKE_ROOT:INTERNAL=/usr/local/envs/word/share/cmake-3.26 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS +CMAKE_SHARED_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_DEBUG +CMAKE_SHARED_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL +CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELEASE +CMAKE_SHARED_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SKIP_INSTALL_RPATH +CMAKE_SKIP_INSTALL_RPATH-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SKIP_RPATH +CMAKE_SKIP_RPATH-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS +CMAKE_STATIC_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_DEBUG +CMAKE_STATIC_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL +CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELEASE +CMAKE_STATIC_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STRIP +CMAKE_STRIP-ADVANCED:INTERNAL=1 +//uname command +CMAKE_UNAME:INTERNAL=/usr/bin/uname +//ADVANCED property for variable: CMAKE_VERBOSE_MAKEFILE +CMAKE_VERBOSE_MAKEFILE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_64_BIT_DEVICE_CODE +CUDA_64_BIT_DEVICE_CODE-ADVANCED:INTERNAL=1 +//List of intermediate files that are part of the cuda dependency +// scanning. 
+CUDA_ADDITIONAL_CLEAN_FILES:INTERNAL=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_diffvg.cpp.o.depend;/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_scene.cpp.o.depend +//ADVANCED property for variable: CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE +CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_BUILD_CUBIN +CUDA_BUILD_CUBIN-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_BUILD_EMULATION +CUDA_BUILD_EMULATION-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_CUDART_LIBRARY +CUDA_CUDART_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_CUDA_LIBRARY +CUDA_CUDA_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_GENERATED_OUTPUT_DIR +CUDA_GENERATED_OUTPUT_DIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_HOST_COMPILATION_CPP +CUDA_HOST_COMPILATION_CPP-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_NVCC_EXECUTABLE +CUDA_NVCC_EXECUTABLE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_NVCC_FLAGS +CUDA_NVCC_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_NVCC_FLAGS_DEBUG +CUDA_NVCC_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_NVCC_FLAGS_MINSIZEREL +CUDA_NVCC_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_NVCC_FLAGS_RELEASE +CUDA_NVCC_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_NVCC_FLAGS_RELWITHDEBINFO +CUDA_NVCC_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_OpenCL_LIBRARY +CUDA_OpenCL_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_PROPAGATE_HOST_FLAGS +CUDA_PROPAGATE_HOST_FLAGS-ADVANCED:INTERNAL=1 +//This is the value of the last time CUDA_SDK_ROOT_DIR was set +// successfully. 
+CUDA_SDK_ROOT_DIR_INTERNAL:INTERNAL=CUDA_SDK_ROOT_DIR-NOTFOUND +//ADVANCED property for variable: CUDA_SEPARABLE_COMPILATION +CUDA_SEPARABLE_COMPILATION-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_TOOLKIT_INCLUDE +CUDA_TOOLKIT_INCLUDE-ADVANCED:INTERNAL=1 +//This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was +// set successfully. +CUDA_TOOLKIT_ROOT_DIR_INTERNAL:INTERNAL=/usr/local/cuda +//This is the value of the last time CUDA_TOOLKIT_TARGET_DIR was +// set successfully. +CUDA_TOOLKIT_TARGET_DIR_INTERNAL:INTERNAL=/usr/local/cuda +//ADVANCED property for variable: CUDA_VERBOSE_BUILD +CUDA_VERBOSE_BUILD-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_VERSION +CUDA_VERSION-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_cublas_LIBRARY +CUDA_cublas_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_cudadevrt_LIBRARY +CUDA_cudadevrt_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_cudart_static_LIBRARY +CUDA_cudart_static_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_cufft_LIBRARY +CUDA_cufft_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_cupti_LIBRARY +CUDA_cupti_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_curand_LIBRARY +CUDA_curand_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_cusolver_LIBRARY +CUDA_cusolver_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_cusparse_LIBRARY +CUDA_cusparse_LIBRARY-ADVANCED:INTERNAL=1 +//Location of make2cmake.cmake +CUDA_make2cmake:INTERNAL=/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/make2cmake.cmake +//ADVANCED property for variable: CUDA_nppc_LIBRARY +CUDA_nppc_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nppial_LIBRARY +CUDA_nppial_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nppicc_LIBRARY +CUDA_nppicc_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nppidei_LIBRARY 
+CUDA_nppidei_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nppif_LIBRARY +CUDA_nppif_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nppig_LIBRARY +CUDA_nppig_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nppim_LIBRARY +CUDA_nppim_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nppist_LIBRARY +CUDA_nppist_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nppisu_LIBRARY +CUDA_nppisu_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nppitc_LIBRARY +CUDA_nppitc_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_npps_LIBRARY +CUDA_npps_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CUDA_nvToolsExt_LIBRARY +CUDA_nvToolsExt_LIBRARY-ADVANCED:INTERNAL=1 +//Location of parse_cubin.cmake +CUDA_parse_cubin:INTERNAL=/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/parse_cubin.cmake +//Location of run_nvcc.cmake +CUDA_run_nvcc:INTERNAL=/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/run_nvcc.cmake +//Details about finding CUDA +FIND_PACKAGE_MESSAGE_DETAILS_CUDA:INTERNAL=[/usr/local/cuda][/usr/local/cuda/bin/nvcc][/usr/local/cuda/include][/usr/local/cuda/lib64/libcudart_static.a][v12.2(10)] +//Details about finding Python +FIND_PACKAGE_MESSAGE_DETAILS_Python:INTERNAL=[/usr/local/include/python3.10][/usr/local/lib/libpython3.10.so][cfound components: Development Development.Module Development.Embed ][v3.10.13(3.7)] +//Details about finding PythonLibs +FIND_PACKAGE_MESSAGE_DETAILS_PythonLibs:INTERNAL=[/usr/local/envs/word/lib][/usr/local/include/python3.10][v3.10.13()] +//Details about finding Threads +FIND_PACKAGE_MESSAGE_DETAILS_Threads:INTERNAL=[TRUE][v()] +//Test HAS_FLTO +HAS_FLTO:INTERNAL=1 +PYBIND11_INCLUDE_DIR:INTERNAL=/content/Word-As-Image/diffvg/pybind11/include +//ADVANCED property for variable: PYTHON_INCLUDE_DIR +PYTHON_INCLUDE_DIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: PYTHON_LIBRARY 
+PYTHON_LIBRARY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: PYTHON_LIBRARY_DEBUG +PYTHON_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//linker supports push/pop state +_CMAKE_LINKER_PUSHPOP_STATE_SUPPORTED:INTERNAL=TRUE +//CMAKE_INSTALL_PREFIX during last run +_GNUInstallDirs_LAST_CMAKE_INSTALL_PREFIX:INTERNAL=/usr/local +_Python:INTERNAL=Python +//Path to a program. +_Python_CONFIG:INTERNAL=/usr/local/bin/python3.10-config +//Compiler reason failure +_Python_Compiler_REASON_FAILURE:INTERNAL= +_Python_DEVELOPMENT_EMBED_SIGNATURE:INTERNAL=0d60c336de4c138bc16d851eb54832b4 +_Python_DEVELOPMENT_MODULE_SIGNATURE:INTERNAL=ea53c9b786f64d950e01153b7c642f4d +//Development reason failure +_Python_Development_REASON_FAILURE:INTERNAL= +//Path to a file. +_Python_INCLUDE_DIR:INTERNAL=/usr/local/include/python3.10 +//Interpreter reason failure +_Python_Interpreter_REASON_FAILURE:INTERNAL= +//Path to a library. +_Python_LIBRARY_RELEASE:INTERNAL=/usr/local/lib/libpython3.10.so +//NumPy reason failure +_Python_NumPy_REASON_FAILURE:INTERNAL= +//true if pybind11 and all required components found on the system +pybind11_FOUND:INTERNAL=TRUE +//Directory where pybind11 headers are located +pybind11_INCLUDE_DIR:INTERNAL=/content/Word-As-Image/diffvg/pybind11/include + diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeCCompiler.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeCCompiler.cmake new file mode 100644 index 0000000000000000000000000000000000000000..b67e05b5adeda4b5658743d4e0d8915a59e32399 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeCCompiler.cmake @@ -0,0 +1,72 @@ +set(CMAKE_C_COMPILER "/usr/bin/cc") +set(CMAKE_C_COMPILER_ARG1 "") +set(CMAKE_C_COMPILER_ID "GNU") +set(CMAKE_C_COMPILER_VERSION "11.4.0") +set(CMAKE_C_COMPILER_VERSION_INTERNAL "") +set(CMAKE_C_COMPILER_WRAPPER "") +set(CMAKE_C_STANDARD_COMPUTED_DEFAULT "17") +set(CMAKE_C_EXTENSIONS_COMPUTED_DEFAULT "ON") 
+set(CMAKE_C_COMPILE_FEATURES "c_std_90;c_function_prototypes;c_std_99;c_restrict;c_variadic_macros;c_std_11;c_static_assert;c_std_17;c_std_23") +set(CMAKE_C90_COMPILE_FEATURES "c_std_90;c_function_prototypes") +set(CMAKE_C99_COMPILE_FEATURES "c_std_99;c_restrict;c_variadic_macros") +set(CMAKE_C11_COMPILE_FEATURES "c_std_11;c_static_assert") +set(CMAKE_C17_COMPILE_FEATURES "c_std_17") +set(CMAKE_C23_COMPILE_FEATURES "c_std_23") + +set(CMAKE_C_PLATFORM_ID "Linux") +set(CMAKE_C_SIMULATE_ID "") +set(CMAKE_C_COMPILER_FRONTEND_VARIANT "GNU") +set(CMAKE_C_SIMULATE_VERSION "") + + + + +set(CMAKE_AR "/usr/bin/ar") +set(CMAKE_C_COMPILER_AR "/usr/bin/gcc-ar-11") +set(CMAKE_RANLIB "/usr/bin/ranlib") +set(CMAKE_C_COMPILER_RANLIB "/usr/bin/gcc-ranlib-11") +set(CMAKE_LINKER "/usr/bin/ld") +set(CMAKE_MT "") +set(CMAKE_COMPILER_IS_GNUCC 1) +set(CMAKE_C_COMPILER_LOADED 1) +set(CMAKE_C_COMPILER_WORKS TRUE) +set(CMAKE_C_ABI_COMPILED TRUE) + +set(CMAKE_C_COMPILER_ENV_VAR "CC") + +set(CMAKE_C_COMPILER_ID_RUN 1) +set(CMAKE_C_SOURCE_FILE_EXTENSIONS c;m) +set(CMAKE_C_IGNORE_EXTENSIONS h;H;o;O;obj;OBJ;def;DEF;rc;RC) +set(CMAKE_C_LINKER_PREFERENCE 10) + +# Save compiler ABI information. 
+set(CMAKE_C_SIZEOF_DATA_PTR "8") +set(CMAKE_C_COMPILER_ABI "ELF") +set(CMAKE_C_BYTE_ORDER "LITTLE_ENDIAN") +set(CMAKE_C_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") + +if(CMAKE_C_SIZEOF_DATA_PTR) + set(CMAKE_SIZEOF_VOID_P "${CMAKE_C_SIZEOF_DATA_PTR}") +endif() + +if(CMAKE_C_COMPILER_ABI) + set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_C_COMPILER_ABI}") +endif() + +if(CMAKE_C_LIBRARY_ARCHITECTURE) + set(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") +endif() + +set(CMAKE_C_CL_SHOWINCLUDES_PREFIX "") +if(CMAKE_C_CL_SHOWINCLUDES_PREFIX) + set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_C_CL_SHOWINCLUDES_PREFIX}") +endif() + + + + + +set(CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/11/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include") +set(CMAKE_C_IMPLICIT_LINK_LIBRARIES "gcc;gcc_s;c;gcc;gcc_s") +set(CMAKE_C_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/11;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib;/usr/local/cuda/lib64/stubs") +set(CMAKE_C_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "") diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeCXXCompiler.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeCXXCompiler.cmake new file mode 100644 index 0000000000000000000000000000000000000000..e699eb85d23eb03dc4eeeb74d1a9d1878949a58f --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeCXXCompiler.cmake @@ -0,0 +1,83 @@ +set(CMAKE_CXX_COMPILER "/usr/bin/c++") +set(CMAKE_CXX_COMPILER_ARG1 "") +set(CMAKE_CXX_COMPILER_ID "GNU") +set(CMAKE_CXX_COMPILER_VERSION "11.4.0") +set(CMAKE_CXX_COMPILER_VERSION_INTERNAL "") +set(CMAKE_CXX_COMPILER_WRAPPER "") +set(CMAKE_CXX_STANDARD_COMPUTED_DEFAULT "17") +set(CMAKE_CXX_EXTENSIONS_COMPUTED_DEFAULT "ON") +set(CMAKE_CXX_COMPILE_FEATURES 
"cxx_std_98;cxx_template_template_parameters;cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates;cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates;cxx_std_17;cxx_std_20;cxx_std_23") +set(CMAKE_CXX98_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters") +set(CMAKE_CXX11_COMPILE_FEATURES 
"cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates") +set(CMAKE_CXX14_COMPILE_FEATURES "cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates") +set(CMAKE_CXX17_COMPILE_FEATURES "cxx_std_17") +set(CMAKE_CXX20_COMPILE_FEATURES "cxx_std_20") +set(CMAKE_CXX23_COMPILE_FEATURES "cxx_std_23") + +set(CMAKE_CXX_PLATFORM_ID "Linux") +set(CMAKE_CXX_SIMULATE_ID "") +set(CMAKE_CXX_COMPILER_FRONTEND_VARIANT "GNU") +set(CMAKE_CXX_SIMULATE_VERSION "") + + + + +set(CMAKE_AR "/usr/bin/ar") +set(CMAKE_CXX_COMPILER_AR "/usr/bin/gcc-ar-11") +set(CMAKE_RANLIB "/usr/bin/ranlib") +set(CMAKE_CXX_COMPILER_RANLIB "/usr/bin/gcc-ranlib-11") +set(CMAKE_LINKER "/usr/bin/ld") +set(CMAKE_MT "") +set(CMAKE_COMPILER_IS_GNUCXX 1) +set(CMAKE_CXX_COMPILER_LOADED 1) +set(CMAKE_CXX_COMPILER_WORKS TRUE) +set(CMAKE_CXX_ABI_COMPILED TRUE) + +set(CMAKE_CXX_COMPILER_ENV_VAR "CXX") + 
+set(CMAKE_CXX_COMPILER_ID_RUN 1) +set(CMAKE_CXX_SOURCE_FILE_EXTENSIONS C;M;c++;cc;cpp;cxx;m;mm;mpp;CPP;ixx;cppm) +set(CMAKE_CXX_IGNORE_EXTENSIONS inl;h;hpp;HPP;H;o;O;obj;OBJ;def;DEF;rc;RC) + +foreach (lang C OBJC OBJCXX) + if (CMAKE_${lang}_COMPILER_ID_RUN) + foreach(extension IN LISTS CMAKE_${lang}_SOURCE_FILE_EXTENSIONS) + list(REMOVE_ITEM CMAKE_CXX_SOURCE_FILE_EXTENSIONS ${extension}) + endforeach() + endif() +endforeach() + +set(CMAKE_CXX_LINKER_PREFERENCE 30) +set(CMAKE_CXX_LINKER_PREFERENCE_PROPAGATES 1) + +# Save compiler ABI information. +set(CMAKE_CXX_SIZEOF_DATA_PTR "8") +set(CMAKE_CXX_COMPILER_ABI "ELF") +set(CMAKE_CXX_BYTE_ORDER "LITTLE_ENDIAN") +set(CMAKE_CXX_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") + +if(CMAKE_CXX_SIZEOF_DATA_PTR) + set(CMAKE_SIZEOF_VOID_P "${CMAKE_CXX_SIZEOF_DATA_PTR}") +endif() + +if(CMAKE_CXX_COMPILER_ABI) + set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_CXX_COMPILER_ABI}") +endif() + +if(CMAKE_CXX_LIBRARY_ARCHITECTURE) + set(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") +endif() + +set(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX "") +if(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX) + set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_CXX_CL_SHOWINCLUDES_PREFIX}") +endif() + + + + + +set(CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES "/usr/include/c++/11;/usr/include/x86_64-linux-gnu/c++/11;/usr/include/c++/11/backward;/usr/lib/gcc/x86_64-linux-gnu/11/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include") +set(CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "stdc++;m;gcc_s;gcc;c;gcc_s;gcc") +set(CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/11;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib;/usr/local/cuda/lib64/stubs") +set(CMAKE_CXX_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "") diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeDetermineCompilerABI_C.bin b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeDetermineCompilerABI_C.bin new file mode 100644 index 
0000000000000000000000000000000000000000..3fd1d46a5ae76b18f23624abfda5b74161044185 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeDetermineCompilerABI_C.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f1901373878efd64fb8d123f266ec93db00a3523087d52afa0fff59401a75ce +size 15968 diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeDetermineCompilerABI_CXX.bin b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeDetermineCompilerABI_CXX.bin new file mode 100644 index 0000000000000000000000000000000000000000..87823cd3db5512ed817d32f456859d5cece062b9 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeDetermineCompilerABI_CXX.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:706369b9a080132db3bd9f26616f30a752f9376201eb47cfb747ef4b34d7120e +size 15992 diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeSystem.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeSystem.cmake new file mode 100644 index 0000000000000000000000000000000000000000..69be4f7c1d1ffe873a7a99fec392696dcd8726fe --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CMakeSystem.cmake @@ -0,0 +1,15 @@ +set(CMAKE_HOST_SYSTEM "Linux-6.1.85+") +set(CMAKE_HOST_SYSTEM_NAME "Linux") +set(CMAKE_HOST_SYSTEM_VERSION "6.1.85+") +set(CMAKE_HOST_SYSTEM_PROCESSOR "x86_64") + + + +set(CMAKE_SYSTEM "Linux-6.1.85+") +set(CMAKE_SYSTEM_NAME "Linux") +set(CMAKE_SYSTEM_VERSION "6.1.85+") +set(CMAKE_SYSTEM_PROCESSOR "x86_64") + +set(CMAKE_CROSSCOMPILING "FALSE") + +set(CMAKE_SYSTEM_LOADED 1) diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdC/CMakeCCompilerId.c b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdC/CMakeCCompilerId.c new file mode 100644 index 0000000000000000000000000000000000000000..88155ff20cb6a788920d888b832d81cfa22cdd8d 
--- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdC/CMakeCCompilerId.c @@ -0,0 +1,866 @@ +#ifdef __cplusplus +# error "A C++ compiler has been selected for C." +#endif + +#if defined(__18CXX) +# define ID_VOID_MAIN +#endif +#if defined(__CLASSIC_C__) +/* cv-qualifiers did not exist in K&R C */ +# define const +# define volatile +#endif + +#if !defined(__has_include) +/* If the compiler does not have __has_include, pretend the answer is + always no. */ +# define __has_include(x) 0 +#endif + + +/* Version number components: V=Version, R=Revision, P=Patch + Version date components: YYYY=Year, MM=Month, DD=Day */ + +#if defined(__INTEL_COMPILER) || defined(__ICC) +# define COMPILER_ID "Intel" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# if defined(__GNUC__) +# define SIMULATE_ID "GNU" +# endif + /* __INTEL_COMPILER = VRP prior to 2021, and then VVVV for 2021 and later, + except that a few beta releases use the old format with V=2021. */ +# if __INTEL_COMPILER < 2021 || __INTEL_COMPILER == 202110 || __INTEL_COMPILER == 202111 +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER/10 % 10) +# if defined(__INTEL_COMPILER_UPDATE) +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER_UPDATE) +# else +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER % 10) +# endif +# else +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER_UPDATE) + /* The third version component from --version is an update index, + but no macro is provided for it. 
*/ +# define COMPILER_VERSION_PATCH DEC(0) +# endif +# if defined(__INTEL_COMPILER_BUILD_DATE) + /* __INTEL_COMPILER_BUILD_DATE = YYYYMMDD */ +# define COMPILER_VERSION_TWEAK DEC(__INTEL_COMPILER_BUILD_DATE) +# endif +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# if defined(__GNUC__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +# elif defined(__GNUG__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUG__) +# endif +# if defined(__GNUC_MINOR__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif (defined(__clang__) && defined(__INTEL_CLANG_COMPILER)) || defined(__INTEL_LLVM_COMPILER) +# define COMPILER_ID "IntelLLVM" +#if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +#endif +#if defined(__GNUC__) +# define SIMULATE_ID "GNU" +#endif +/* __INTEL_LLVM_COMPILER = VVVVRP prior to 2021.2.0, VVVVRRPP for 2021.2.0 and + * later. Look for 6 digit vs. 8 digit version number to decide encoding. + * VVVV is no smaller than the current year when a version is released. 
+ */ +#if __INTEL_LLVM_COMPILER < 1000000L +# define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 10) +#else +# define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/10000) +# define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 100) +#endif +#if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +#endif +#if defined(__GNUC__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +#elif defined(__GNUG__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUG__) +#endif +#if defined(__GNUC_MINOR__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +#endif +#if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +#endif + +#elif defined(__PATHCC__) +# define COMPILER_ID "PathScale" +# define COMPILER_VERSION_MAJOR DEC(__PATHCC__) +# define COMPILER_VERSION_MINOR DEC(__PATHCC_MINOR__) +# if defined(__PATHCC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PATHCC_PATCHLEVEL__) +# endif + +#elif defined(__BORLANDC__) && defined(__CODEGEARC_VERSION__) +# define COMPILER_ID "Embarcadero" +# define COMPILER_VERSION_MAJOR HEX(__CODEGEARC_VERSION__>>24 & 0x00FF) +# define COMPILER_VERSION_MINOR HEX(__CODEGEARC_VERSION__>>16 & 0x00FF) +# define COMPILER_VERSION_PATCH DEC(__CODEGEARC_VERSION__ & 0xFFFF) + +#elif defined(__BORLANDC__) +# define COMPILER_ID "Borland" + /* __BORLANDC__ = 0xVRR */ +# define COMPILER_VERSION_MAJOR HEX(__BORLANDC__>>8) +# define COMPILER_VERSION_MINOR HEX(__BORLANDC__ & 0xFF) + +#elif defined(__WATCOMC__) && __WATCOMC__ < 1200 +# define COMPILER_ID "Watcom" + /* __WATCOMC__ = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(__WATCOMC__ / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 
10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__WATCOMC__) +# define COMPILER_ID "OpenWatcom" + /* __WATCOMC__ = VVRP + 1100 */ +# define COMPILER_VERSION_MAJOR DEC((__WATCOMC__ - 1100) / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__SUNPRO_C) +# define COMPILER_ID "SunPro" +# if __SUNPRO_C >= 0x5100 + /* __SUNPRO_C = 0xVRRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_C>>12) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_C>>4 & 0xFF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_C & 0xF) +# else + /* __SUNPRO_CC = 0xVRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_C>>8) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_C>>4 & 0xF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_C & 0xF) +# endif + +#elif defined(__HP_cc) +# define COMPILER_ID "HP" + /* __HP_cc = VVRRPP */ +# define COMPILER_VERSION_MAJOR DEC(__HP_cc/10000) +# define COMPILER_VERSION_MINOR DEC(__HP_cc/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__HP_cc % 100) + +#elif defined(__DECC) +# define COMPILER_ID "Compaq" + /* __DECC_VER = VVRRTPPPP */ +# define COMPILER_VERSION_MAJOR DEC(__DECC_VER/10000000) +# define COMPILER_VERSION_MINOR DEC(__DECC_VER/100000 % 100) +# define COMPILER_VERSION_PATCH DEC(__DECC_VER % 10000) + +#elif defined(__IBMC__) && defined(__COMPILER_VER__) +# define COMPILER_ID "zOS" + /* __IBMC__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMC__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10) + +#elif defined(__open_xl__) && defined(__clang__) +# define COMPILER_ID "IBMClang" +# define COMPILER_VERSION_MAJOR DEC(__open_xl_version__) +# define COMPILER_VERSION_MINOR DEC(__open_xl_release__) +# define COMPILER_VERSION_PATCH DEC(__open_xl_modification__) +# define COMPILER_VERSION_TWEAK DEC(__open_xl_ptf_fix_level__) + + +#elif 
defined(__ibmxl__) && defined(__clang__) +# define COMPILER_ID "XLClang" +# define COMPILER_VERSION_MAJOR DEC(__ibmxl_version__) +# define COMPILER_VERSION_MINOR DEC(__ibmxl_release__) +# define COMPILER_VERSION_PATCH DEC(__ibmxl_modification__) +# define COMPILER_VERSION_TWEAK DEC(__ibmxl_ptf_fix_level__) + + +#elif defined(__IBMC__) && !defined(__COMPILER_VER__) && __IBMC__ >= 800 +# define COMPILER_ID "XL" + /* __IBMC__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMC__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10) + +#elif defined(__IBMC__) && !defined(__COMPILER_VER__) && __IBMC__ < 800 +# define COMPILER_ID "VisualAge" + /* __IBMC__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMC__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10) + +#elif defined(__NVCOMPILER) +# define COMPILER_ID "NVHPC" +# define COMPILER_VERSION_MAJOR DEC(__NVCOMPILER_MAJOR__) +# define COMPILER_VERSION_MINOR DEC(__NVCOMPILER_MINOR__) +# if defined(__NVCOMPILER_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__NVCOMPILER_PATCHLEVEL__) +# endif + +#elif defined(__PGI) +# define COMPILER_ID "PGI" +# define COMPILER_VERSION_MAJOR DEC(__PGIC__) +# define COMPILER_VERSION_MINOR DEC(__PGIC_MINOR__) +# if defined(__PGIC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PGIC_PATCHLEVEL__) +# endif + +#elif defined(_CRAYC) +# define COMPILER_ID "Cray" +# define COMPILER_VERSION_MAJOR DEC(_RELEASE_MAJOR) +# define COMPILER_VERSION_MINOR DEC(_RELEASE_MINOR) + +#elif defined(__TI_COMPILER_VERSION__) +# define COMPILER_ID "TI" + /* __TI_COMPILER_VERSION__ = VVVRRRPPP */ +# define COMPILER_VERSION_MAJOR DEC(__TI_COMPILER_VERSION__/1000000) +# define COMPILER_VERSION_MINOR DEC(__TI_COMPILER_VERSION__/1000 % 1000) +# define COMPILER_VERSION_PATCH DEC(__TI_COMPILER_VERSION__ % 1000) + +#elif defined(__CLANG_FUJITSU) +# define COMPILER_ID "FujitsuClang" +# define 
COMPILER_VERSION_MAJOR DEC(__FCC_major__) +# define COMPILER_VERSION_MINOR DEC(__FCC_minor__) +# define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__) +# define COMPILER_VERSION_INTERNAL_STR __clang_version__ + + +#elif defined(__FUJITSU) +# define COMPILER_ID "Fujitsu" +# if defined(__FCC_version__) +# define COMPILER_VERSION __FCC_version__ +# elif defined(__FCC_major__) +# define COMPILER_VERSION_MAJOR DEC(__FCC_major__) +# define COMPILER_VERSION_MINOR DEC(__FCC_minor__) +# define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__) +# endif +# if defined(__fcc_version) +# define COMPILER_VERSION_INTERNAL DEC(__fcc_version) +# elif defined(__FCC_VERSION) +# define COMPILER_VERSION_INTERNAL DEC(__FCC_VERSION) +# endif + + +#elif defined(__ghs__) +# define COMPILER_ID "GHS" +/* __GHS_VERSION_NUMBER = VVVVRP */ +# ifdef __GHS_VERSION_NUMBER +# define COMPILER_VERSION_MAJOR DEC(__GHS_VERSION_NUMBER / 100) +# define COMPILER_VERSION_MINOR DEC(__GHS_VERSION_NUMBER / 10 % 10) +# define COMPILER_VERSION_PATCH DEC(__GHS_VERSION_NUMBER % 10) +# endif + +#elif defined(__TASKING__) +# define COMPILER_ID "Tasking" + # define COMPILER_VERSION_MAJOR DEC(__VERSION__/1000) + # define COMPILER_VERSION_MINOR DEC(__VERSION__ % 100) +# define COMPILER_VERSION_INTERNAL DEC(__VERSION__) + +#elif defined(__TINYC__) +# define COMPILER_ID "TinyCC" + +#elif defined(__BCC__) +# define COMPILER_ID "Bruce" + +#elif defined(__SCO_VERSION__) +# define COMPILER_ID "SCO" + +#elif defined(__ARMCC_VERSION) && !defined(__clang__) +# define COMPILER_ID "ARMCC" +#if __ARMCC_VERSION >= 1000000 + /* __ARMCC_VERSION = VRRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/1000000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#else + /* __ARMCC_VERSION = VRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/100000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 10) + # define 
COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#endif + + +#elif defined(__clang__) && defined(__apple_build_version__) +# define COMPILER_ID "AppleClang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# define COMPILER_VERSION_TWEAK DEC(__apple_build_version__) + +#elif defined(__clang__) && defined(__ARMCOMPILER_VERSION) +# define COMPILER_ID "ARMClang" + # define COMPILER_VERSION_MAJOR DEC(__ARMCOMPILER_VERSION/1000000) + # define COMPILER_VERSION_MINOR DEC(__ARMCOMPILER_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCOMPILER_VERSION % 10000) +# define COMPILER_VERSION_INTERNAL DEC(__ARMCOMPILER_VERSION) + +#elif defined(__clang__) +# define COMPILER_ID "Clang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif + +#elif defined(__LCC__) && (defined(__GNUC__) || defined(__GNUG__) || defined(__MCST__)) +# define COMPILER_ID "LCC" +# define COMPILER_VERSION_MAJOR DEC(__LCC__ / 100) +# define COMPILER_VERSION_MINOR DEC(__LCC__ % 100) +# if defined(__LCC_MINOR__) +# define COMPILER_VERSION_PATCH DEC(__LCC_MINOR__) +# endif +# if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define SIMULATE_ID "GNU" +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +# if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH 
DEC(__GNUC_PATCHLEVEL__) +# endif +# endif + +#elif defined(__GNUC__) +# define COMPILER_ID "GNU" +# define COMPILER_VERSION_MAJOR DEC(__GNUC__) +# if defined(__GNUC_MINOR__) +# define COMPILER_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif defined(_MSC_VER) +# define COMPILER_ID "MSVC" + /* _MSC_VER = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(_MSC_VER / 100) +# define COMPILER_VERSION_MINOR DEC(_MSC_VER % 100) +# if defined(_MSC_FULL_VER) +# if _MSC_VER >= 1400 + /* _MSC_FULL_VER = VVRRPPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 100000) +# else + /* _MSC_FULL_VER = VVRRPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 10000) +# endif +# endif +# if defined(_MSC_BUILD) +# define COMPILER_VERSION_TWEAK DEC(_MSC_BUILD) +# endif + +#elif defined(_ADI_COMPILER) +# define COMPILER_ID "ADSP" +#if defined(__VERSIONNUM__) + /* __VERSIONNUM__ = 0xVVRRPPTT */ +# define COMPILER_VERSION_MAJOR DEC(__VERSIONNUM__ >> 24 & 0xFF) +# define COMPILER_VERSION_MINOR DEC(__VERSIONNUM__ >> 16 & 0xFF) +# define COMPILER_VERSION_PATCH DEC(__VERSIONNUM__ >> 8 & 0xFF) +# define COMPILER_VERSION_TWEAK DEC(__VERSIONNUM__ & 0xFF) +#endif + +#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) +# define COMPILER_ID "IAR" +# if defined(__VER__) && defined(__ICCARM__) +# define COMPILER_VERSION_MAJOR DEC((__VER__) / 1000000) +# define COMPILER_VERSION_MINOR DEC(((__VER__) / 1000) % 1000) +# define COMPILER_VERSION_PATCH DEC((__VER__) % 1000) +# define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__) +# elif defined(__VER__) && (defined(__ICCAVR__) || defined(__ICCRX__) || defined(__ICCRH850__) || defined(__ICCRL78__) || defined(__ICC430__) || defined(__ICCRISCV__) || defined(__ICCV850__) || defined(__ICC8051__) || defined(__ICCSTM8__)) +# define COMPILER_VERSION_MAJOR DEC((__VER__) / 100) +# define COMPILER_VERSION_MINOR DEC((__VER__) - 
(((__VER__) / 100)*100)) +# define COMPILER_VERSION_PATCH DEC(__SUBVERSION__) +# define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__) +# endif + +#elif defined(__SDCC_VERSION_MAJOR) || defined(SDCC) +# define COMPILER_ID "SDCC" +# if defined(__SDCC_VERSION_MAJOR) +# define COMPILER_VERSION_MAJOR DEC(__SDCC_VERSION_MAJOR) +# define COMPILER_VERSION_MINOR DEC(__SDCC_VERSION_MINOR) +# define COMPILER_VERSION_PATCH DEC(__SDCC_VERSION_PATCH) +# else + /* SDCC = VRP */ +# define COMPILER_VERSION_MAJOR DEC(SDCC/100) +# define COMPILER_VERSION_MINOR DEC(SDCC/10 % 10) +# define COMPILER_VERSION_PATCH DEC(SDCC % 10) +# endif + + +/* These compilers are either not known or too old to define an + identification macro. Try to identify the platform and guess that + it is the native compiler. */ +#elif defined(__hpux) || defined(__hpua) +# define COMPILER_ID "HP" + +#else /* unknown compiler */ +# define COMPILER_ID "" +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. */ +char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]"; +#ifdef SIMULATE_ID +char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]"; +#endif + +#ifdef __QNXNTO__ +char const* qnxnto = "INFO" ":" "qnxnto[]"; +#endif + +#if defined(__CRAYXT_COMPUTE_LINUX_TARGET) +char const *info_cray = "INFO" ":" "compiler_wrapper[CrayPrgEnv]"; +#endif + +#define STRINGIFY_HELPER(X) #X +#define STRINGIFY(X) STRINGIFY_HELPER(X) + +/* Identify known platforms by name. 
*/ +#if defined(__linux) || defined(__linux__) || defined(linux) +# define PLATFORM_ID "Linux" + +#elif defined(__MSYS__) +# define PLATFORM_ID "MSYS" + +#elif defined(__CYGWIN__) +# define PLATFORM_ID "Cygwin" + +#elif defined(__MINGW32__) +# define PLATFORM_ID "MinGW" + +#elif defined(__APPLE__) +# define PLATFORM_ID "Darwin" + +#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) +# define PLATFORM_ID "Windows" + +#elif defined(__FreeBSD__) || defined(__FreeBSD) +# define PLATFORM_ID "FreeBSD" + +#elif defined(__NetBSD__) || defined(__NetBSD) +# define PLATFORM_ID "NetBSD" + +#elif defined(__OpenBSD__) || defined(__OPENBSD) +# define PLATFORM_ID "OpenBSD" + +#elif defined(__sun) || defined(sun) +# define PLATFORM_ID "SunOS" + +#elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__) +# define PLATFORM_ID "AIX" + +#elif defined(__hpux) || defined(__hpux__) +# define PLATFORM_ID "HP-UX" + +#elif defined(__HAIKU__) +# define PLATFORM_ID "Haiku" + +#elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS) +# define PLATFORM_ID "BeOS" + +#elif defined(__QNX__) || defined(__QNXNTO__) +# define PLATFORM_ID "QNX" + +#elif defined(__tru64) || defined(_tru64) || defined(__TRU64__) +# define PLATFORM_ID "Tru64" + +#elif defined(__riscos) || defined(__riscos__) +# define PLATFORM_ID "RISCos" + +#elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__) +# define PLATFORM_ID "SINIX" + +#elif defined(__UNIX_SV__) +# define PLATFORM_ID "UNIX_SV" + +#elif defined(__bsdos__) +# define PLATFORM_ID "BSDOS" + +#elif defined(_MPRAS) || defined(MPRAS) +# define PLATFORM_ID "MP-RAS" + +#elif defined(__osf) || defined(__osf__) +# define PLATFORM_ID "OSF1" + +#elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv) +# define PLATFORM_ID "SCO_SV" + +#elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX) +# define PLATFORM_ID "ULTRIX" + +#elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX) +# define 
PLATFORM_ID "Xenix" + +#elif defined(__WATCOMC__) +# if defined(__LINUX__) +# define PLATFORM_ID "Linux" + +# elif defined(__DOS__) +# define PLATFORM_ID "DOS" + +# elif defined(__OS2__) +# define PLATFORM_ID "OS2" + +# elif defined(__WINDOWS__) +# define PLATFORM_ID "Windows3x" + +# elif defined(__VXWORKS__) +# define PLATFORM_ID "VxWorks" + +# else /* unknown platform */ +# define PLATFORM_ID +# endif + +#elif defined(__INTEGRITY) +# if defined(INT_178B) +# define PLATFORM_ID "Integrity178" + +# else /* regular Integrity */ +# define PLATFORM_ID "Integrity" +# endif + +# elif defined(_ADI_COMPILER) +# define PLATFORM_ID "ADSP" + +#else /* unknown platform */ +# define PLATFORM_ID + +#endif + +/* For windows compilers MSVC and Intel we can determine + the architecture of the compiler being used. This is because + the compilers do not have flags that can change the architecture, + but rather depend on which compiler is being used +*/ +#if defined(_WIN32) && defined(_MSC_VER) +# if defined(_M_IA64) +# define ARCHITECTURE_ID "IA64" + +# elif defined(_M_ARM64EC) +# define ARCHITECTURE_ID "ARM64EC" + +# elif defined(_M_X64) || defined(_M_AMD64) +# define ARCHITECTURE_ID "x64" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# elif defined(_M_ARM64) +# define ARCHITECTURE_ID "ARM64" + +# elif defined(_M_ARM) +# if _M_ARM == 4 +# define ARCHITECTURE_ID "ARMV4I" +# elif _M_ARM == 5 +# define ARCHITECTURE_ID "ARMV5I" +# else +# define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM) +# endif + +# elif defined(_M_MIPS) +# define ARCHITECTURE_ID "MIPS" + +# elif defined(_M_SH) +# define ARCHITECTURE_ID "SHx" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__WATCOMC__) +# if defined(_M_I86) +# define ARCHITECTURE_ID "I86" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) +# if 
defined(__ICCARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__ICCRX__) +# define ARCHITECTURE_ID "RX" + +# elif defined(__ICCRH850__) +# define ARCHITECTURE_ID "RH850" + +# elif defined(__ICCRL78__) +# define ARCHITECTURE_ID "RL78" + +# elif defined(__ICCRISCV__) +# define ARCHITECTURE_ID "RISCV" + +# elif defined(__ICCAVR__) +# define ARCHITECTURE_ID "AVR" + +# elif defined(__ICC430__) +# define ARCHITECTURE_ID "MSP430" + +# elif defined(__ICCV850__) +# define ARCHITECTURE_ID "V850" + +# elif defined(__ICC8051__) +# define ARCHITECTURE_ID "8051" + +# elif defined(__ICCSTM8__) +# define ARCHITECTURE_ID "STM8" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__ghs__) +# if defined(__PPC64__) +# define ARCHITECTURE_ID "PPC64" + +# elif defined(__ppc__) +# define ARCHITECTURE_ID "PPC" + +# elif defined(__ARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__x86_64__) +# define ARCHITECTURE_ID "x64" + +# elif defined(__i386__) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__TI_COMPILER_VERSION__) +# if defined(__TI_ARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__MSP430__) +# define ARCHITECTURE_ID "MSP430" + +# elif defined(__TMS320C28XX__) +# define ARCHITECTURE_ID "TMS320C28x" + +# elif defined(__TMS320C6X__) || defined(_TMS320C6X) +# define ARCHITECTURE_ID "TMS320C6x" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +# elif defined(__ADSPSHARC__) +# define ARCHITECTURE_ID "SHARC" + +# elif defined(__ADSPBLACKFIN__) +# define ARCHITECTURE_ID "Blackfin" + +#elif defined(__TASKING__) + +# if defined(__CTC__) || defined(__CPTC__) +# define ARCHITECTURE_ID "TriCore" + +# elif defined(__CMCS__) +# define ARCHITECTURE_ID "MCS" + +# elif defined(__CARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__CARC__) +# define ARCHITECTURE_ID "ARC" + +# elif defined(__C51__) +# define ARCHITECTURE_ID 
"8051" + +# elif defined(__CPCP__) +# define ARCHITECTURE_ID "PCP" + +# else +# define ARCHITECTURE_ID "" +# endif + +#else +# define ARCHITECTURE_ID +#endif + +/* Convert integer to decimal digit literals. */ +#define DEC(n) \ + ('0' + (((n) / 10000000)%10)), \ + ('0' + (((n) / 1000000)%10)), \ + ('0' + (((n) / 100000)%10)), \ + ('0' + (((n) / 10000)%10)), \ + ('0' + (((n) / 1000)%10)), \ + ('0' + (((n) / 100)%10)), \ + ('0' + (((n) / 10)%10)), \ + ('0' + ((n) % 10)) + +/* Convert integer to hex digit literals. */ +#define HEX(n) \ + ('0' + ((n)>>28 & 0xF)), \ + ('0' + ((n)>>24 & 0xF)), \ + ('0' + ((n)>>20 & 0xF)), \ + ('0' + ((n)>>16 & 0xF)), \ + ('0' + ((n)>>12 & 0xF)), \ + ('0' + ((n)>>8 & 0xF)), \ + ('0' + ((n)>>4 & 0xF)), \ + ('0' + ((n) & 0xF)) + +/* Construct a string literal encoding the version number. */ +#ifdef COMPILER_VERSION +char const* info_version = "INFO" ":" "compiler_version[" COMPILER_VERSION "]"; + +/* Construct a string literal encoding the version number components. */ +#elif defined(COMPILER_VERSION_MAJOR) +char const info_version[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[', + COMPILER_VERSION_MAJOR, +# ifdef COMPILER_VERSION_MINOR + '.', COMPILER_VERSION_MINOR, +# ifdef COMPILER_VERSION_PATCH + '.', COMPILER_VERSION_PATCH, +# ifdef COMPILER_VERSION_TWEAK + '.', COMPILER_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct a string literal encoding the internal version number. 
*/ +#ifdef COMPILER_VERSION_INTERNAL +char const info_version_internal[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_', + 'i','n','t','e','r','n','a','l','[', + COMPILER_VERSION_INTERNAL,']','\0'}; +#elif defined(COMPILER_VERSION_INTERNAL_STR) +char const* info_version_internal = "INFO" ":" "compiler_version_internal[" COMPILER_VERSION_INTERNAL_STR "]"; +#endif + +/* Construct a string literal encoding the version number components. */ +#ifdef SIMULATE_VERSION_MAJOR +char const info_simulate_version[] = { + 'I', 'N', 'F', 'O', ':', + 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[', + SIMULATE_VERSION_MAJOR, +# ifdef SIMULATE_VERSION_MINOR + '.', SIMULATE_VERSION_MINOR, +# ifdef SIMULATE_VERSION_PATCH + '.', SIMULATE_VERSION_PATCH, +# ifdef SIMULATE_VERSION_TWEAK + '.', SIMULATE_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. 
*/ +char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]"; +char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]"; + + + +#if !defined(__STDC__) && !defined(__clang__) +# if defined(_MSC_VER) || defined(__ibmxl__) || defined(__IBMC__) +# define C_VERSION "90" +# else +# define C_VERSION +# endif +#elif __STDC_VERSION__ > 201710L +# define C_VERSION "23" +#elif __STDC_VERSION__ >= 201710L +# define C_VERSION "17" +#elif __STDC_VERSION__ >= 201000L +# define C_VERSION "11" +#elif __STDC_VERSION__ >= 199901L +# define C_VERSION "99" +#else +# define C_VERSION "90" +#endif +const char* info_language_standard_default = + "INFO" ":" "standard_default[" C_VERSION "]"; + +const char* info_language_extensions_default = "INFO" ":" "extensions_default[" +#if (defined(__clang__) || defined(__GNUC__) || defined(__xlC__) || \ + defined(__TI_COMPILER_VERSION__)) && \ + !defined(__STRICT_ANSI__) + "ON" +#else + "OFF" +#endif +"]"; + +/*--------------------------------------------------------------------------*/ + +#ifdef ID_VOID_MAIN +void main() {} +#else +# if defined(__CLASSIC_C__) +int main(argc, argv) int argc; char *argv[]; +# else +int main(int argc, char* argv[]) +# endif +{ + int require = 0; + require += info_compiler[argc]; + require += info_platform[argc]; + require += info_arch[argc]; +#ifdef COMPILER_VERSION_MAJOR + require += info_version[argc]; +#endif +#ifdef COMPILER_VERSION_INTERNAL + require += info_version_internal[argc]; +#endif +#ifdef SIMULATE_ID + require += info_simulate[argc]; +#endif +#ifdef SIMULATE_VERSION_MAJOR + require += info_simulate_version[argc]; +#endif +#if defined(__CRAYXT_COMPUTE_LINUX_TARGET) + require += info_cray[argc]; +#endif + require += info_language_standard_default[argc]; + require += info_language_extensions_default[argc]; + (void)argv; + return require; +} +#endif diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdC/a.out 
b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdC/a.out new file mode 100644 index 0000000000000000000000000000000000000000..c786756abbd10a6ac500dd20933efa409d328d0c Binary files /dev/null and b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdC/a.out differ diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdCXX/CMakeCXXCompilerId.cpp b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdCXX/CMakeCXXCompilerId.cpp new file mode 100644 index 0000000000000000000000000000000000000000..746b1672e6408e180134871066e63c54fb6e474d --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdCXX/CMakeCXXCompilerId.cpp @@ -0,0 +1,855 @@ +/* This source file must have a .cpp extension so that all C++ compilers + recognize the extension without flags. Borland does not know .cxx for + example. */ +#ifndef __cplusplus +# error "A C compiler has been selected for C++." +#endif + +#if !defined(__has_include) +/* If the compiler does not have __has_include, pretend the answer is + always no. */ +# define __has_include(x) 0 +#endif + + +/* Version number components: V=Version, R=Revision, P=Patch + Version date components: YYYY=Year, MM=Month, DD=Day */ + +#if defined(__COMO__) +# define COMPILER_ID "Comeau" + /* __COMO_VERSION__ = VRR */ +# define COMPILER_VERSION_MAJOR DEC(__COMO_VERSION__ / 100) +# define COMPILER_VERSION_MINOR DEC(__COMO_VERSION__ % 100) + +#elif defined(__INTEL_COMPILER) || defined(__ICC) +# define COMPILER_ID "Intel" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# if defined(__GNUC__) +# define SIMULATE_ID "GNU" +# endif + /* __INTEL_COMPILER = VRP prior to 2021, and then VVVV for 2021 and later, + except that a few beta releases use the old format with V=2021. 
*/ +# if __INTEL_COMPILER < 2021 || __INTEL_COMPILER == 202110 || __INTEL_COMPILER == 202111 +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER/10 % 10) +# if defined(__INTEL_COMPILER_UPDATE) +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER_UPDATE) +# else +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER % 10) +# endif +# else +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER_UPDATE) + /* The third version component from --version is an update index, + but no macro is provided for it. */ +# define COMPILER_VERSION_PATCH DEC(0) +# endif +# if defined(__INTEL_COMPILER_BUILD_DATE) + /* __INTEL_COMPILER_BUILD_DATE = YYYYMMDD */ +# define COMPILER_VERSION_TWEAK DEC(__INTEL_COMPILER_BUILD_DATE) +# endif +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# if defined(__GNUC__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +# elif defined(__GNUG__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUG__) +# endif +# if defined(__GNUC_MINOR__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif (defined(__clang__) && defined(__INTEL_CLANG_COMPILER)) || defined(__INTEL_LLVM_COMPILER) +# define COMPILER_ID "IntelLLVM" +#if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +#endif +#if defined(__GNUC__) +# define SIMULATE_ID "GNU" +#endif +/* __INTEL_LLVM_COMPILER = VVVVRP prior to 2021.2.0, VVVVRRPP for 2021.2.0 and + * later. Look for 6 digit vs. 8 digit version number to decide encoding. + * VVVV is no smaller than the current year when a version is released. 
+ */ +#if __INTEL_LLVM_COMPILER < 1000000L +# define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 10) +#else +# define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/10000) +# define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 100) +#endif +#if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +#endif +#if defined(__GNUC__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +#elif defined(__GNUG__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUG__) +#endif +#if defined(__GNUC_MINOR__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +#endif +#if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +#endif + +#elif defined(__PATHCC__) +# define COMPILER_ID "PathScale" +# define COMPILER_VERSION_MAJOR DEC(__PATHCC__) +# define COMPILER_VERSION_MINOR DEC(__PATHCC_MINOR__) +# if defined(__PATHCC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PATHCC_PATCHLEVEL__) +# endif + +#elif defined(__BORLANDC__) && defined(__CODEGEARC_VERSION__) +# define COMPILER_ID "Embarcadero" +# define COMPILER_VERSION_MAJOR HEX(__CODEGEARC_VERSION__>>24 & 0x00FF) +# define COMPILER_VERSION_MINOR HEX(__CODEGEARC_VERSION__>>16 & 0x00FF) +# define COMPILER_VERSION_PATCH DEC(__CODEGEARC_VERSION__ & 0xFFFF) + +#elif defined(__BORLANDC__) +# define COMPILER_ID "Borland" + /* __BORLANDC__ = 0xVRR */ +# define COMPILER_VERSION_MAJOR HEX(__BORLANDC__>>8) +# define COMPILER_VERSION_MINOR HEX(__BORLANDC__ & 0xFF) + +#elif defined(__WATCOMC__) && __WATCOMC__ < 1200 +# define COMPILER_ID "Watcom" + /* __WATCOMC__ = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(__WATCOMC__ / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 
10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__WATCOMC__) +# define COMPILER_ID "OpenWatcom" + /* __WATCOMC__ = VVRP + 1100 */ +# define COMPILER_VERSION_MAJOR DEC((__WATCOMC__ - 1100) / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__SUNPRO_CC) +# define COMPILER_ID "SunPro" +# if __SUNPRO_CC >= 0x5100 + /* __SUNPRO_CC = 0xVRRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_CC>>12) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_CC>>4 & 0xFF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_CC & 0xF) +# else + /* __SUNPRO_CC = 0xVRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_CC>>8) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_CC>>4 & 0xF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_CC & 0xF) +# endif + +#elif defined(__HP_aCC) +# define COMPILER_ID "HP" + /* __HP_aCC = VVRRPP */ +# define COMPILER_VERSION_MAJOR DEC(__HP_aCC/10000) +# define COMPILER_VERSION_MINOR DEC(__HP_aCC/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__HP_aCC % 100) + +#elif defined(__DECCXX) +# define COMPILER_ID "Compaq" + /* __DECCXX_VER = VVRRTPPPP */ +# define COMPILER_VERSION_MAJOR DEC(__DECCXX_VER/10000000) +# define COMPILER_VERSION_MINOR DEC(__DECCXX_VER/100000 % 100) +# define COMPILER_VERSION_PATCH DEC(__DECCXX_VER % 10000) + +#elif defined(__IBMCPP__) && defined(__COMPILER_VER__) +# define COMPILER_ID "zOS" + /* __IBMCPP__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10) + +#elif defined(__open_xl__) && defined(__clang__) +# define COMPILER_ID "IBMClang" +# define COMPILER_VERSION_MAJOR DEC(__open_xl_version__) +# define COMPILER_VERSION_MINOR DEC(__open_xl_release__) +# define COMPILER_VERSION_PATCH DEC(__open_xl_modification__) +# define COMPILER_VERSION_TWEAK 
DEC(__open_xl_ptf_fix_level__) + + +#elif defined(__ibmxl__) && defined(__clang__) +# define COMPILER_ID "XLClang" +# define COMPILER_VERSION_MAJOR DEC(__ibmxl_version__) +# define COMPILER_VERSION_MINOR DEC(__ibmxl_release__) +# define COMPILER_VERSION_PATCH DEC(__ibmxl_modification__) +# define COMPILER_VERSION_TWEAK DEC(__ibmxl_ptf_fix_level__) + + +#elif defined(__IBMCPP__) && !defined(__COMPILER_VER__) && __IBMCPP__ >= 800 +# define COMPILER_ID "XL" + /* __IBMCPP__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10) + +#elif defined(__IBMCPP__) && !defined(__COMPILER_VER__) && __IBMCPP__ < 800 +# define COMPILER_ID "VisualAge" + /* __IBMCPP__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10) + +#elif defined(__NVCOMPILER) +# define COMPILER_ID "NVHPC" +# define COMPILER_VERSION_MAJOR DEC(__NVCOMPILER_MAJOR__) +# define COMPILER_VERSION_MINOR DEC(__NVCOMPILER_MINOR__) +# if defined(__NVCOMPILER_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__NVCOMPILER_PATCHLEVEL__) +# endif + +#elif defined(__PGI) +# define COMPILER_ID "PGI" +# define COMPILER_VERSION_MAJOR DEC(__PGIC__) +# define COMPILER_VERSION_MINOR DEC(__PGIC_MINOR__) +# if defined(__PGIC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PGIC_PATCHLEVEL__) +# endif + +#elif defined(_CRAYC) +# define COMPILER_ID "Cray" +# define COMPILER_VERSION_MAJOR DEC(_RELEASE_MAJOR) +# define COMPILER_VERSION_MINOR DEC(_RELEASE_MINOR) + +#elif defined(__TI_COMPILER_VERSION__) +# define COMPILER_ID "TI" + /* __TI_COMPILER_VERSION__ = VVVRRRPPP */ +# define COMPILER_VERSION_MAJOR DEC(__TI_COMPILER_VERSION__/1000000) +# define COMPILER_VERSION_MINOR DEC(__TI_COMPILER_VERSION__/1000 % 1000) +# define COMPILER_VERSION_PATCH DEC(__TI_COMPILER_VERSION__ % 1000) + +#elif 
defined(__CLANG_FUJITSU) +# define COMPILER_ID "FujitsuClang" +# define COMPILER_VERSION_MAJOR DEC(__FCC_major__) +# define COMPILER_VERSION_MINOR DEC(__FCC_minor__) +# define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__) +# define COMPILER_VERSION_INTERNAL_STR __clang_version__ + + +#elif defined(__FUJITSU) +# define COMPILER_ID "Fujitsu" +# if defined(__FCC_version__) +# define COMPILER_VERSION __FCC_version__ +# elif defined(__FCC_major__) +# define COMPILER_VERSION_MAJOR DEC(__FCC_major__) +# define COMPILER_VERSION_MINOR DEC(__FCC_minor__) +# define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__) +# endif +# if defined(__fcc_version) +# define COMPILER_VERSION_INTERNAL DEC(__fcc_version) +# elif defined(__FCC_VERSION) +# define COMPILER_VERSION_INTERNAL DEC(__FCC_VERSION) +# endif + + +#elif defined(__ghs__) +# define COMPILER_ID "GHS" +/* __GHS_VERSION_NUMBER = VVVVRP */ +# ifdef __GHS_VERSION_NUMBER +# define COMPILER_VERSION_MAJOR DEC(__GHS_VERSION_NUMBER / 100) +# define COMPILER_VERSION_MINOR DEC(__GHS_VERSION_NUMBER / 10 % 10) +# define COMPILER_VERSION_PATCH DEC(__GHS_VERSION_NUMBER % 10) +# endif + +#elif defined(__TASKING__) +# define COMPILER_ID "Tasking" + # define COMPILER_VERSION_MAJOR DEC(__VERSION__/1000) + # define COMPILER_VERSION_MINOR DEC(__VERSION__ % 100) +# define COMPILER_VERSION_INTERNAL DEC(__VERSION__) + +#elif defined(__SCO_VERSION__) +# define COMPILER_ID "SCO" + +#elif defined(__ARMCC_VERSION) && !defined(__clang__) +# define COMPILER_ID "ARMCC" +#if __ARMCC_VERSION >= 1000000 + /* __ARMCC_VERSION = VRRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/1000000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#else + /* __ARMCC_VERSION = VRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/100000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 10) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) 
+#endif + + +#elif defined(__clang__) && defined(__apple_build_version__) +# define COMPILER_ID "AppleClang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# define COMPILER_VERSION_TWEAK DEC(__apple_build_version__) + +#elif defined(__clang__) && defined(__ARMCOMPILER_VERSION) +# define COMPILER_ID "ARMClang" + # define COMPILER_VERSION_MAJOR DEC(__ARMCOMPILER_VERSION/1000000) + # define COMPILER_VERSION_MINOR DEC(__ARMCOMPILER_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCOMPILER_VERSION % 10000) +# define COMPILER_VERSION_INTERNAL DEC(__ARMCOMPILER_VERSION) + +#elif defined(__clang__) +# define COMPILER_ID "Clang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif + +#elif defined(__LCC__) && (defined(__GNUC__) || defined(__GNUG__) || defined(__MCST__)) +# define COMPILER_ID "LCC" +# define COMPILER_VERSION_MAJOR DEC(__LCC__ / 100) +# define COMPILER_VERSION_MINOR DEC(__LCC__ % 100) +# if defined(__LCC_MINOR__) +# define COMPILER_VERSION_PATCH DEC(__LCC_MINOR__) +# endif +# if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define SIMULATE_ID "GNU" +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +# if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif +# endif + +#elif 
defined(__GNUC__) || defined(__GNUG__) +# define COMPILER_ID "GNU" +# if defined(__GNUC__) +# define COMPILER_VERSION_MAJOR DEC(__GNUC__) +# else +# define COMPILER_VERSION_MAJOR DEC(__GNUG__) +# endif +# if defined(__GNUC_MINOR__) +# define COMPILER_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif defined(_MSC_VER) +# define COMPILER_ID "MSVC" + /* _MSC_VER = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(_MSC_VER / 100) +# define COMPILER_VERSION_MINOR DEC(_MSC_VER % 100) +# if defined(_MSC_FULL_VER) +# if _MSC_VER >= 1400 + /* _MSC_FULL_VER = VVRRPPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 100000) +# else + /* _MSC_FULL_VER = VVRRPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 10000) +# endif +# endif +# if defined(_MSC_BUILD) +# define COMPILER_VERSION_TWEAK DEC(_MSC_BUILD) +# endif + +#elif defined(_ADI_COMPILER) +# define COMPILER_ID "ADSP" +#if defined(__VERSIONNUM__) + /* __VERSIONNUM__ = 0xVVRRPPTT */ +# define COMPILER_VERSION_MAJOR DEC(__VERSIONNUM__ >> 24 & 0xFF) +# define COMPILER_VERSION_MINOR DEC(__VERSIONNUM__ >> 16 & 0xFF) +# define COMPILER_VERSION_PATCH DEC(__VERSIONNUM__ >> 8 & 0xFF) +# define COMPILER_VERSION_TWEAK DEC(__VERSIONNUM__ & 0xFF) +#endif + +#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) +# define COMPILER_ID "IAR" +# if defined(__VER__) && defined(__ICCARM__) +# define COMPILER_VERSION_MAJOR DEC((__VER__) / 1000000) +# define COMPILER_VERSION_MINOR DEC(((__VER__) / 1000) % 1000) +# define COMPILER_VERSION_PATCH DEC((__VER__) % 1000) +# define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__) +# elif defined(__VER__) && (defined(__ICCAVR__) || defined(__ICCRX__) || defined(__ICCRH850__) || defined(__ICCRL78__) || defined(__ICC430__) || defined(__ICCRISCV__) || defined(__ICCV850__) || defined(__ICC8051__) || defined(__ICCSTM8__)) +# define COMPILER_VERSION_MAJOR DEC((__VER__) / 100) 
+# define COMPILER_VERSION_MINOR DEC((__VER__) - (((__VER__) / 100)*100)) +# define COMPILER_VERSION_PATCH DEC(__SUBVERSION__) +# define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__) +# endif + + +/* These compilers are either not known or too old to define an + identification macro. Try to identify the platform and guess that + it is the native compiler. */ +#elif defined(__hpux) || defined(__hpua) +# define COMPILER_ID "HP" + +#else /* unknown compiler */ +# define COMPILER_ID "" +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. */ +char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]"; +#ifdef SIMULATE_ID +char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]"; +#endif + +#ifdef __QNXNTO__ +char const* qnxnto = "INFO" ":" "qnxnto[]"; +#endif + +#if defined(__CRAYXT_COMPUTE_LINUX_TARGET) +char const *info_cray = "INFO" ":" "compiler_wrapper[CrayPrgEnv]"; +#endif + +#define STRINGIFY_HELPER(X) #X +#define STRINGIFY(X) STRINGIFY_HELPER(X) + +/* Identify known platforms by name. 
*/ +#if defined(__linux) || defined(__linux__) || defined(linux) +# define PLATFORM_ID "Linux" + +#elif defined(__MSYS__) +# define PLATFORM_ID "MSYS" + +#elif defined(__CYGWIN__) +# define PLATFORM_ID "Cygwin" + +#elif defined(__MINGW32__) +# define PLATFORM_ID "MinGW" + +#elif defined(__APPLE__) +# define PLATFORM_ID "Darwin" + +#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) +# define PLATFORM_ID "Windows" + +#elif defined(__FreeBSD__) || defined(__FreeBSD) +# define PLATFORM_ID "FreeBSD" + +#elif defined(__NetBSD__) || defined(__NetBSD) +# define PLATFORM_ID "NetBSD" + +#elif defined(__OpenBSD__) || defined(__OPENBSD) +# define PLATFORM_ID "OpenBSD" + +#elif defined(__sun) || defined(sun) +# define PLATFORM_ID "SunOS" + +#elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__) +# define PLATFORM_ID "AIX" + +#elif defined(__hpux) || defined(__hpux__) +# define PLATFORM_ID "HP-UX" + +#elif defined(__HAIKU__) +# define PLATFORM_ID "Haiku" + +#elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS) +# define PLATFORM_ID "BeOS" + +#elif defined(__QNX__) || defined(__QNXNTO__) +# define PLATFORM_ID "QNX" + +#elif defined(__tru64) || defined(_tru64) || defined(__TRU64__) +# define PLATFORM_ID "Tru64" + +#elif defined(__riscos) || defined(__riscos__) +# define PLATFORM_ID "RISCos" + +#elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__) +# define PLATFORM_ID "SINIX" + +#elif defined(__UNIX_SV__) +# define PLATFORM_ID "UNIX_SV" + +#elif defined(__bsdos__) +# define PLATFORM_ID "BSDOS" + +#elif defined(_MPRAS) || defined(MPRAS) +# define PLATFORM_ID "MP-RAS" + +#elif defined(__osf) || defined(__osf__) +# define PLATFORM_ID "OSF1" + +#elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv) +# define PLATFORM_ID "SCO_SV" + +#elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX) +# define PLATFORM_ID "ULTRIX" + +#elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX) +# define 
PLATFORM_ID "Xenix" + +#elif defined(__WATCOMC__) +# if defined(__LINUX__) +# define PLATFORM_ID "Linux" + +# elif defined(__DOS__) +# define PLATFORM_ID "DOS" + +# elif defined(__OS2__) +# define PLATFORM_ID "OS2" + +# elif defined(__WINDOWS__) +# define PLATFORM_ID "Windows3x" + +# elif defined(__VXWORKS__) +# define PLATFORM_ID "VxWorks" + +# else /* unknown platform */ +# define PLATFORM_ID +# endif + +#elif defined(__INTEGRITY) +# if defined(INT_178B) +# define PLATFORM_ID "Integrity178" + +# else /* regular Integrity */ +# define PLATFORM_ID "Integrity" +# endif + +# elif defined(_ADI_COMPILER) +# define PLATFORM_ID "ADSP" + +#else /* unknown platform */ +# define PLATFORM_ID + +#endif + +/* For windows compilers MSVC and Intel we can determine + the architecture of the compiler being used. This is because + the compilers do not have flags that can change the architecture, + but rather depend on which compiler is being used +*/ +#if defined(_WIN32) && defined(_MSC_VER) +# if defined(_M_IA64) +# define ARCHITECTURE_ID "IA64" + +# elif defined(_M_ARM64EC) +# define ARCHITECTURE_ID "ARM64EC" + +# elif defined(_M_X64) || defined(_M_AMD64) +# define ARCHITECTURE_ID "x64" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# elif defined(_M_ARM64) +# define ARCHITECTURE_ID "ARM64" + +# elif defined(_M_ARM) +# if _M_ARM == 4 +# define ARCHITECTURE_ID "ARMV4I" +# elif _M_ARM == 5 +# define ARCHITECTURE_ID "ARMV5I" +# else +# define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM) +# endif + +# elif defined(_M_MIPS) +# define ARCHITECTURE_ID "MIPS" + +# elif defined(_M_SH) +# define ARCHITECTURE_ID "SHx" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__WATCOMC__) +# if defined(_M_I86) +# define ARCHITECTURE_ID "I86" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) +# if 
defined(__ICCARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__ICCRX__) +# define ARCHITECTURE_ID "RX" + +# elif defined(__ICCRH850__) +# define ARCHITECTURE_ID "RH850" + +# elif defined(__ICCRL78__) +# define ARCHITECTURE_ID "RL78" + +# elif defined(__ICCRISCV__) +# define ARCHITECTURE_ID "RISCV" + +# elif defined(__ICCAVR__) +# define ARCHITECTURE_ID "AVR" + +# elif defined(__ICC430__) +# define ARCHITECTURE_ID "MSP430" + +# elif defined(__ICCV850__) +# define ARCHITECTURE_ID "V850" + +# elif defined(__ICC8051__) +# define ARCHITECTURE_ID "8051" + +# elif defined(__ICCSTM8__) +# define ARCHITECTURE_ID "STM8" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__ghs__) +# if defined(__PPC64__) +# define ARCHITECTURE_ID "PPC64" + +# elif defined(__ppc__) +# define ARCHITECTURE_ID "PPC" + +# elif defined(__ARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__x86_64__) +# define ARCHITECTURE_ID "x64" + +# elif defined(__i386__) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__TI_COMPILER_VERSION__) +# if defined(__TI_ARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__MSP430__) +# define ARCHITECTURE_ID "MSP430" + +# elif defined(__TMS320C28XX__) +# define ARCHITECTURE_ID "TMS320C28x" + +# elif defined(__TMS320C6X__) || defined(_TMS320C6X) +# define ARCHITECTURE_ID "TMS320C6x" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +# elif defined(__ADSPSHARC__) +# define ARCHITECTURE_ID "SHARC" + +# elif defined(__ADSPBLACKFIN__) +# define ARCHITECTURE_ID "Blackfin" + +#elif defined(__TASKING__) + +# if defined(__CTC__) || defined(__CPTC__) +# define ARCHITECTURE_ID "TriCore" + +# elif defined(__CMCS__) +# define ARCHITECTURE_ID "MCS" + +# elif defined(__CARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__CARC__) +# define ARCHITECTURE_ID "ARC" + +# elif defined(__C51__) +# define ARCHITECTURE_ID 
"8051" + +# elif defined(__CPCP__) +# define ARCHITECTURE_ID "PCP" + +# else +# define ARCHITECTURE_ID "" +# endif + +#else +# define ARCHITECTURE_ID +#endif + +/* Convert integer to decimal digit literals. */ +#define DEC(n) \ + ('0' + (((n) / 10000000)%10)), \ + ('0' + (((n) / 1000000)%10)), \ + ('0' + (((n) / 100000)%10)), \ + ('0' + (((n) / 10000)%10)), \ + ('0' + (((n) / 1000)%10)), \ + ('0' + (((n) / 100)%10)), \ + ('0' + (((n) / 10)%10)), \ + ('0' + ((n) % 10)) + +/* Convert integer to hex digit literals. */ +#define HEX(n) \ + ('0' + ((n)>>28 & 0xF)), \ + ('0' + ((n)>>24 & 0xF)), \ + ('0' + ((n)>>20 & 0xF)), \ + ('0' + ((n)>>16 & 0xF)), \ + ('0' + ((n)>>12 & 0xF)), \ + ('0' + ((n)>>8 & 0xF)), \ + ('0' + ((n)>>4 & 0xF)), \ + ('0' + ((n) & 0xF)) + +/* Construct a string literal encoding the version number. */ +#ifdef COMPILER_VERSION +char const* info_version = "INFO" ":" "compiler_version[" COMPILER_VERSION "]"; + +/* Construct a string literal encoding the version number components. */ +#elif defined(COMPILER_VERSION_MAJOR) +char const info_version[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[', + COMPILER_VERSION_MAJOR, +# ifdef COMPILER_VERSION_MINOR + '.', COMPILER_VERSION_MINOR, +# ifdef COMPILER_VERSION_PATCH + '.', COMPILER_VERSION_PATCH, +# ifdef COMPILER_VERSION_TWEAK + '.', COMPILER_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct a string literal encoding the internal version number. 
*/ +#ifdef COMPILER_VERSION_INTERNAL +char const info_version_internal[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_', + 'i','n','t','e','r','n','a','l','[', + COMPILER_VERSION_INTERNAL,']','\0'}; +#elif defined(COMPILER_VERSION_INTERNAL_STR) +char const* info_version_internal = "INFO" ":" "compiler_version_internal[" COMPILER_VERSION_INTERNAL_STR "]"; +#endif + +/* Construct a string literal encoding the version number components. */ +#ifdef SIMULATE_VERSION_MAJOR +char const info_simulate_version[] = { + 'I', 'N', 'F', 'O', ':', + 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[', + SIMULATE_VERSION_MAJOR, +# ifdef SIMULATE_VERSION_MINOR + '.', SIMULATE_VERSION_MINOR, +# ifdef SIMULATE_VERSION_PATCH + '.', SIMULATE_VERSION_PATCH, +# ifdef SIMULATE_VERSION_TWEAK + '.', SIMULATE_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. 
*/ +char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]"; +char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]"; + + + +#if defined(__INTEL_COMPILER) && defined(_MSVC_LANG) && _MSVC_LANG < 201403L +# if defined(__INTEL_CXX11_MODE__) +# if defined(__cpp_aggregate_nsdmi) +# define CXX_STD 201402L +# else +# define CXX_STD 201103L +# endif +# else +# define CXX_STD 199711L +# endif +#elif defined(_MSC_VER) && defined(_MSVC_LANG) +# define CXX_STD _MSVC_LANG +#else +# define CXX_STD __cplusplus +#endif + +const char* info_language_standard_default = "INFO" ":" "standard_default[" +#if CXX_STD > 202002L + "23" +#elif CXX_STD > 201703L + "20" +#elif CXX_STD >= 201703L + "17" +#elif CXX_STD >= 201402L + "14" +#elif CXX_STD >= 201103L + "11" +#else + "98" +#endif +"]"; + +const char* info_language_extensions_default = "INFO" ":" "extensions_default[" +#if (defined(__clang__) || defined(__GNUC__) || defined(__xlC__) || \ + defined(__TI_COMPILER_VERSION__)) && \ + !defined(__STRICT_ANSI__) + "ON" +#else + "OFF" +#endif +"]"; + +/*--------------------------------------------------------------------------*/ + +int main(int argc, char* argv[]) +{ + int require = 0; + require += info_compiler[argc]; + require += info_platform[argc]; + require += info_arch[argc]; +#ifdef COMPILER_VERSION_MAJOR + require += info_version[argc]; +#endif +#ifdef COMPILER_VERSION_INTERNAL + require += info_version_internal[argc]; +#endif +#ifdef SIMULATE_ID + require += info_simulate[argc]; +#endif +#ifdef SIMULATE_VERSION_MAJOR + require += info_simulate_version[argc]; +#endif +#if defined(__CRAYXT_COMPUTE_LINUX_TARGET) + require += info_cray[argc]; +#endif + require += info_language_standard_default[argc]; + require += info_language_extensions_default[argc]; + (void)argv; + return require; +} diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdCXX/a.out b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdCXX/a.out new file 
mode 100644 index 0000000000000000000000000000000000000000..6f7591430982e78f058bf3d0fb3528e454cf27eb Binary files /dev/null and b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdCXX/a.out differ diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeConfigureLog.yaml b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeConfigureLog.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a6a45a31a5f8f1bbe43dd1d60b69ed42c0950325 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeConfigureLog.yaml @@ -0,0 +1,615 @@ + +--- +events: + - + kind: "message-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineSystem.cmake:204 (message)" + - "CMakeLists.txt:3 (project)" + message: | + The system is: Linux - 6.1.85+ - x86_64 + - + kind: "message-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerId.cmake:17 (message)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerId.cmake:64 (__determine_compiler_id_test)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCCompiler.cmake:123 (CMAKE_DETERMINE_COMPILER_ID)" + - "CMakeLists.txt:3 (project)" + message: | + Compiling the C compiler identification source file "CMakeCCompilerId.c" succeeded. 
+ Compiler: /usr/bin/cc + Build flags: + Id flags: + + The output was: + 0 + + + Compilation of the C compiler identification source "CMakeCCompilerId.c" produced "a.out" + + The C compiler identification is GNU, found in: + /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdC/a.out + + - + kind: "message-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerId.cmake:17 (message)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerId.cmake:64 (__determine_compiler_id_test)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCXXCompiler.cmake:126 (CMAKE_DETERMINE_COMPILER_ID)" + - "CMakeLists.txt:3 (project)" + message: | + Compiling the CXX compiler identification source file "CMakeCXXCompilerId.cpp" succeeded. + Compiler: /usr/bin/c++ + Build flags: ;-DVERSION_INFO=\\"0.0.1\\" + Id flags: + + The output was: + 0 + : warning: missing terminating " character + + + Compilation of the CXX compiler identification source "CMakeCXXCompilerId.cpp" produced "a.out" + + The CXX compiler identification is GNU, found in: + /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/3.26.4/CompilerIdCXX/a.out + + - + kind: "try_compile-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerABI.cmake:57 (try_compile)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeTestCCompiler.cmake:26 (CMAKE_DETERMINE_COMPILER_ABI)" + - "CMakeLists.txt:3 (project)" + checks: + - "Detecting C compiler ABI info" + directories: + source: "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-4A7j5k" + binary: "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-4A7j5k" + cmakeVariables: + CMAKE_C_FLAGS: "" + CMAKE_C_FLAGS_DEBUG: "-g" + CMAKE_EXE_LINKER_FLAGS: "" + buildResult: + variable: "CMAKE_C_ABI_COMPILED" + cached: true + 
stdout: | + Change Dir: /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-4A7j5k + + Run Build Command(s):/usr/local/envs/word/bin/cmake -E env VERBOSE=1 /usr/bin/gmake -f Makefile cmTC_cb2a3/fast && /usr/bin/gmake -f CMakeFiles/cmTC_cb2a3.dir/build.make CMakeFiles/cmTC_cb2a3.dir/build + gmake[1]: Entering directory '/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-4A7j5k' + Building C object CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o + /usr/bin/cc -v -o CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o -c /usr/local/envs/word/share/cmake-3.26/Modules/CMakeCCompilerABI.c + Using built-in specs. + COLLECT_GCC=/usr/bin/cc + OFFLOAD_TARGET_NAMES=nvptx-none:amdgcn-amdhsa + OFFLOAD_TARGET_DEFAULT=1 + Target: x86_64-linux-gnu + Configured with: ../src/configure -v --with-pkgversion='Ubuntu 11.4.0-1ubuntu1~22.04' --with-bugurl=file:///usr/share/doc/gcc-11/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++,m2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-11 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-bootstrap --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --enable-libphobos-checking=release --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --enable-cet --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-nvptx/usr,amdgcn-amdhsa=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-gcn/usr --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu 
--host=x86_64-linux-gnu --target=x86_64-linux-gnu --with-build-config=bootstrap-lto-lean --enable-link-serialization=2 + Thread model: posix + Supported LTO compression algorithms: zlib zstd + gcc version 11.4.0 (Ubuntu 11.4.0-1ubuntu1~22.04) + COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_cb2a3.dir/' + /usr/lib/gcc/x86_64-linux-gnu/11/cc1 -quiet -v -imultiarch x86_64-linux-gnu /usr/local/envs/word/share/cmake-3.26/Modules/CMakeCCompilerABI.c -quiet -dumpdir CMakeFiles/cmTC_cb2a3.dir/ -dumpbase CMakeCCompilerABI.c.c -dumpbase-ext .c -mtune=generic -march=x86-64 -version -fasynchronous-unwind-tables -fstack-protector-strong -Wformat -Wformat-security -fstack-clash-protection -fcf-protection -o /tmp/ccWQGUoU.s + GNU C17 (Ubuntu 11.4.0-1ubuntu1~22.04) version 11.4.0 (x86_64-linux-gnu) + compiled by GNU C version 11.4.0, GMP version 6.2.1, MPFR version 4.1.0, MPC version 1.2.1, isl version isl-0.24-GMP + + GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072 + ignoring nonexistent directory "/usr/local/include/x86_64-linux-gnu" + ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/11/include-fixed" + ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/11/../../../../x86_64-linux-gnu/include" + #include "..." search starts here: + #include <...> search starts here: + /usr/lib/gcc/x86_64-linux-gnu/11/include + /usr/local/include + /usr/include/x86_64-linux-gnu + /usr/include + End of search list. 
+ GNU C17 (Ubuntu 11.4.0-1ubuntu1~22.04) version 11.4.0 (x86_64-linux-gnu) + compiled by GNU C version 11.4.0, GMP version 6.2.1, MPFR version 4.1.0, MPC version 1.2.1, isl version isl-0.24-GMP + + GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072 + Compiler executable checksum: 50eaa2331df977b8016186198deb2d18 + COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_cb2a3.dir/' + as -v --64 -o CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o /tmp/ccWQGUoU.s + GNU assembler version 2.38 (x86_64-linux-gnu) using BFD version (GNU Binutils for Ubuntu) 2.38 + COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/ + LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/local/cuda/lib64/stubs/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../:/lib/:/usr/lib/ + COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.' + Linking C executable cmTC_cb2a3 + /usr/local/envs/word/bin/cmake -E cmake_link_script CMakeFiles/cmTC_cb2a3.dir/link.txt --verbose=1 + /usr/bin/cc -v CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o -o cmTC_cb2a3 + Using built-in specs. 
+ COLLECT_GCC=/usr/bin/cc + COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper + OFFLOAD_TARGET_NAMES=nvptx-none:amdgcn-amdhsa + OFFLOAD_TARGET_DEFAULT=1 + Target: x86_64-linux-gnu + Configured with: ../src/configure -v --with-pkgversion='Ubuntu 11.4.0-1ubuntu1~22.04' --with-bugurl=file:///usr/share/doc/gcc-11/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++,m2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-11 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-bootstrap --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --enable-libphobos-checking=release --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --enable-cet --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-nvptx/usr,amdgcn-amdhsa=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-gcn/usr --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu --with-build-config=bootstrap-lto-lean --enable-link-serialization=2 + Thread model: posix + Supported LTO compression algorithms: zlib zstd + gcc version 11.4.0 (Ubuntu 11.4.0-1ubuntu1~22.04) + COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/ + 
LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/local/cuda/lib64/stubs/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../:/lib/:/usr/lib/ + COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_cb2a3' '-mtune=generic' '-march=x86-64' '-dumpdir' 'cmTC_cb2a3.' + /usr/lib/gcc/x86_64-linux-gnu/11/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/11/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper -plugin-opt=-fresolution=/tmp/cciPMsN6.res -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -dynamic-linker /lib64/ld-linux-x86-64.so.2 -pie -z now -z relro -o cmTC_cb2a3 /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o -L/usr/lib/gcc/x86_64-linux-gnu/11 -L/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/local/cuda/lib64/stubs -L/usr/lib/gcc/x86_64-linux-gnu/11/../../.. CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o -lgcc --push-state --as-needed -lgcc_s --pop-state -lc -lgcc --push-state --as-needed -lgcc_s --pop-state /usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o + COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_cb2a3' '-mtune=generic' '-march=x86-64' '-dumpdir' 'cmTC_cb2a3.' 
+ gmake[1]: Leaving directory '/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-4A7j5k' + + exitCode: 0 + - + kind: "message-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerABI.cmake:127 (message)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeTestCCompiler.cmake:26 (CMAKE_DETERMINE_COMPILER_ABI)" + - "CMakeLists.txt:3 (project)" + message: | + Parsed C implicit include dir info: rv=done + found start of include info + found start of implicit include info + add: [/usr/lib/gcc/x86_64-linux-gnu/11/include] + add: [/usr/local/include] + add: [/usr/include/x86_64-linux-gnu] + add: [/usr/include] + end of search list found + collapse include dir [/usr/lib/gcc/x86_64-linux-gnu/11/include] ==> [/usr/lib/gcc/x86_64-linux-gnu/11/include] + collapse include dir [/usr/local/include] ==> [/usr/local/include] + collapse include dir [/usr/include/x86_64-linux-gnu] ==> [/usr/include/x86_64-linux-gnu] + collapse include dir [/usr/include] ==> [/usr/include] + implicit include dirs: [/usr/lib/gcc/x86_64-linux-gnu/11/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include] + + + - + kind: "message-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerABI.cmake:152 (message)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeTestCCompiler.cmake:26 (CMAKE_DETERMINE_COMPILER_ABI)" + - "CMakeLists.txt:3 (project)" + message: | + Parsed C implicit link information: + link line regex: [^( *|.*[/\\])(ld|CMAKE_LINK_STARTFILE-NOTFOUND|([^/\\]+-)?ld|collect2)[^/\\]*( |$)] + ignore line: [Change Dir: /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-4A7j5k] + ignore line: [] + ignore line: [Run Build Command(s):/usr/local/envs/word/bin/cmake -E env VERBOSE=1 /usr/bin/gmake -f Makefile cmTC_cb2a3/fast && /usr/bin/gmake -f CMakeFiles/cmTC_cb2a3.dir/build.make 
CMakeFiles/cmTC_cb2a3.dir/build] + ignore line: [gmake[1]: Entering directory '/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-4A7j5k'] + ignore line: [Building C object CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o] + ignore line: [/usr/bin/cc -v -o CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o -c /usr/local/envs/word/share/cmake-3.26/Modules/CMakeCCompilerABI.c] + ignore line: [Using built-in specs.] + ignore line: [COLLECT_GCC=/usr/bin/cc] + ignore line: [OFFLOAD_TARGET_NAMES=nvptx-none:amdgcn-amdhsa] + ignore line: [OFFLOAD_TARGET_DEFAULT=1] + ignore line: [Target: x86_64-linux-gnu] + ignore line: [Configured with: ../src/configure -v --with-pkgversion='Ubuntu 11.4.0-1ubuntu1~22.04' --with-bugurl=file:///usr/share/doc/gcc-11/README.Bugs --enable-languages=c ada c++ go brig d fortran objc obj-c++ m2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-11 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-bootstrap --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --enable-libphobos-checking=release --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --enable-cet --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32 m64 mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-nvptx/usr amdgcn-amdhsa=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-gcn/usr --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu --with-build-config=bootstrap-lto-lean --enable-link-serialization=2] + ignore line: [Thread model: posix] + ignore line: 
[Supported LTO compression algorithms: zlib zstd] + ignore line: [gcc version 11.4.0 (Ubuntu 11.4.0-1ubuntu1~22.04) ] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_cb2a3.dir/'] + ignore line: [ /usr/lib/gcc/x86_64-linux-gnu/11/cc1 -quiet -v -imultiarch x86_64-linux-gnu /usr/local/envs/word/share/cmake-3.26/Modules/CMakeCCompilerABI.c -quiet -dumpdir CMakeFiles/cmTC_cb2a3.dir/ -dumpbase CMakeCCompilerABI.c.c -dumpbase-ext .c -mtune=generic -march=x86-64 -version -fasynchronous-unwind-tables -fstack-protector-strong -Wformat -Wformat-security -fstack-clash-protection -fcf-protection -o /tmp/ccWQGUoU.s] + ignore line: [GNU C17 (Ubuntu 11.4.0-1ubuntu1~22.04) version 11.4.0 (x86_64-linux-gnu)] + ignore line: [ compiled by GNU C version 11.4.0 GMP version 6.2.1 MPFR version 4.1.0 MPC version 1.2.1 isl version isl-0.24-GMP] + ignore line: [] + ignore line: [GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072] + ignore line: [ignoring nonexistent directory "/usr/local/include/x86_64-linux-gnu"] + ignore line: [ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/11/include-fixed"] + ignore line: [ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/11/../../../../x86_64-linux-gnu/include"] + ignore line: [#include "..." search starts here:] + ignore line: [#include <...> search starts here:] + ignore line: [ /usr/lib/gcc/x86_64-linux-gnu/11/include] + ignore line: [ /usr/local/include] + ignore line: [ /usr/include/x86_64-linux-gnu] + ignore line: [ /usr/include] + ignore line: [End of search list.] 
+ ignore line: [GNU C17 (Ubuntu 11.4.0-1ubuntu1~22.04) version 11.4.0 (x86_64-linux-gnu)] + ignore line: [ compiled by GNU C version 11.4.0 GMP version 6.2.1 MPFR version 4.1.0 MPC version 1.2.1 isl version isl-0.24-GMP] + ignore line: [] + ignore line: [GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072] + ignore line: [Compiler executable checksum: 50eaa2331df977b8016186198deb2d18] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_cb2a3.dir/'] + ignore line: [ as -v --64 -o CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o /tmp/ccWQGUoU.s] + ignore line: [GNU assembler version 2.38 (x86_64-linux-gnu) using BFD version (GNU Binutils for Ubuntu) 2.38] + ignore line: [COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/] + ignore line: [LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/local/cuda/lib64/stubs/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../:/lib/:/usr/lib/] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.'] + ignore line: [Linking C executable cmTC_cb2a3] + ignore line: [/usr/local/envs/word/bin/cmake -E cmake_link_script CMakeFiles/cmTC_cb2a3.dir/link.txt --verbose=1] + ignore line: [/usr/bin/cc -v CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o -o cmTC_cb2a3 ] + ignore line: [Using built-in specs.] 
+ ignore line: [COLLECT_GCC=/usr/bin/cc] + ignore line: [COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper] + ignore line: [OFFLOAD_TARGET_NAMES=nvptx-none:amdgcn-amdhsa] + ignore line: [OFFLOAD_TARGET_DEFAULT=1] + ignore line: [Target: x86_64-linux-gnu] + ignore line: [Configured with: ../src/configure -v --with-pkgversion='Ubuntu 11.4.0-1ubuntu1~22.04' --with-bugurl=file:///usr/share/doc/gcc-11/README.Bugs --enable-languages=c ada c++ go brig d fortran objc obj-c++ m2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-11 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-bootstrap --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --enable-libphobos-checking=release --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --enable-cet --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32 m64 mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-nvptx/usr amdgcn-amdhsa=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-gcn/usr --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu --with-build-config=bootstrap-lto-lean --enable-link-serialization=2] + ignore line: [Thread model: posix] + ignore line: [Supported LTO compression algorithms: zlib zstd] + ignore line: [gcc version 11.4.0 (Ubuntu 11.4.0-1ubuntu1~22.04) ] + ignore line: [COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/] + ignore line: 
[LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/local/cuda/lib64/stubs/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../:/lib/:/usr/lib/] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_cb2a3' '-mtune=generic' '-march=x86-64' '-dumpdir' 'cmTC_cb2a3.'] + link line: [ /usr/lib/gcc/x86_64-linux-gnu/11/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/11/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper -plugin-opt=-fresolution=/tmp/cciPMsN6.res -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -dynamic-linker /lib64/ld-linux-x86-64.so.2 -pie -z now -z relro -o cmTC_cb2a3 /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o -L/usr/lib/gcc/x86_64-linux-gnu/11 -L/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/local/cuda/lib64/stubs -L/usr/lib/gcc/x86_64-linux-gnu/11/../../.. 
CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o -lgcc --push-state --as-needed -lgcc_s --pop-state -lc -lgcc --push-state --as-needed -lgcc_s --pop-state /usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/11/collect2] ==> ignore + arg [-plugin] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/11/liblto_plugin.so] ==> ignore + arg [-plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper] ==> ignore + arg [-plugin-opt=-fresolution=/tmp/cciPMsN6.res] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [-plugin-opt=-pass-through=-lc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [--build-id] ==> ignore + arg [--eh-frame-hdr] ==> ignore + arg [-m] ==> ignore + arg [elf_x86_64] ==> ignore + arg [--hash-style=gnu] ==> ignore + arg [--as-needed] ==> ignore + arg [-dynamic-linker] ==> ignore + arg [/lib64/ld-linux-x86-64.so.2] ==> ignore + arg [-pie] ==> ignore + arg [-znow] ==> ignore + arg [-zrelro] ==> ignore + arg [-o] ==> ignore + arg [cmTC_cb2a3] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/11] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/11] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib] + arg [-L/lib/x86_64-linux-gnu] ==> 
dir [/lib/x86_64-linux-gnu] + arg [-L/lib/../lib] ==> dir [/lib/../lib] + arg [-L/usr/lib/x86_64-linux-gnu] ==> dir [/usr/lib/x86_64-linux-gnu] + arg [-L/usr/lib/../lib] ==> dir [/usr/lib/../lib] + arg [-L/usr/local/cuda/lib64/stubs] ==> dir [/usr/local/cuda/lib64/stubs] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/11/../../..] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../..] + arg [CMakeFiles/cmTC_cb2a3.dir/CMakeCCompilerABI.c.o] ==> ignore + arg [-lgcc] ==> lib [gcc] + arg [--push-state] ==> ignore + arg [--as-needed] ==> ignore + arg [-lgcc_s] ==> lib [gcc_s] + arg [--pop-state] ==> ignore + arg [-lc] ==> lib [c] + arg [-lgcc] ==> lib [gcc] + arg [--push-state] ==> ignore + arg [--as-needed] ==> ignore + arg [-lgcc_s] ==> lib [gcc_s] + arg [--pop-state] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o] ==> [/usr/lib/x86_64-linux-gnu/Scrt1.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o] ==> [/usr/lib/x86_64-linux-gnu/crti.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o] ==> [/usr/lib/x86_64-linux-gnu/crtn.o] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/11] ==> [/usr/lib/gcc/x86_64-linux-gnu/11] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib] ==> [/usr/lib] + collapse library dir [/lib/x86_64-linux-gnu] ==> [/lib/x86_64-linux-gnu] + collapse library dir [/lib/../lib] ==> [/lib] + collapse library dir [/usr/lib/x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/../lib] ==> [/usr/lib] + collapse library dir 
[/usr/local/cuda/lib64/stubs] ==> [/usr/local/cuda/lib64/stubs] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../..] ==> [/usr/lib] + implicit libs: [gcc;gcc_s;c;gcc;gcc_s] + implicit objs: [/usr/lib/x86_64-linux-gnu/Scrt1.o;/usr/lib/x86_64-linux-gnu/crti.o;/usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o;/usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o;/usr/lib/x86_64-linux-gnu/crtn.o] + implicit dirs: [/usr/lib/gcc/x86_64-linux-gnu/11;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib;/usr/local/cuda/lib64/stubs] + implicit fwks: [] + + + - + kind: "try_compile-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerABI.cmake:57 (try_compile)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeTestCXXCompiler.cmake:26 (CMAKE_DETERMINE_COMPILER_ABI)" + - "CMakeLists.txt:3 (project)" + checks: + - "Detecting CXX compiler ABI info" + directories: + source: "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-qdQRVR" + binary: "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-qdQRVR" + cmakeVariables: + CMAKE_CXX_FLAGS: "-DVERSION_INFO=\\\"0.0.1\\\"" + CMAKE_CXX_FLAGS_DEBUG: "-g" + CMAKE_EXE_LINKER_FLAGS: "" + buildResult: + variable: "CMAKE_CXX_ABI_COMPILED" + cached: true + stdout: | + Change Dir: /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-qdQRVR + + Run Build Command(s):/usr/local/envs/word/bin/cmake -E env VERBOSE=1 /usr/bin/gmake -f Makefile cmTC_a7d3c/fast && /usr/bin/gmake -f CMakeFiles/cmTC_a7d3c.dir/build.make CMakeFiles/cmTC_a7d3c.dir/build + gmake[1]: Entering directory '/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-qdQRVR' + Building CXX object CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o + /usr/bin/c++ -DVERSION_INFO=\\"0.0.1\\" -v -o CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o -c 
/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCXXCompilerABI.cpp + Using built-in specs. + COLLECT_GCC=/usr/bin/c++ + OFFLOAD_TARGET_NAMES=nvptx-none:amdgcn-amdhsa + OFFLOAD_TARGET_DEFAULT=1 + Target: x86_64-linux-gnu + Configured with: ../src/configure -v --with-pkgversion='Ubuntu 11.4.0-1ubuntu1~22.04' --with-bugurl=file:///usr/share/doc/gcc-11/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++,m2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-11 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-bootstrap --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --enable-libphobos-checking=release --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --enable-cet --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-nvptx/usr,amdgcn-amdhsa=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-gcn/usr --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu --with-build-config=bootstrap-lto-lean --enable-link-serialization=2 + Thread model: posix + Supported LTO compression algorithms: zlib zstd + gcc version 11.4.0 (Ubuntu 11.4.0-1ubuntu1~22.04) + COLLECT_GCC_OPTIONS='-D' 'VERSION_INFO="0.0.1"' '-v' '-o' 'CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o' '-c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_a7d3c.dir/' + /usr/lib/gcc/x86_64-linux-gnu/11/cc1plus -quiet -v -imultiarch x86_64-linux-gnu -D_GNU_SOURCE -D VERSION_INFO="0.0.1" 
/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCXXCompilerABI.cpp -quiet -dumpdir CMakeFiles/cmTC_a7d3c.dir/ -dumpbase CMakeCXXCompilerABI.cpp.cpp -dumpbase-ext .cpp -mtune=generic -march=x86-64 -version -fasynchronous-unwind-tables -fstack-protector-strong -Wformat -Wformat-security -fstack-clash-protection -fcf-protection -o /tmp/ccPWMjx4.s + GNU C++17 (Ubuntu 11.4.0-1ubuntu1~22.04) version 11.4.0 (x86_64-linux-gnu) + compiled by GNU C version 11.4.0, GMP version 6.2.1, MPFR version 4.1.0, MPC version 1.2.1, isl version isl-0.24-GMP + + GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072 + ignoring duplicate directory "/usr/include/x86_64-linux-gnu/c++/11" + ignoring nonexistent directory "/usr/local/include/x86_64-linux-gnu" + ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/11/include-fixed" + ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/11/../../../../x86_64-linux-gnu/include" + #include "..." search starts here: + #include <...> search starts here: + /usr/include/c++/11 + /usr/include/x86_64-linux-gnu/c++/11 + /usr/include/c++/11/backward + /usr/lib/gcc/x86_64-linux-gnu/11/include + /usr/local/include + /usr/include/x86_64-linux-gnu + /usr/include + End of search list. 
+ GNU C++17 (Ubuntu 11.4.0-1ubuntu1~22.04) version 11.4.0 (x86_64-linux-gnu) + compiled by GNU C version 11.4.0, GMP version 6.2.1, MPFR version 4.1.0, MPC version 1.2.1, isl version isl-0.24-GMP + + GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072 + Compiler executable checksum: d591828bb4d392ae8b7b160e5bb0b95f + COLLECT_GCC_OPTIONS='-D' 'VERSION_INFO="0.0.1"' '-v' '-o' 'CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o' '-c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_a7d3c.dir/' + as -v --64 -o CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o /tmp/ccPWMjx4.s + GNU assembler version 2.38 (x86_64-linux-gnu) using BFD version (GNU Binutils for Ubuntu) 2.38 + COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/ + LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/local/cuda/lib64/stubs/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../:/lib/:/usr/lib/ + COLLECT_GCC_OPTIONS='-D' 'VERSION_INFO="0.0.1"' '-v' '-o' 'CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o' '-c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.' + Linking CXX executable cmTC_a7d3c + /usr/local/envs/word/bin/cmake -E cmake_link_script CMakeFiles/cmTC_a7d3c.dir/link.txt --verbose=1 + /usr/bin/c++ -DVERSION_INFO=\\"0.0.1\\" -v CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o -o cmTC_a7d3c + Using built-in specs. 
+ COLLECT_GCC=/usr/bin/c++ + COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper + OFFLOAD_TARGET_NAMES=nvptx-none:amdgcn-amdhsa + OFFLOAD_TARGET_DEFAULT=1 + Target: x86_64-linux-gnu + Configured with: ../src/configure -v --with-pkgversion='Ubuntu 11.4.0-1ubuntu1~22.04' --with-bugurl=file:///usr/share/doc/gcc-11/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++,m2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-11 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-bootstrap --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --enable-libphobos-checking=release --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --enable-cet --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-nvptx/usr,amdgcn-amdhsa=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-gcn/usr --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu --with-build-config=bootstrap-lto-lean --enable-link-serialization=2 + Thread model: posix + Supported LTO compression algorithms: zlib zstd + gcc version 11.4.0 (Ubuntu 11.4.0-1ubuntu1~22.04) + COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/ + 
LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/local/cuda/lib64/stubs/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../:/lib/:/usr/lib/ + COLLECT_GCC_OPTIONS='-D' 'VERSION_INFO="0.0.1"' '-v' '-o' 'cmTC_a7d3c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' '-dumpdir' 'cmTC_a7d3c.' + /usr/lib/gcc/x86_64-linux-gnu/11/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/11/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper -plugin-opt=-fresolution=/tmp/ccpVEDpW.res -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -dynamic-linker /lib64/ld-linux-x86-64.so.2 -pie -z now -z relro -o cmTC_a7d3c /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o -L/usr/lib/gcc/x86_64-linux-gnu/11 -L/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/local/cuda/lib64/stubs -L/usr/lib/gcc/x86_64-linux-gnu/11/../../.. CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o -lstdc++ -lm -lgcc_s -lgcc -lc -lgcc_s -lgcc /usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o + COLLECT_GCC_OPTIONS='-D' 'VERSION_INFO="0.0.1"' '-v' '-o' 'cmTC_a7d3c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' '-dumpdir' 'cmTC_a7d3c.' 
+ gmake[1]: Leaving directory '/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-qdQRVR' + + exitCode: 0 + - + kind: "message-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerABI.cmake:127 (message)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeTestCXXCompiler.cmake:26 (CMAKE_DETERMINE_COMPILER_ABI)" + - "CMakeLists.txt:3 (project)" + message: | + Parsed CXX implicit include dir info: rv=done + found start of include info + found start of implicit include info + add: [/usr/include/c++/11] + add: [/usr/include/x86_64-linux-gnu/c++/11] + add: [/usr/include/c++/11/backward] + add: [/usr/lib/gcc/x86_64-linux-gnu/11/include] + add: [/usr/local/include] + add: [/usr/include/x86_64-linux-gnu] + add: [/usr/include] + end of search list found + collapse include dir [/usr/include/c++/11] ==> [/usr/include/c++/11] + collapse include dir [/usr/include/x86_64-linux-gnu/c++/11] ==> [/usr/include/x86_64-linux-gnu/c++/11] + collapse include dir [/usr/include/c++/11/backward] ==> [/usr/include/c++/11/backward] + collapse include dir [/usr/lib/gcc/x86_64-linux-gnu/11/include] ==> [/usr/lib/gcc/x86_64-linux-gnu/11/include] + collapse include dir [/usr/local/include] ==> [/usr/local/include] + collapse include dir [/usr/include/x86_64-linux-gnu] ==> [/usr/include/x86_64-linux-gnu] + collapse include dir [/usr/include] ==> [/usr/include] + implicit include dirs: [/usr/include/c++/11;/usr/include/x86_64-linux-gnu/c++/11;/usr/include/c++/11/backward;/usr/lib/gcc/x86_64-linux-gnu/11/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include] + + + - + kind: "message-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerABI.cmake:152 (message)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeTestCXXCompiler.cmake:26 (CMAKE_DETERMINE_COMPILER_ABI)" + - "CMakeLists.txt:3 (project)" + message: | + Parsed CXX implicit link information: + 
link line regex: [^( *|.*[/\\])(ld|CMAKE_LINK_STARTFILE-NOTFOUND|([^/\\]+-)?ld|collect2)[^/\\]*( |$)] + ignore line: [Change Dir: /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-qdQRVR] + ignore line: [] + ignore line: [Run Build Command(s):/usr/local/envs/word/bin/cmake -E env VERBOSE=1 /usr/bin/gmake -f Makefile cmTC_a7d3c/fast && /usr/bin/gmake -f CMakeFiles/cmTC_a7d3c.dir/build.make CMakeFiles/cmTC_a7d3c.dir/build] + ignore line: [gmake[1]: Entering directory '/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-qdQRVR'] + ignore line: [Building CXX object CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o] + ignore line: [/usr/bin/c++ -DVERSION_INFO=\\"0.0.1\\" -v -o CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o -c /usr/local/envs/word/share/cmake-3.26/Modules/CMakeCXXCompilerABI.cpp] + ignore line: [Using built-in specs.] + ignore line: [COLLECT_GCC=/usr/bin/c++] + ignore line: [OFFLOAD_TARGET_NAMES=nvptx-none:amdgcn-amdhsa] + ignore line: [OFFLOAD_TARGET_DEFAULT=1] + ignore line: [Target: x86_64-linux-gnu] + ignore line: [Configured with: ../src/configure -v --with-pkgversion='Ubuntu 11.4.0-1ubuntu1~22.04' --with-bugurl=file:///usr/share/doc/gcc-11/README.Bugs --enable-languages=c ada c++ go brig d fortran objc obj-c++ m2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-11 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-bootstrap --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --enable-libphobos-checking=release --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --enable-cet --with-arch-32=i686 
--with-abi=m64 --with-multilib-list=m32 m64 mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-nvptx/usr amdgcn-amdhsa=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-gcn/usr --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu --with-build-config=bootstrap-lto-lean --enable-link-serialization=2] + ignore line: [Thread model: posix] + ignore line: [Supported LTO compression algorithms: zlib zstd] + ignore line: [gcc version 11.4.0 (Ubuntu 11.4.0-1ubuntu1~22.04) ] + ignore line: [COLLECT_GCC_OPTIONS='-D' 'VERSION_INFO="0.0.1"' '-v' '-o' 'CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o' '-c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_a7d3c.dir/'] + ignore line: [ /usr/lib/gcc/x86_64-linux-gnu/11/cc1plus -quiet -v -imultiarch x86_64-linux-gnu -D_GNU_SOURCE -D VERSION_INFO="0.0.1" /usr/local/envs/word/share/cmake-3.26/Modules/CMakeCXXCompilerABI.cpp -quiet -dumpdir CMakeFiles/cmTC_a7d3c.dir/ -dumpbase CMakeCXXCompilerABI.cpp.cpp -dumpbase-ext .cpp -mtune=generic -march=x86-64 -version -fasynchronous-unwind-tables -fstack-protector-strong -Wformat -Wformat-security -fstack-clash-protection -fcf-protection -o /tmp/ccPWMjx4.s] + ignore line: [GNU C++17 (Ubuntu 11.4.0-1ubuntu1~22.04) version 11.4.0 (x86_64-linux-gnu)] + ignore line: [ compiled by GNU C version 11.4.0 GMP version 6.2.1 MPFR version 4.1.0 MPC version 1.2.1 isl version isl-0.24-GMP] + ignore line: [] + ignore line: [GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072] + ignore line: [ignoring duplicate directory "/usr/include/x86_64-linux-gnu/c++/11"] + ignore line: [ignoring nonexistent directory "/usr/local/include/x86_64-linux-gnu"] + ignore line: [ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/11/include-fixed"] + ignore line: [ignoring nonexistent directory 
"/usr/lib/gcc/x86_64-linux-gnu/11/../../../../x86_64-linux-gnu/include"] + ignore line: [#include "..." search starts here:] + ignore line: [#include <...> search starts here:] + ignore line: [ /usr/include/c++/11] + ignore line: [ /usr/include/x86_64-linux-gnu/c++/11] + ignore line: [ /usr/include/c++/11/backward] + ignore line: [ /usr/lib/gcc/x86_64-linux-gnu/11/include] + ignore line: [ /usr/local/include] + ignore line: [ /usr/include/x86_64-linux-gnu] + ignore line: [ /usr/include] + ignore line: [End of search list.] + ignore line: [GNU C++17 (Ubuntu 11.4.0-1ubuntu1~22.04) version 11.4.0 (x86_64-linux-gnu)] + ignore line: [ compiled by GNU C version 11.4.0 GMP version 6.2.1 MPFR version 4.1.0 MPC version 1.2.1 isl version isl-0.24-GMP] + ignore line: [] + ignore line: [GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072] + ignore line: [Compiler executable checksum: d591828bb4d392ae8b7b160e5bb0b95f] + ignore line: [COLLECT_GCC_OPTIONS='-D' 'VERSION_INFO="0.0.1"' '-v' '-o' 'CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o' '-c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_a7d3c.dir/'] + ignore line: [ as -v --64 -o CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o /tmp/ccPWMjx4.s] + ignore line: [GNU assembler version 2.38 (x86_64-linux-gnu) using BFD version (GNU Binutils for Ubuntu) 2.38] + ignore line: [COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/] + ignore line: [LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/local/cuda/lib64/stubs/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../:/lib/:/usr/lib/] + ignore line: [COLLECT_GCC_OPTIONS='-D' 'VERSION_INFO="0.0.1"' '-v' '-o' 
'CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o' '-c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' '-dumpdir' 'CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.'] + ignore line: [Linking CXX executable cmTC_a7d3c] + ignore line: [/usr/local/envs/word/bin/cmake -E cmake_link_script CMakeFiles/cmTC_a7d3c.dir/link.txt --verbose=1] + ignore line: [/usr/bin/c++ -DVERSION_INFO=\\"0.0.1\\" -v CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o -o cmTC_a7d3c ] + ignore line: [Using built-in specs.] + ignore line: [COLLECT_GCC=/usr/bin/c++] + ignore line: [COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper] + ignore line: [OFFLOAD_TARGET_NAMES=nvptx-none:amdgcn-amdhsa] + ignore line: [OFFLOAD_TARGET_DEFAULT=1] + ignore line: [Target: x86_64-linux-gnu] + ignore line: [Configured with: ../src/configure -v --with-pkgversion='Ubuntu 11.4.0-1ubuntu1~22.04' --with-bugurl=file:///usr/share/doc/gcc-11/README.Bugs --enable-languages=c ada c++ go brig d fortran objc obj-c++ m2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-11 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-bootstrap --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --enable-libphobos-checking=release --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --enable-cet --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32 m64 mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-nvptx/usr amdgcn-amdhsa=/build/gcc-11-XeT9lY/gcc-11-11.4.0/debian/tmp-gcn/usr --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu 
--target=x86_64-linux-gnu --with-build-config=bootstrap-lto-lean --enable-link-serialization=2] + ignore line: [Thread model: posix] + ignore line: [Supported LTO compression algorithms: zlib zstd] + ignore line: [gcc version 11.4.0 (Ubuntu 11.4.0-1ubuntu1~22.04) ] + ignore line: [COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/] + ignore line: [LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/11/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/local/cuda/lib64/stubs/:/usr/lib/gcc/x86_64-linux-gnu/11/../../../:/lib/:/usr/lib/] + ignore line: [COLLECT_GCC_OPTIONS='-D' 'VERSION_INFO="0.0.1"' '-v' '-o' 'cmTC_a7d3c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' '-dumpdir' 'cmTC_a7d3c.'] + link line: [ /usr/lib/gcc/x86_64-linux-gnu/11/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/11/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper -plugin-opt=-fresolution=/tmp/ccpVEDpW.res -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -dynamic-linker /lib64/ld-linux-x86-64.so.2 -pie -z now -z relro -o cmTC_a7d3c /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o -L/usr/lib/gcc/x86_64-linux-gnu/11 -L/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/local/cuda/lib64/stubs -L/usr/lib/gcc/x86_64-linux-gnu/11/../../.. 
CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o -lstdc++ -lm -lgcc_s -lgcc -lc -lgcc_s -lgcc /usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o /usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/11/collect2] ==> ignore + arg [-plugin] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/11/liblto_plugin.so] ==> ignore + arg [-plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/11/lto-wrapper] ==> ignore + arg [-plugin-opt=-fresolution=/tmp/ccpVEDpW.res] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [-plugin-opt=-pass-through=-lc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [--build-id] ==> ignore + arg [--eh-frame-hdr] ==> ignore + arg [-m] ==> ignore + arg [elf_x86_64] ==> ignore + arg [--hash-style=gnu] ==> ignore + arg [--as-needed] ==> ignore + arg [-dynamic-linker] ==> ignore + arg [/lib64/ld-linux-x86-64.so.2] ==> ignore + arg [-pie] ==> ignore + arg [-znow] ==> ignore + arg [-zrelro] ==> ignore + arg [-o] ==> ignore + arg [cmTC_a7d3c] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/11] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/11] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib] + arg [-L/lib/x86_64-linux-gnu] ==> dir [/lib/x86_64-linux-gnu] + arg [-L/lib/../lib] ==> dir 
[/lib/../lib] + arg [-L/usr/lib/x86_64-linux-gnu] ==> dir [/usr/lib/x86_64-linux-gnu] + arg [-L/usr/lib/../lib] ==> dir [/usr/lib/../lib] + arg [-L/usr/local/cuda/lib64/stubs] ==> dir [/usr/local/cuda/lib64/stubs] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/11/../../..] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../..] + arg [CMakeFiles/cmTC_a7d3c.dir/CMakeCXXCompilerABI.cpp.o] ==> ignore + arg [-lstdc++] ==> lib [stdc++] + arg [-lm] ==> lib [m] + arg [-lgcc_s] ==> lib [gcc_s] + arg [-lgcc] ==> lib [gcc] + arg [-lc] ==> lib [c] + arg [-lgcc_s] ==> lib [gcc_s] + arg [-lgcc] ==> lib [gcc] + arg [/usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/Scrt1.o] ==> [/usr/lib/x86_64-linux-gnu/Scrt1.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crti.o] ==> [/usr/lib/x86_64-linux-gnu/crti.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu/crtn.o] ==> [/usr/lib/x86_64-linux-gnu/crtn.o] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/11] ==> [/usr/lib/gcc/x86_64-linux-gnu/11] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../../x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../../../lib] ==> [/usr/lib] + collapse library dir [/lib/x86_64-linux-gnu] ==> [/lib/x86_64-linux-gnu] + collapse library dir [/lib/../lib] ==> [/lib] + collapse library dir [/usr/lib/x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/../lib] ==> [/usr/lib] + collapse library dir [/usr/local/cuda/lib64/stubs] ==> [/usr/local/cuda/lib64/stubs] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/11/../../..] 
==> [/usr/lib] + implicit libs: [stdc++;m;gcc_s;gcc;c;gcc_s;gcc] + implicit objs: [/usr/lib/x86_64-linux-gnu/Scrt1.o;/usr/lib/x86_64-linux-gnu/crti.o;/usr/lib/gcc/x86_64-linux-gnu/11/crtbeginS.o;/usr/lib/gcc/x86_64-linux-gnu/11/crtendS.o;/usr/lib/x86_64-linux-gnu/crtn.o] + implicit dirs: [/usr/lib/gcc/x86_64-linux-gnu/11;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib;/usr/local/cuda/lib64/stubs] + implicit fwks: [] + + + - + kind: "try_compile-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/Internal/CheckSourceCompiles.cmake:101 (try_compile)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/Internal/CheckCompilerFlag.cmake:18 (cmake_check_source_compiles)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CheckCXXCompilerFlag.cmake:40 (cmake_check_compiler_flag)" + - "pybind11/tools/pybind11Common.cmake:194 (check_cxx_compiler_flag)" + - "pybind11/tools/pybind11Common.cmake:223 (_pybind11_return_if_cxx_and_linker_flags_work)" + - "pybind11/tools/pybind11Common.cmake:277 (_pybind11_generate_lto)" + - "pybind11/CMakeLists.txt:162 (include)" + checks: + - "Performing Test HAS_FLTO" + directories: + source: "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-RuStvs" + binary: "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-RuStvs" + cmakeVariables: + CMAKE_CXX_FLAGS: "-DVERSION_INFO=\\\"0.0.1\\\"" + CMAKE_CXX_FLAGS_DEBUG: "-g" + CMAKE_EXE_LINKER_FLAGS: "" + CMAKE_MODULE_PATH: "/content/Word-As-Image/diffvg/cmake/" + buildResult: + variable: "HAS_FLTO" + cached: true + stdout: | + Change Dir: /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-RuStvs + + Run Build Command(s):/usr/local/envs/word/bin/cmake -E env VERBOSE=1 /usr/bin/gmake -f Makefile cmTC_f9813/fast && /usr/bin/gmake -f CMakeFiles/cmTC_f9813.dir/build.make CMakeFiles/cmTC_f9813.dir/build + gmake[1]: Entering directory 
'/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-RuStvs' + Building CXX object CMakeFiles/cmTC_f9813.dir/src.cxx.o + /usr/bin/c++ -DHAS_FLTO -DVERSION_INFO=\\"0.0.1\\" -flto -fno-fat-lto-objects -o CMakeFiles/cmTC_f9813.dir/src.cxx.o -c /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-RuStvs/src.cxx + Linking CXX executable cmTC_f9813 + /usr/local/envs/word/bin/cmake -E cmake_link_script CMakeFiles/cmTC_f9813.dir/link.txt --verbose=1 + /usr/bin/c++ -DVERSION_INFO=\\"0.0.1\\" CMakeFiles/cmTC_f9813.dir/src.cxx.o -o cmTC_f9813 -flto + gmake[1]: Leaving directory '/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-RuStvs' + + exitCode: 0 + - + kind: "try_compile-v1" + backtrace: + - "/usr/local/envs/word/share/cmake-3.26/Modules/Internal/CheckSourceCompiles.cmake:101 (try_compile)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/CheckCSourceCompiles.cmake:76 (cmake_check_source_compiles)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/FindThreads.cmake:97 (CHECK_C_SOURCE_COMPILES)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/FindThreads.cmake:163 (_threads_check_libc)" + - "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA.cmake:1049 (find_package)" + - "CMakeLists.txt:19 (find_package)" + checks: + - "Performing Test CMAKE_HAVE_LIBC_PTHREAD" + directories: + source: "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-J68Npj" + binary: "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-J68Npj" + cmakeVariables: + CMAKE_C_FLAGS: "-fPIC" + CMAKE_C_FLAGS_DEBUG: "-g" + CMAKE_EXE_LINKER_FLAGS: "" + CMAKE_MODULE_PATH: "/content/Word-As-Image/diffvg/cmake/" + buildResult: + variable: "CMAKE_HAVE_LIBC_PTHREAD" + cached: true + stdout: | + Change Dir: 
/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-J68Npj + + Run Build Command(s):/usr/local/envs/word/bin/cmake -E env VERBOSE=1 /usr/bin/gmake -f Makefile cmTC_df3fe/fast && /usr/bin/gmake -f CMakeFiles/cmTC_df3fe.dir/build.make CMakeFiles/cmTC_df3fe.dir/build + gmake[1]: Entering directory '/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-J68Npj' + Building C object CMakeFiles/cmTC_df3fe.dir/src.c.o + /usr/bin/cc -DCMAKE_HAVE_LIBC_PTHREAD -fPIC -o CMakeFiles/cmTC_df3fe.dir/src.c.o -c /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-J68Npj/src.c + Linking C executable cmTC_df3fe + /usr/local/envs/word/bin/cmake -E cmake_link_script CMakeFiles/cmTC_df3fe.dir/link.txt --verbose=1 + /usr/bin/cc -fPIC CMakeFiles/cmTC_df3fe.dir/src.c.o -o cmTC_df3fe + gmake[1]: Leaving directory '/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeScratch/TryCompile-J68Npj' + + exitCode: 0 +... diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeDirectoryInformation.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeDirectoryInformation.cmake new file mode 100644 index 0000000000000000000000000000000000000000..41d6fe421900fe4929471461b0f4d68fea43d19b --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeDirectoryInformation.cmake @@ -0,0 +1,16 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.26 + +# Relative path conversion top directories. +set(CMAKE_RELATIVE_PATH_TOP_SOURCE "/content/Word-As-Image/diffvg") +set(CMAKE_RELATIVE_PATH_TOP_BINARY "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38") + +# Force unix paths in dependencies. +set(CMAKE_FORCE_UNIX_PATHS 1) + + +# The C and CXX include file regular expressions for this directory. 
+set(CMAKE_C_INCLUDE_REGEX_SCAN "^.*$") +set(CMAKE_C_INCLUDE_REGEX_COMPLAIN "^$") +set(CMAKE_CXX_INCLUDE_REGEX_SCAN ${CMAKE_C_INCLUDE_REGEX_SCAN}) +set(CMAKE_CXX_INCLUDE_REGEX_COMPLAIN ${CMAKE_C_INCLUDE_REGEX_COMPLAIN}) diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeRuleHashes.txt b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeRuleHashes.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4657b69ed6b06d8b822f1ac14c43cd03b72ea7a --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/CMakeRuleHashes.txt @@ -0,0 +1,3 @@ +# Hashes of file build rules. +308f31345bf710c0c1842552d83d0960 CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o +ad284927d3b40ac5f2c59cf71a108822 CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/Makefile.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/Makefile.cmake new file mode 100644 index 0000000000000000000000000000000000000000..2e1db43c66f26a09a1d639ea9804aaaae0ffaffb --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/Makefile.cmake @@ -0,0 +1,164 @@ +# CMAKE generated file: DO NOT EDIT! 
+# Generated by "Unix Makefiles" Generator, CMake Version 3.26 + +# The generator used is: +set(CMAKE_DEPENDS_GENERATOR "Unix Makefiles") + +# The top level Makefile was generated from the following files: +set(CMAKE_MAKEFILE_DEPENDS + "CMakeCache.txt" + "/content/Word-As-Image/diffvg/CMakeLists.txt" + "CMakeFiles/3.26.4/CMakeCCompiler.cmake" + "CMakeFiles/3.26.4/CMakeCXXCompiler.cmake" + "CMakeFiles/3.26.4/CMakeSystem.cmake" + "CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.cmake.pre-gen" + "CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.depend" + "CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.cmake.pre-gen" + "CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.depend" + "/content/Word-As-Image/diffvg/cmake/FindTensorFlow.cmake" + "/content/Word-As-Image/diffvg/pybind11/CMakeLists.txt" + "/content/Word-As-Image/diffvg/pybind11/tools/pybind11Common.cmake" + "/content/Word-As-Image/diffvg/pybind11/tools/pybind11NewTools.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCCompiler.cmake.in" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCCompilerABI.c" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCInformation.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCXXCompiler.cmake.in" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCXXCompilerABI.cpp" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCXXInformation.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCheckCompilerFlagCommonPatterns.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCommonLanguageInclude.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeCompilerIdDetection.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDependentOption.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCXXCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompileFeatures.cmake" + 
"/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerABI.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineCompilerId.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeDetermineSystem.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeFindBinUtils.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeFindFrameworks.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeGenericSystem.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeInitializeConfigs.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeLanguageInformation.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakePackageConfigHelpers.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeParseImplicitIncludeInfo.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeParseImplicitLinkInfo.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeParseLibraryArchitecture.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeSystem.cmake.in" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeSystemSpecificInformation.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeSystemSpecificInitialize.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeTestCCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeTestCXXCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeTestCompilerCommon.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CMakeUnixFindMake.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CheckCSourceCompiles.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CheckCXXCompilerFlag.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CheckCXXSourceCompiles.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CheckIncludeFile.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/CheckLibraryExists.cmake" + 
"/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/ADSP-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/ARMCC-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/ARMClang-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/AppleClang-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Borland-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Bruce-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/CMakeCommonCompilerMacros.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Clang-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Clang-DetermineCompilerInternal.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Comeau-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Compaq-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Compaq-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Cray-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Embarcadero-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Fujitsu-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/FujitsuClang-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/GHS-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/GNU-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/GNU-C.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/GNU-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/GNU-CXX.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/GNU-FindBinUtils.cmake" + 
"/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/GNU.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/HP-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/HP-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/IAR-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/IBMCPP-C-DetermineVersionInternal.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/IBMCPP-CXX-DetermineVersionInternal.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/IBMClang-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/IBMClang-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Intel-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/IntelLLVM-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/LCC-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/LCC-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/MSVC-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/NVHPC-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/NVIDIA-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/OpenWatcom-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/PGI-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/PathScale-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/SCO-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/SDCC-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/SunPro-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/SunPro-CXX-DetermineCompiler.cmake" + 
"/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/TI-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Tasking-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/TinyCC-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/VisualAge-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/VisualAge-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/Watcom-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/XL-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/XL-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/XLClang-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/XLClang-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/zOS-C-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Compiler/zOS-CXX-DetermineCompiler.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/run_nvcc.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/select_compute_arch.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/FindPackageHandleStandardArgs.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/FindPackageMessage.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/FindPython.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/FindPython/Support.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/FindPythonLibs.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/FindThreads.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/GNUInstallDirs.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Internal/CheckCompilerFlag.cmake" + 
"/usr/local/envs/word/share/cmake-3.26/Modules/Internal/CheckFlagCommonConfig.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Internal/CheckSourceCompiles.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Internal/FeatureTesting.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Platform/Linux-Determine-CXX.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Platform/Linux-GNU-C.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Platform/Linux-GNU-CXX.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Platform/Linux-GNU.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Platform/Linux.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/Platform/UnixPaths.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/SelectLibraryConfigurations.cmake" + "/usr/local/envs/word/share/cmake-3.26/Modules/WriteBasicConfigVersionFile.cmake" + ) + +# The corresponding makefile is: +set(CMAKE_MAKEFILE_OUTPUTS + "Makefile" + "CMakeFiles/cmake.check_cache" + ) + +# Byproducts of CMake generate step: +set(CMAKE_MAKEFILE_PRODUCTS + "CMakeFiles/3.26.4/CMakeSystem.cmake" + "CMakeFiles/3.26.4/CMakeCCompiler.cmake" + "CMakeFiles/3.26.4/CMakeCXXCompiler.cmake" + "CMakeFiles/3.26.4/CMakeCCompiler.cmake" + "CMakeFiles/3.26.4/CMakeCXXCompiler.cmake" + "CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.cmake.pre-gen" + "CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.cmake.pre-gen" + "CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.Release.cmake" + "CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.Release.cmake" + "CMakeFiles/CMakeDirectoryInformation.cmake" + "pybind11/CMakeFiles/CMakeDirectoryInformation.cmake" + ) + +# Dependency information for all targets: +set(CMAKE_DEPEND_INFO_FILES + "CMakeFiles/diffvg.dir/DependInfo.cmake" + ) diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/Makefile2 b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/Makefile2 new file mode 100644 index 
0000000000000000000000000000000000000000..ed7f323ae29f518ab2d423d72ca0b185d3b4c037 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/Makefile2 @@ -0,0 +1,129 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.26 + +# Default target executed when no arguments are given to make. +default_target: all +.PHONY : default_target + +#============================================================================= +# Special targets provided by cmake. + +# Disable implicit rules so canonical targets will work. +.SUFFIXES: + +# Disable VCS-based implicit rules. +% : %,v + +# Disable VCS-based implicit rules. +% : RCS/% + +# Disable VCS-based implicit rules. +% : RCS/%,v + +# Disable VCS-based implicit rules. +% : SCCS/s.% + +# Disable VCS-based implicit rules. +% : s.% + +.SUFFIXES: .hpux_make_needs_suffix_list + +# Command-line flag to silence nested $(MAKE). +$(VERBOSE)MAKESILENT = -s + +#Suppress display of executed commands. +$(VERBOSE).SILENT: + +# A target that is always out of date. +cmake_force: +.PHONY : cmake_force + +#============================================================================= +# Set environment variables for the build. + +# The shell in which to execute make rules. +SHELL = /bin/sh + +# The CMake executable. +CMAKE_COMMAND = /usr/local/envs/word/bin/cmake + +# The command to remove a file. +RM = /usr/local/envs/word/bin/cmake -E rm -f + +# Escaping for special characters. +EQUALS = = + +# The top-level source directory on which CMake was run. +CMAKE_SOURCE_DIR = /content/Word-As-Image/diffvg + +# The top-level build directory on which CMake was run. +CMAKE_BINARY_DIR = /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 + +#============================================================================= +# Directory level rules for the build root directory + +# The main recursive "all" target. 
+all: CMakeFiles/diffvg.dir/all +all: pybind11/all +.PHONY : all + +# The main recursive "preinstall" target. +preinstall: pybind11/preinstall +.PHONY : preinstall + +# The main recursive "clean" target. +clean: CMakeFiles/diffvg.dir/clean +clean: pybind11/clean +.PHONY : clean + +#============================================================================= +# Directory level rules for directory pybind11 + +# Recursive "all" directory target. +pybind11/all: +.PHONY : pybind11/all + +# Recursive "preinstall" directory target. +pybind11/preinstall: +.PHONY : pybind11/preinstall + +# Recursive "clean" directory target. +pybind11/clean: +.PHONY : pybind11/clean + +#============================================================================= +# Target rules for target CMakeFiles/diffvg.dir + +# All Build rule for target. +CMakeFiles/diffvg.dir/all: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/depend + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/build + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --progress-dir=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles --progress-num=1,2,3,4,5,6,7 "Built target diffvg" +.PHONY : CMakeFiles/diffvg.dir/all + +# Build rule for subdir invocation for target. +CMakeFiles/diffvg.dir/rule: cmake_check_build_system + $(CMAKE_COMMAND) -E cmake_progress_start /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles 7 + $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 CMakeFiles/diffvg.dir/all + $(CMAKE_COMMAND) -E cmake_progress_start /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles 0 +.PHONY : CMakeFiles/diffvg.dir/rule + +# Convenience name for target. +diffvg: CMakeFiles/diffvg.dir/rule +.PHONY : diffvg + +# clean rule for target. 
+CMakeFiles/diffvg.dir/clean: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/clean +.PHONY : CMakeFiles/diffvg.dir/clean + +#============================================================================= +# Special targets to cleanup operation of make. + +# Special rule to run CMake to check the build system integrity. +# No rule that depends on this can have commands that come from listfiles +# because they might be regenerated. +cmake_check_build_system: + $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0 +.PHONY : cmake_check_build_system + diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/TargetDirectories.txt b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/TargetDirectories.txt new file mode 100644 index 0000000000000000000000000000000000000000..c23a0b1efe5b4e393fc75aed9a271a0a279da683 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/TargetDirectories.txt @@ -0,0 +1,5 @@ +/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir +/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/edit_cache.dir +/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/rebuild_cache.dir +/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/CMakeFiles/edit_cache.dir +/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/CMakeFiles/rebuild_cache.dir diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/cmake.check_cache b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/cmake.check_cache new file mode 100644 index 0000000000000000000000000000000000000000..3dccd731726d7faa8b29d8d7dba3b981a53ca497 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/cmake.check_cache @@ -0,0 +1 @@ +# This file is generated by cmake for dependency checking of the CMakeCache.txt file diff --git 
a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/DependInfo.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/DependInfo.cmake new file mode 100644 index 0000000000000000000000000000000000000000..86fbc53215db51ef90a0dd57fcb9c93ad4f3a3bc --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/DependInfo.cmake @@ -0,0 +1,22 @@ + +# Consider dependencies only in project. +set(CMAKE_DEPENDS_IN_PROJECT_ONLY OFF) + +# The set of languages for which implicit dependencies are needed: +set(CMAKE_DEPENDS_LANGUAGES + ) + +# The set of dependency files which are needed: +set(CMAKE_DEPENDS_DEPENDENCY_FILES + "/content/Word-As-Image/diffvg/atomic.cpp" "CMakeFiles/diffvg.dir/atomic.cpp.o" "gcc" "CMakeFiles/diffvg.dir/atomic.cpp.o.d" + "/content/Word-As-Image/diffvg/color.cpp" "CMakeFiles/diffvg.dir/color.cpp.o" "gcc" "CMakeFiles/diffvg.dir/color.cpp.o.d" + "/content/Word-As-Image/diffvg/parallel.cpp" "CMakeFiles/diffvg.dir/parallel.cpp.o" "gcc" "CMakeFiles/diffvg.dir/parallel.cpp.o.d" + "/content/Word-As-Image/diffvg/shape.cpp" "CMakeFiles/diffvg.dir/shape.cpp.o" "gcc" "CMakeFiles/diffvg.dir/shape.cpp.o.d" + ) + +# Targets to which this target links which contain Fortran sources. +set(CMAKE_Fortran_TARGET_LINKED_INFO_FILES + ) + +# Fortran module output directory. 
+set(CMAKE_Fortran_TARGET_MODULE_DIR "") diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/atomic.cpp.o b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/atomic.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..ca35bc1b33573e47576bf49de2e590808e981f76 Binary files /dev/null and b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/atomic.cpp.o differ diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/atomic.cpp.o.d b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/atomic.cpp.o.d new file mode 100644 index 0000000000000000000000000000000000000000..47383f898b5620a93d5b7f287b10d66f309982e3 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/atomic.cpp.o.d @@ -0,0 +1,2 @@ +CMakeFiles/diffvg.dir/atomic.cpp.o: \ + /content/Word-As-Image/diffvg/atomic.cpp /usr/include/stdc-predef.h diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/build.make b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/build.make new file mode 100644 index 0000000000000000000000000000000000000000..08dee3907fd49dd315586f7f5ccdd56173a7cb04 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/build.make @@ -0,0 +1,179 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.26 + +# Delete rule output on recipe failure. +.DELETE_ON_ERROR: + +#============================================================================= +# Special targets provided by cmake. + +# Disable implicit rules so canonical targets will work. +.SUFFIXES: + +# Disable VCS-based implicit rules. +% : %,v + +# Disable VCS-based implicit rules. +% : RCS/% + +# Disable VCS-based implicit rules. +% : RCS/%,v + +# Disable VCS-based implicit rules. +% : SCCS/s.% + +# Disable VCS-based implicit rules. 
+% : s.% + +.SUFFIXES: .hpux_make_needs_suffix_list + +# Command-line flag to silence nested $(MAKE). +$(VERBOSE)MAKESILENT = -s + +#Suppress display of executed commands. +$(VERBOSE).SILENT: + +# A target that is always out of date. +cmake_force: +.PHONY : cmake_force + +#============================================================================= +# Set environment variables for the build. + +# The shell in which to execute make rules. +SHELL = /bin/sh + +# The CMake executable. +CMAKE_COMMAND = /usr/local/envs/word/bin/cmake + +# The command to remove a file. +RM = /usr/local/envs/word/bin/cmake -E rm -f + +# Escaping for special characters. +EQUALS = = + +# The top-level source directory on which CMake was run. +CMAKE_SOURCE_DIR = /content/Word-As-Image/diffvg + +# The top-level build directory on which CMake was run. +CMAKE_BINARY_DIR = /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 + +# Include any dependencies generated for this target. +include CMakeFiles/diffvg.dir/depend.make +# Include any dependencies generated by the compiler for this target. +include CMakeFiles/diffvg.dir/compiler_depend.make + +# Include the progress variables for this target. +include CMakeFiles/diffvg.dir/progress.make + +# Include the compile flags for this target's objects. 
+include CMakeFiles/diffvg.dir/flags.make + +CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o: /content/Word-As-Image/diffvg/diffvg.cpp +CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o: CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.depend +CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o: CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.Release.cmake + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --blue --bold --progress-dir=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles --progress-num=$(CMAKE_PROGRESS_1) "Building NVCC (Device) object CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o" + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir && /usr/local/envs/word/bin/cmake -E make_directory /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//. + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir && /usr/local/envs/word/bin/cmake -D verbose:BOOL=$(VERBOSE) -D build_configuration:STRING=Release -D generated_file:STRING=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_diffvg.cpp.o -D generated_cubin_file:STRING=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_diffvg.cpp.o.cubin.txt -P /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_diffvg.cpp.o.Release.cmake + +CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o: /content/Word-As-Image/diffvg/scene.cpp +CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o: CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.depend +CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o: CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.Release.cmake + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --blue --bold 
--progress-dir=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles --progress-num=$(CMAKE_PROGRESS_2) "Building NVCC (Device) object CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o" + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir && /usr/local/envs/word/bin/cmake -E make_directory /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//. + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir && /usr/local/envs/word/bin/cmake -D verbose:BOOL=$(VERBOSE) -D build_configuration:STRING=Release -D generated_file:STRING=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_scene.cpp.o -D generated_cubin_file:STRING=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_scene.cpp.o.cubin.txt -P /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_scene.cpp.o.Release.cmake + +CMakeFiles/diffvg.dir/atomic.cpp.o: CMakeFiles/diffvg.dir/flags.make +CMakeFiles/diffvg.dir/atomic.cpp.o: /content/Word-As-Image/diffvg/atomic.cpp +CMakeFiles/diffvg.dir/atomic.cpp.o: CMakeFiles/diffvg.dir/compiler_depend.ts + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles --progress-num=$(CMAKE_PROGRESS_3) "Building CXX object CMakeFiles/diffvg.dir/atomic.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -MD -MT CMakeFiles/diffvg.dir/atomic.cpp.o -MF CMakeFiles/diffvg.dir/atomic.cpp.o.d -o CMakeFiles/diffvg.dir/atomic.cpp.o -c /content/Word-As-Image/diffvg/atomic.cpp + +CMakeFiles/diffvg.dir/atomic.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/diffvg.dir/atomic.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) 
$(CXX_INCLUDES) $(CXX_FLAGS) -E /content/Word-As-Image/diffvg/atomic.cpp > CMakeFiles/diffvg.dir/atomic.cpp.i + +CMakeFiles/diffvg.dir/atomic.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/diffvg.dir/atomic.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /content/Word-As-Image/diffvg/atomic.cpp -o CMakeFiles/diffvg.dir/atomic.cpp.s + +CMakeFiles/diffvg.dir/color.cpp.o: CMakeFiles/diffvg.dir/flags.make +CMakeFiles/diffvg.dir/color.cpp.o: /content/Word-As-Image/diffvg/color.cpp +CMakeFiles/diffvg.dir/color.cpp.o: CMakeFiles/diffvg.dir/compiler_depend.ts + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles --progress-num=$(CMAKE_PROGRESS_4) "Building CXX object CMakeFiles/diffvg.dir/color.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -MD -MT CMakeFiles/diffvg.dir/color.cpp.o -MF CMakeFiles/diffvg.dir/color.cpp.o.d -o CMakeFiles/diffvg.dir/color.cpp.o -c /content/Word-As-Image/diffvg/color.cpp + +CMakeFiles/diffvg.dir/color.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/diffvg.dir/color.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /content/Word-As-Image/diffvg/color.cpp > CMakeFiles/diffvg.dir/color.cpp.i + +CMakeFiles/diffvg.dir/color.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/diffvg.dir/color.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /content/Word-As-Image/diffvg/color.cpp -o CMakeFiles/diffvg.dir/color.cpp.s + +CMakeFiles/diffvg.dir/parallel.cpp.o: CMakeFiles/diffvg.dir/flags.make +CMakeFiles/diffvg.dir/parallel.cpp.o: /content/Word-As-Image/diffvg/parallel.cpp +CMakeFiles/diffvg.dir/parallel.cpp.o: 
CMakeFiles/diffvg.dir/compiler_depend.ts + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles --progress-num=$(CMAKE_PROGRESS_5) "Building CXX object CMakeFiles/diffvg.dir/parallel.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -MD -MT CMakeFiles/diffvg.dir/parallel.cpp.o -MF CMakeFiles/diffvg.dir/parallel.cpp.o.d -o CMakeFiles/diffvg.dir/parallel.cpp.o -c /content/Word-As-Image/diffvg/parallel.cpp + +CMakeFiles/diffvg.dir/parallel.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/diffvg.dir/parallel.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /content/Word-As-Image/diffvg/parallel.cpp > CMakeFiles/diffvg.dir/parallel.cpp.i + +CMakeFiles/diffvg.dir/parallel.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/diffvg.dir/parallel.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /content/Word-As-Image/diffvg/parallel.cpp -o CMakeFiles/diffvg.dir/parallel.cpp.s + +CMakeFiles/diffvg.dir/shape.cpp.o: CMakeFiles/diffvg.dir/flags.make +CMakeFiles/diffvg.dir/shape.cpp.o: /content/Word-As-Image/diffvg/shape.cpp +CMakeFiles/diffvg.dir/shape.cpp.o: CMakeFiles/diffvg.dir/compiler_depend.ts + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles --progress-num=$(CMAKE_PROGRESS_6) "Building CXX object CMakeFiles/diffvg.dir/shape.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -MD -MT CMakeFiles/diffvg.dir/shape.cpp.o -MF CMakeFiles/diffvg.dir/shape.cpp.o.d -o CMakeFiles/diffvg.dir/shape.cpp.o -c /content/Word-As-Image/diffvg/shape.cpp + +CMakeFiles/diffvg.dir/shape.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green 
"Preprocessing CXX source to CMakeFiles/diffvg.dir/shape.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /content/Word-As-Image/diffvg/shape.cpp > CMakeFiles/diffvg.dir/shape.cpp.i + +CMakeFiles/diffvg.dir/shape.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/diffvg.dir/shape.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /content/Word-As-Image/diffvg/shape.cpp -o CMakeFiles/diffvg.dir/shape.cpp.s + +# Object files for target diffvg +diffvg_OBJECTS = \ +"CMakeFiles/diffvg.dir/atomic.cpp.o" \ +"CMakeFiles/diffvg.dir/color.cpp.o" \ +"CMakeFiles/diffvg.dir/parallel.cpp.o" \ +"CMakeFiles/diffvg.dir/shape.cpp.o" + +# External object files for target diffvg +diffvg_EXTERNAL_OBJECTS = \ +"/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o" \ +"/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o" + +/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: CMakeFiles/diffvg.dir/atomic.cpp.o +/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: CMakeFiles/diffvg.dir/color.cpp.o +/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: CMakeFiles/diffvg.dir/parallel.cpp.o +/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: CMakeFiles/diffvg.dir/shape.cpp.o +/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o +/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o +/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: CMakeFiles/diffvg.dir/build.make +/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: /usr/local/cuda/lib64/libcudart_static.a 
+/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: /usr/lib/x86_64-linux-gnu/librt.a +/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so: CMakeFiles/diffvg.dir/link.txt + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --bold --progress-dir=/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles --progress-num=$(CMAKE_PROGRESS_7) "Linking CXX shared module /content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so" + $(CMAKE_COMMAND) -E cmake_link_script CMakeFiles/diffvg.dir/link.txt --verbose=$(VERBOSE) + +# Rule to build all files generated by this target. +CMakeFiles/diffvg.dir/build: /content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so +.PHONY : CMakeFiles/diffvg.dir/build + +CMakeFiles/diffvg.dir/clean: + $(CMAKE_COMMAND) -P CMakeFiles/diffvg.dir/cmake_clean.cmake +.PHONY : CMakeFiles/diffvg.dir/clean + +CMakeFiles/diffvg.dir/depend: CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o +CMakeFiles/diffvg.dir/depend: CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 && $(CMAKE_COMMAND) -E cmake_depends "Unix Makefiles" /content/Word-As-Image/diffvg /content/Word-As-Image/diffvg /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/DependInfo.cmake --color=$(COLOR) +.PHONY : CMakeFiles/diffvg.dir/depend + diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/cmake_clean.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/cmake_clean.cmake new file mode 100644 index 0000000000000000000000000000000000000000..d4cd91292eda7c8d0381e2db768d9fad44d9b5fb --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/cmake_clean.cmake @@ -0,0 +1,19 @@ 
+file(REMOVE_RECURSE + "/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.pdb" + "/content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so" + "CMakeFiles/diffvg.dir/atomic.cpp.o" + "CMakeFiles/diffvg.dir/atomic.cpp.o.d" + "CMakeFiles/diffvg.dir/color.cpp.o" + "CMakeFiles/diffvg.dir/color.cpp.o.d" + "CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o" + "CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o" + "CMakeFiles/diffvg.dir/parallel.cpp.o" + "CMakeFiles/diffvg.dir/parallel.cpp.o.d" + "CMakeFiles/diffvg.dir/shape.cpp.o" + "CMakeFiles/diffvg.dir/shape.cpp.o.d" +) + +# Per-language clean rules from dependency scanning. +foreach(lang CXX) + include(CMakeFiles/diffvg.dir/cmake_clean_${lang}.cmake OPTIONAL) +endforeach() diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/color.cpp.o b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/color.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..f83d8f443696ef3747b48ec1bf09df5cb2a23937 Binary files /dev/null and b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/color.cpp.o differ diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/color.cpp.o.d b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/color.cpp.o.d new file mode 100644 index 0000000000000000000000000000000000000000..24707b117659ea3761430183bf25c77f1efeef3d --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/color.cpp.o.d @@ -0,0 +1,166 @@ +CMakeFiles/diffvg.dir/color.cpp.o: \ + /content/Word-As-Image/diffvg/color.cpp /usr/include/stdc-predef.h \ + /content/Word-As-Image/diffvg/color.h \ + /content/Word-As-Image/diffvg/diffvg.h /usr/include/c++/11/cmath \ + /usr/include/x86_64-linux-gnu/c++/11/bits/c++config.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/os_defines.h \ + /usr/include/features.h /usr/include/features-time64.h \ + /usr/include/x86_64-linux-gnu/bits/wordsize.h 
\ + /usr/include/x86_64-linux-gnu/bits/timesize.h \ + /usr/include/x86_64-linux-gnu/sys/cdefs.h \ + /usr/include/x86_64-linux-gnu/bits/long-double.h \ + /usr/include/x86_64-linux-gnu/gnu/stubs.h \ + /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/cpu_defines.h \ + /usr/include/c++/11/bits/cpp_type_traits.h \ + /usr/include/c++/11/ext/type_traits.h /usr/include/math.h \ + /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \ + /usr/include/x86_64-linux-gnu/bits/types.h \ + /usr/include/x86_64-linux-gnu/bits/typesizes.h \ + /usr/include/x86_64-linux-gnu/bits/time64.h \ + /usr/include/x86_64-linux-gnu/bits/math-vector.h \ + /usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h \ + /usr/include/x86_64-linux-gnu/bits/floatn.h \ + /usr/include/x86_64-linux-gnu/bits/floatn-common.h \ + /usr/include/x86_64-linux-gnu/bits/flt-eval-method.h \ + /usr/include/x86_64-linux-gnu/bits/fp-logb.h \ + /usr/include/x86_64-linux-gnu/bits/fp-fast.h \ + /usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h \ + /usr/include/x86_64-linux-gnu/bits/mathcalls.h \ + /usr/include/x86_64-linux-gnu/bits/mathcalls-narrow.h \ + /usr/include/x86_64-linux-gnu/bits/iscanonical.h \ + /usr/include/c++/11/bits/std_abs.h /usr/include/stdlib.h \ + /usr/lib/gcc/x86_64-linux-gnu/11/include/stddef.h \ + /usr/include/x86_64-linux-gnu/bits/waitflags.h \ + /usr/include/x86_64-linux-gnu/bits/waitstatus.h \ + /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \ + /usr/include/x86_64-linux-gnu/sys/types.h \ + /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/time_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \ + /usr/include/x86_64-linux-gnu/bits/stdint-intn.h /usr/include/endian.h \ + /usr/include/x86_64-linux-gnu/bits/endian.h \ + /usr/include/x86_64-linux-gnu/bits/endianness.h \ + 
/usr/include/x86_64-linux-gnu/bits/byteswap.h \ + /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \ + /usr/include/x86_64-linux-gnu/sys/select.h \ + /usr/include/x86_64-linux-gnu/bits/select.h \ + /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \ + /usr/include/x86_64-linux-gnu/bits/select2.h \ + /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \ + /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \ + /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \ + /usr/include/x86_64-linux-gnu/bits/atomic_wide_counter.h \ + /usr/include/x86_64-linux-gnu/bits/struct_mutex.h \ + /usr/include/x86_64-linux-gnu/bits/struct_rwlock.h /usr/include/alloca.h \ + /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \ + /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \ + /usr/include/x86_64-linux-gnu/bits/stdlib.h /usr/include/c++/11/cstdint \ + /usr/lib/gcc/x86_64-linux-gnu/11/include/stdint.h /usr/include/stdint.h \ + /usr/include/x86_64-linux-gnu/bits/wchar.h \ + /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \ + /usr/include/c++/11/atomic /usr/include/c++/11/bits/atomic_base.h \ + /usr/include/c++/11/bits/atomic_lockfree_defines.h \ + /usr/include/c++/11/bits/move.h /usr/include/c++/11/type_traits \ + /content/Word-As-Image/diffvg/vector.h /usr/include/c++/11/iostream \ + /usr/include/c++/11/ostream /usr/include/c++/11/ios \ + /usr/include/c++/11/iosfwd /usr/include/c++/11/bits/stringfwd.h \ + /usr/include/c++/11/bits/memoryfwd.h /usr/include/c++/11/bits/postypes.h \ + /usr/include/c++/11/cwchar /usr/include/wchar.h \ + /usr/lib/gcc/x86_64-linux-gnu/11/include/stdarg.h \ + /usr/include/x86_64-linux-gnu/bits/types/wint_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/mbstate_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \ + 
/usr/include/x86_64-linux-gnu/bits/types/__FILE.h \ + /usr/include/x86_64-linux-gnu/bits/types/FILE.h \ + /usr/include/x86_64-linux-gnu/bits/wchar2.h \ + /usr/include/c++/11/exception /usr/include/c++/11/bits/exception.h \ + /usr/include/c++/11/bits/exception_ptr.h \ + /usr/include/c++/11/bits/exception_defines.h \ + /usr/include/c++/11/bits/cxxabi_init_exception.h \ + /usr/include/c++/11/typeinfo /usr/include/c++/11/bits/hash_bytes.h \ + /usr/include/c++/11/new /usr/include/c++/11/bits/nested_exception.h \ + /usr/include/c++/11/bits/char_traits.h \ + /usr/include/c++/11/bits/stl_algobase.h \ + /usr/include/c++/11/bits/functexcept.h \ + /usr/include/c++/11/ext/numeric_traits.h \ + /usr/include/c++/11/bits/stl_pair.h \ + /usr/include/c++/11/bits/stl_iterator_base_types.h \ + /usr/include/c++/11/bits/stl_iterator_base_funcs.h \ + /usr/include/c++/11/bits/concept_check.h \ + /usr/include/c++/11/debug/assertions.h \ + /usr/include/c++/11/bits/stl_iterator.h \ + /usr/include/c++/11/bits/ptr_traits.h /usr/include/c++/11/debug/debug.h \ + /usr/include/c++/11/bits/predefined_ops.h \ + /usr/include/c++/11/bits/localefwd.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/c++locale.h \ + /usr/include/c++/11/clocale /usr/include/locale.h \ + /usr/include/x86_64-linux-gnu/bits/locale.h /usr/include/c++/11/cctype \ + /usr/include/ctype.h /usr/include/c++/11/bits/ios_base.h \ + /usr/include/c++/11/ext/atomicity.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/gthr.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/gthr-default.h \ + /usr/include/pthread.h /usr/include/sched.h \ + /usr/include/x86_64-linux-gnu/bits/sched.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_sched_param.h \ + /usr/include/x86_64-linux-gnu/bits/cpu-set.h /usr/include/time.h \ + /usr/include/x86_64-linux-gnu/bits/time.h \ + /usr/include/x86_64-linux-gnu/bits/timex.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \ + 
/usr/include/x86_64-linux-gnu/bits/setjmp.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct___jmp_buf_tag.h \ + /usr/include/x86_64-linux-gnu/bits/pthread_stack_min-dynamic.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/atomic_word.h \ + /usr/include/x86_64-linux-gnu/sys/single_threaded.h \ + /usr/include/c++/11/bits/locale_classes.h /usr/include/c++/11/string \ + /usr/include/c++/11/bits/allocator.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/c++allocator.h \ + /usr/include/c++/11/ext/new_allocator.h \ + /usr/include/c++/11/bits/ostream_insert.h \ + /usr/include/c++/11/bits/cxxabi_forced.h \ + /usr/include/c++/11/bits/stl_function.h \ + /usr/include/c++/11/backward/binders.h \ + /usr/include/c++/11/bits/range_access.h \ + /usr/include/c++/11/initializer_list \ + /usr/include/c++/11/bits/basic_string.h \ + /usr/include/c++/11/ext/alloc_traits.h \ + /usr/include/c++/11/bits/alloc_traits.h \ + /usr/include/c++/11/bits/stl_construct.h \ + /usr/include/c++/11/ext/string_conversions.h /usr/include/c++/11/cstdlib \ + /usr/include/c++/11/cstdio /usr/include/stdio.h \ + /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \ + /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \ + /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \ + /usr/include/x86_64-linux-gnu/bits/stdio.h \ + /usr/include/x86_64-linux-gnu/bits/stdio2.h /usr/include/c++/11/cerrno \ + /usr/include/errno.h /usr/include/x86_64-linux-gnu/bits/errno.h \ + /usr/include/linux/errno.h /usr/include/x86_64-linux-gnu/asm/errno.h \ + /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h \ + /usr/include/x86_64-linux-gnu/bits/types/error_t.h \ + /usr/include/c++/11/bits/charconv.h \ + /usr/include/c++/11/bits/functional_hash.h \ + /usr/include/c++/11/bits/basic_string.tcc \ + /usr/include/c++/11/bits/locale_classes.tcc \ + /usr/include/c++/11/system_error \ + 
/usr/include/x86_64-linux-gnu/c++/11/bits/error_constants.h \ + /usr/include/c++/11/stdexcept /usr/include/c++/11/streambuf \ + /usr/include/c++/11/bits/streambuf.tcc \ + /usr/include/c++/11/bits/basic_ios.h \ + /usr/include/c++/11/bits/locale_facets.h /usr/include/c++/11/cwctype \ + /usr/include/wctype.h /usr/include/x86_64-linux-gnu/bits/wctype-wchar.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/ctype_base.h \ + /usr/include/c++/11/bits/streambuf_iterator.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/ctype_inline.h \ + /usr/include/c++/11/bits/locale_facets.tcc \ + /usr/include/c++/11/bits/basic_ios.tcc \ + /usr/include/c++/11/bits/ostream.tcc /usr/include/c++/11/istream \ + /usr/include/c++/11/bits/istream.tcc /content/Word-As-Image/diffvg/ptr.h \ + /usr/include/c++/11/cstddef diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/compiler_depend.make b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/compiler_depend.make new file mode 100644 index 0000000000000000000000000000000000000000..6cf3b66808a93c9cb3cb625ad419995c7d15b386 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/compiler_depend.make @@ -0,0 +1,2 @@ +# Empty compiler generated dependencies file for diffvg. +# This may be replaced when dependencies are built. diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/compiler_depend.ts b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/compiler_depend.ts new file mode 100644 index 0000000000000000000000000000000000000000..66c45552a4324338dadb6652bcf96c2111dcff2a --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/compiler_depend.ts @@ -0,0 +1,2 @@ +# CMAKE generated file: DO NOT EDIT! +# Timestamp file for compiler generated dependencies management for diffvg. 
diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/depend.make b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/depend.make new file mode 100644 index 0000000000000000000000000000000000000000..a7261fb426884a47e7724e7017fb3e4a6745af3d --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/depend.make @@ -0,0 +1,2 @@ +# Empty dependencies file for diffvg. +# This may be replaced when dependencies are built. diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..d0149dbcd36f3f73cc80ddeab766e292e47ff782 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36e989e64589a9b991d794d5030342a4d584659f441c03e32de3b1b920cf137b +size 2991008 diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.Release.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.Release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..4476b26ef13b2fd985c9f2481f20eca215bf3844 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.Release.cmake @@ -0,0 +1,314 @@ +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. 
+ +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + + +########################################################################## +# This file runs the nvcc commands to produce the desired output file along with +# the dependency file needed by CMake to compute dependencies. In addition the +# file checks the output of each command and if the command fails it deletes the +# output files. + +# Input variables +# +# verbose:BOOL=<> OFF: Be as quiet as possible (default) +# ON : Describe each step +# +# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or +# RelWithDebInfo, but it should match one of the +# entries in CUDA_HOST_FLAGS. This is the build +# configuration used when compiling the code. If +# blank or unspecified Debug is assumed as this is +# what CMake does. +# +# generated_file:STRING=<> File to generate. 
This argument must be passed in. +# +# generated_cubin_file:STRING=<> File to generate. This argument must be passed +# in if build_cubin is true. + +cmake_policy(PUSH) +cmake_policy(SET CMP0007 NEW) +if(NOT generated_file) + message(FATAL_ERROR "You must specify generated_file on the command line") +endif() + +# Set these up as variables to make reading the generated file easier +set(CMAKE_COMMAND "/usr/local/envs/word/bin/cmake") # path +set(source_file "/content/Word-As-Image/diffvg/diffvg.cpp") # path +set(NVCC_generated_dependency_file "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_diffvg.cpp.o.NVCC-depend") # path +set(cmake_dependency_file "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_diffvg.cpp.o.depend") # path +set(CUDA_make2cmake "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/make2cmake.cmake") # path +set(CUDA_parse_cubin "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/parse_cubin.cmake") # path +set(build_cubin OFF) # bool +set(CUDA_HOST_COMPILER "/usr/bin/cc") # path +# We won't actually use these variables for now, but we need to set this, in +# order to force this file to be run again if it changes. 
+set(generated_file_path "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//.") # path +set(generated_file_internal "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_diffvg.cpp.o") # path +set(generated_cubin_file_internal "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_diffvg.cpp.o.cubin.txt") # path + +set(CUDA_NVCC_EXECUTABLE "/usr/local/cuda/bin/nvcc") # path +set(CUDA_NVCC_FLAGS -std=c++11 ;; ) # list +# Build specific configuration flags +set(CUDA_NVCC_FLAGS_RELEASE ; ) +set(CUDA_NVCC_FLAGS_DEBUG ; ) +set(CUDA_NVCC_FLAGS_MINSIZEREL ; ) +set(CUDA_NVCC_FLAGS_RELWITHDEBINFO ; ) +set(nvcc_flags -m64;-Ddiffvg_EXPORTS) # list +set(CUDA_NVCC_INCLUDE_DIRS [==[/usr/local/cuda/include;/usr/local/envs/word/include/python3.8;/usr/local/include/python3.10;/usr/local/include/python3.10;/content/Word-As-Image/diffvg/pybind11/include;/usr/local/cuda/include]==]) # list (needs to be in lua quotes to address backslashes) +string(REPLACE "\\" "/" CUDA_NVCC_INCLUDE_DIRS "${CUDA_NVCC_INCLUDE_DIRS}") +set(CUDA_NVCC_COMPILE_DEFINITIONS [==[COMPILE_WITH_CUDA]==]) # list (needs to be in lua quotes see #16510 ). +set(format_flag "-c") # string +set(cuda_language_flag -x=cu) # list + +# Clean up list of include directories and add -I flags +list(REMOVE_DUPLICATES CUDA_NVCC_INCLUDE_DIRS) +set(CUDA_NVCC_INCLUDE_ARGS) +foreach(dir ${CUDA_NVCC_INCLUDE_DIRS}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. 
+ list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}") +endforeach() + +# Clean up list of compile definitions, add -D flags, and append to nvcc_flags +list(REMOVE_DUPLICATES CUDA_NVCC_COMPILE_DEFINITIONS) +foreach(def ${CUDA_NVCC_COMPILE_DEFINITIONS}) + list(APPEND nvcc_flags "-D${def}") +endforeach() + +if(build_cubin AND NOT generated_cubin_file) + message(FATAL_ERROR "You must specify generated_cubin_file on the command line") +endif() + +# This is the list of host compilation flags. It C or CXX should already have +# been chosen by FindCUDA.cmake. +set(CMAKE_HOST_FLAGS -DVERSION_INFO=\"0.0.1\" -fPIC) +set(CMAKE_HOST_FLAGS_RELEASE -O3 -DNDEBUG) +set(CMAKE_HOST_FLAGS_DEBUG -g) +set(CMAKE_HOST_FLAGS_MINSIZEREL -Os -DNDEBUG) +set(CMAKE_HOST_FLAGS_RELWITHDEBINFO -O2 -g -DNDEBUG) + +# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler +set(nvcc_host_compiler_flags "") +# If we weren't given a build_configuration, use Debug. +if(NOT build_configuration) + set(build_configuration Debug) +endif() +string(TOUPPER "${build_configuration}" build_configuration) +#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}") +foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. 
+ string(APPEND nvcc_host_compiler_flags ",\"${flag}\"") +endforeach() +if (nvcc_host_compiler_flags) + set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags}) +endif() +#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"") +# Add the build specific configuration flags +list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}}) + +# Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority +list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 ) +list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 ) +if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER ) + if (CUDA_HOST_COMPILER STREQUAL "" AND DEFINED CCBIN) + set(CCBIN -ccbin "${CCBIN}") + else() + set(CCBIN -ccbin "${CUDA_HOST_COMPILER}") + endif() +endif() + +# cuda_execute_process - Executes a command with optional command echo and status message. +# +# status - Status message to print if verbose is true +# command - COMMAND argument from the usual execute_process argument structure +# ARGN - Remaining arguments are the command with arguments +# +# CUDA_result - return value from running the command +# +# Make this a macro instead of a function, so that things like RESULT_VARIABLE +# and other return variables are present after executing the process. +macro(cuda_execute_process status command) + set(_command ${command}) + if(NOT "x${_command}" STREQUAL "xCOMMAND") + message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})") + endif() + if(verbose) + execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status}) + # Now we need to build up our command string. We are accounting for quotes + # and spaces, anything else is left up to the user to fix if they want to + # copy and paste a runnable command line. + set(cuda_execute_process_string) + foreach(arg ${ARGN}) + # If there are quotes, escape them, so they come through. 
+ string(REPLACE "\"" "\\\"" arg ${arg}) + # Args with spaces need quotes around them to get them to be parsed as a single argument. + if(arg MATCHES " ") + list(APPEND cuda_execute_process_string "\"${arg}\"") + else() + list(APPEND cuda_execute_process_string ${arg}) + endif() + endforeach() + # Echo the command + execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string}) + endif() + # Run the command + execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result ) +endmacro() + +# Delete the target file +cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${generated_file}" + ) + +# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag +# for dependency generation and hope for the best. +set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}") +set(CUDA_VERSION 12.2) +if(CUDA_VERSION VERSION_LESS "3.0") + # Note that this will remove all occurrences of -G. + list(REMOVE_ITEM depends_CUDA_NVCC_FLAGS "-G") +endif() + +# nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This +# can cause incorrect dependencies when #including files based on this macro which is +# defined in the generating passes of nvcc invocation. We will go ahead and manually +# define this for now until a future version fixes this bug. +set(CUDACC_DEFINE -D__CUDACC__) + +# Generate the dependency file +cuda_execute_process( + "Generating dependency file: ${NVCC_generated_dependency_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + -M + ${CUDACC_DEFINE} + "${source_file}" + -o "${NVCC_generated_dependency_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${depends_CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the cmake readable dependency file to a temp file. Don't put the +# quotes just around the filenames for the input_file and output_file variables. 
+# CMake will pass the quotes through and not be able to find the file. +cuda_execute_process( + "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:FILEPATH=${NVCC_generated_dependency_file}" + -D "output_file:FILEPATH=${cmake_dependency_file}.tmp" + -D "verbose=${verbose}" + -P "${CUDA_make2cmake}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Copy the file if it is different +cuda_execute_process( + "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Delete the temporary file +cuda_execute_process( + "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the code +cuda_execute_process( + "Generating ${generated_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${cuda_language_flag} + ${format_flag} -o "${generated_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + # Since nvcc can sometimes leave half done files make sure that we delete the output file. + cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${generated_file}" + ) + message(FATAL_ERROR "Error generating file ${generated_file}") +else() + if(verbose) + message("Generated ${generated_file} successfully.") + endif() +endif() + +# Cubin resource report commands. +if( build_cubin ) + # Run with -cubin to produce resource usage report. 
+ cuda_execute_process( + "Generating ${generated_cubin_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${CUDA_NVCC_FLAGS} + ${nvcc_flags} + ${CCBIN} + ${nvcc_host_compiler_flags} + -DNVCC + -cubin + -o "${generated_cubin_file}" + ${CUDA_NVCC_INCLUDE_ARGS} + ) + + # Execute the parser script. + cuda_execute_process( + "Executing the parser script" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:STRING=${generated_cubin_file}" + -P "${CUDA_parse_cubin}" + ) + +endif() + +cmake_policy(POP) diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.cmake.pre-gen b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.cmake.pre-gen new file mode 100644 index 0000000000000000000000000000000000000000..ed84750786a8cff38f6ae75a3d336979367e7150 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.cmake.pre-gen @@ -0,0 +1,314 @@ +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. + +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + + +########################################################################## +# This file runs the nvcc commands to produce the desired output file along with +# the dependency file needed by CMake to compute dependencies. In addition the +# file checks the output of each command and if the command fails it deletes the +# output files. + +# Input variables +# +# verbose:BOOL=<> OFF: Be as quiet as possible (default) +# ON : Describe each step +# +# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or +# RelWithDebInfo, but it should match one of the +# entries in CUDA_HOST_FLAGS. This is the build +# configuration used when compiling the code. If +# blank or unspecified Debug is assumed as this is +# what CMake does. +# +# generated_file:STRING=<> File to generate. This argument must be passed in. +# +# generated_cubin_file:STRING=<> File to generate. This argument must be passed +# in if build_cubin is true. 
+ +cmake_policy(PUSH) +cmake_policy(SET CMP0007 NEW) +if(NOT generated_file) + message(FATAL_ERROR "You must specify generated_file on the command line") +endif() + +# Set these up as variables to make reading the generated file easier +set(CMAKE_COMMAND "/usr/local/envs/word/bin/cmake") # path +set(source_file "/content/Word-As-Image/diffvg/diffvg.cpp") # path +set(NVCC_generated_dependency_file "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_diffvg.cpp.o.NVCC-depend") # path +set(cmake_dependency_file "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_diffvg.cpp.o.depend") # path +set(CUDA_make2cmake "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/make2cmake.cmake") # path +set(CUDA_parse_cubin "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/parse_cubin.cmake") # path +set(build_cubin OFF) # bool +set(CUDA_HOST_COMPILER "/usr/bin/cc") # path +# We won't actually use these variables for now, but we need to set this, in +# order to force this file to be run again if it changes. 
+set(generated_file_path "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//.") # path +set(generated_file_internal "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_diffvg.cpp.o") # path +set(generated_cubin_file_internal "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_diffvg.cpp.o.cubin.txt") # path + +set(CUDA_NVCC_EXECUTABLE "/usr/local/cuda/bin/nvcc") # path +set(CUDA_NVCC_FLAGS -std=c++11 ;; ) # list +# Build specific configuration flags +set(CUDA_NVCC_FLAGS_RELEASE ; ) +set(CUDA_NVCC_FLAGS_DEBUG ; ) +set(CUDA_NVCC_FLAGS_MINSIZEREL ; ) +set(CUDA_NVCC_FLAGS_RELWITHDEBINFO ; ) +set(nvcc_flags -m64;-Ddiffvg_EXPORTS) # list +set(CUDA_NVCC_INCLUDE_DIRS [==[/usr/local/cuda/include;$]==]) # list (needs to be in lua quotes to address backslashes) +string(REPLACE "\\" "/" CUDA_NVCC_INCLUDE_DIRS "${CUDA_NVCC_INCLUDE_DIRS}") +set(CUDA_NVCC_COMPILE_DEFINITIONS [==[$]==]) # list (needs to be in lua quotes see #16510 ). +set(format_flag "-c") # string +set(cuda_language_flag -x=cu) # list + +# Clean up list of include directories and add -I flags +list(REMOVE_DUPLICATES CUDA_NVCC_INCLUDE_DIRS) +set(CUDA_NVCC_INCLUDE_ARGS) +foreach(dir ${CUDA_NVCC_INCLUDE_DIRS}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. + list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}") +endforeach() + +# Clean up list of compile definitions, add -D flags, and append to nvcc_flags +list(REMOVE_DUPLICATES CUDA_NVCC_COMPILE_DEFINITIONS) +foreach(def ${CUDA_NVCC_COMPILE_DEFINITIONS}) + list(APPEND nvcc_flags "-D${def}") +endforeach() + +if(build_cubin AND NOT generated_cubin_file) + message(FATAL_ERROR "You must specify generated_cubin_file on the command line") +endif() + +# This is the list of host compilation flags. It C or CXX should already have +# been chosen by FindCUDA.cmake. 
+set(CMAKE_HOST_FLAGS -DVERSION_INFO=\"0.0.1\" -fPIC) +set(CMAKE_HOST_FLAGS_RELEASE -O3 -DNDEBUG) +set(CMAKE_HOST_FLAGS_DEBUG -g) +set(CMAKE_HOST_FLAGS_MINSIZEREL -Os -DNDEBUG) +set(CMAKE_HOST_FLAGS_RELWITHDEBINFO -O2 -g -DNDEBUG) + +# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler +set(nvcc_host_compiler_flags "") +# If we weren't given a build_configuration, use Debug. +if(NOT build_configuration) + set(build_configuration Debug) +endif() +string(TOUPPER "${build_configuration}" build_configuration) +#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}") +foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. + string(APPEND nvcc_host_compiler_flags ",\"${flag}\"") +endforeach() +if (nvcc_host_compiler_flags) + set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags}) +endif() +#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"") +# Add the build specific configuration flags +list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}}) + +# Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority +list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 ) +list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 ) +if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER ) + if (CUDA_HOST_COMPILER STREQUAL "" AND DEFINED CCBIN) + set(CCBIN -ccbin "${CCBIN}") + else() + set(CCBIN -ccbin "${CUDA_HOST_COMPILER}") + endif() +endif() + +# cuda_execute_process - Executes a command with optional command echo and status message. 
+# +# status - Status message to print if verbose is true +# command - COMMAND argument from the usual execute_process argument structure +# ARGN - Remaining arguments are the command with arguments +# +# CUDA_result - return value from running the command +# +# Make this a macro instead of a function, so that things like RESULT_VARIABLE +# and other return variables are present after executing the process. +macro(cuda_execute_process status command) + set(_command ${command}) + if(NOT "x${_command}" STREQUAL "xCOMMAND") + message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})") + endif() + if(verbose) + execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status}) + # Now we need to build up our command string. We are accounting for quotes + # and spaces, anything else is left up to the user to fix if they want to + # copy and paste a runnable command line. + set(cuda_execute_process_string) + foreach(arg ${ARGN}) + # If there are quotes, escape them, so they come through. + string(REPLACE "\"" "\\\"" arg ${arg}) + # Args with spaces need quotes around them to get them to be parsed as a single argument. + if(arg MATCHES " ") + list(APPEND cuda_execute_process_string "\"${arg}\"") + else() + list(APPEND cuda_execute_process_string ${arg}) + endif() + endforeach() + # Echo the command + execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string}) + endif() + # Run the command + execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result ) +endmacro() + +# Delete the target file +cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${generated_file}" + ) + +# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag +# for dependency generation and hope for the best. +set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}") +set(CUDA_VERSION 12.2) +if(CUDA_VERSION VERSION_LESS "3.0") + # Note that this will remove all occurrences of -G. 
+ list(REMOVE_ITEM depends_CUDA_NVCC_FLAGS "-G") +endif() + +# nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This +# can cause incorrect dependencies when #including files based on this macro which is +# defined in the generating passes of nvcc invocation. We will go ahead and manually +# define this for now until a future version fixes this bug. +set(CUDACC_DEFINE -D__CUDACC__) + +# Generate the dependency file +cuda_execute_process( + "Generating dependency file: ${NVCC_generated_dependency_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + -M + ${CUDACC_DEFINE} + "${source_file}" + -o "${NVCC_generated_dependency_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${depends_CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the cmake readable dependency file to a temp file. Don't put the +# quotes just around the filenames for the input_file and output_file variables. +# CMake will pass the quotes through and not be able to find the file. 
+cuda_execute_process( + "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:FILEPATH=${NVCC_generated_dependency_file}" + -D "output_file:FILEPATH=${cmake_dependency_file}.tmp" + -D "verbose=${verbose}" + -P "${CUDA_make2cmake}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Copy the file if it is different +cuda_execute_process( + "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Delete the temporary file +cuda_execute_process( + "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the code +cuda_execute_process( + "Generating ${generated_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${cuda_language_flag} + ${format_flag} -o "${generated_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + # Since nvcc can sometimes leave half done files make sure that we delete the output file. + cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${generated_file}" + ) + message(FATAL_ERROR "Error generating file ${generated_file}") +else() + if(verbose) + message("Generated ${generated_file} successfully.") + endif() +endif() + +# Cubin resource report commands. +if( build_cubin ) + # Run with -cubin to produce resource usage report. 
+ cuda_execute_process( + "Generating ${generated_cubin_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${CUDA_NVCC_FLAGS} + ${nvcc_flags} + ${CCBIN} + ${nvcc_host_compiler_flags} + -DNVCC + -cubin + -o "${generated_cubin_file}" + ${CUDA_NVCC_INCLUDE_ARGS} + ) + + # Execute the parser script. + cuda_execute_process( + "Executing the parser script" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:STRING=${generated_cubin_file}" + -P "${CUDA_parse_cubin}" + ) + +endif() + +cmake_policy(POP) diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.depend b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.depend new file mode 100644 index 0000000000000000000000000000000000000000..9af1d77657f47b8855c3cd09f2249e45e4fbebb8 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o.depend @@ -0,0 +1,1275 @@ +# Generated by: make2cmake.cmake +SET(CUDA_NVCC_DEPEND + "/content/Word-As-Image/diffvg/aabb.h" + "/content/Word-As-Image/diffvg/atomic.h" + "/content/Word-As-Image/diffvg/cdf.h" + "/content/Word-As-Image/diffvg/color.h" + "/content/Word-As-Image/diffvg/compute_distance.h" + "/content/Word-As-Image/diffvg/cuda_utils.h" + "/content/Word-As-Image/diffvg/diffvg.cpp" + "/content/Word-As-Image/diffvg/diffvg.h" + "/content/Word-As-Image/diffvg/edge_query.h" + "/content/Word-As-Image/diffvg/filter.h" + "/content/Word-As-Image/diffvg/matrix.h" + "/content/Word-As-Image/diffvg/parallel.h" + "/content/Word-As-Image/diffvg/pcg.h" + "/content/Word-As-Image/diffvg/ptr.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/attr.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/buffer_info.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/cast.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/detail/class.h" + 
"/content/Word-As-Image/diffvg/pybind11/include/pybind11/detail/common.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/detail/descr.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/detail/init.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/detail/internals.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/detail/typeid.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/options.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/pybind11.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/pytypes.h" + "/content/Word-As-Image/diffvg/pybind11/include/pybind11/stl.h" + "/content/Word-As-Image/diffvg/sample_boundary.h" + "/content/Word-As-Image/diffvg/scene.h" + "/content/Word-As-Image/diffvg/shape.h" + "/content/Word-As-Image/diffvg/solve.h" + "/content/Word-As-Image/diffvg/vector.h" + "/content/Word-As-Image/diffvg/winding_number.h" + "/content/Word-As-Image/diffvg/within_distance.h" + "/usr/include/alloca.h" + "/usr/include/asm-generic/bitsperlong.h" + "/usr/include/asm-generic/errno-base.h" + "/usr/include/asm-generic/errno.h" + "/usr/include/asm-generic/int-ll64.h" + "/usr/include/asm-generic/posix_types.h" + "/usr/include/asm-generic/types.h" + "/usr/include/assert.h" + "/usr/include/c++/11/algorithm" + "/usr/include/c++/11/array" + "/usr/include/c++/11/atomic" + "/usr/include/c++/11/backward/auto_ptr.h" + "/usr/include/c++/11/backward/binders.h" + "/usr/include/c++/11/bit" + "/usr/include/c++/11/bits/algorithmfwd.h" + "/usr/include/c++/11/bits/align.h" + "/usr/include/c++/11/bits/alloc_traits.h" + "/usr/include/c++/11/bits/allocated_ptr.h" + "/usr/include/c++/11/bits/allocator.h" + "/usr/include/c++/11/bits/atomic_base.h" + "/usr/include/c++/11/bits/atomic_lockfree_defines.h" + "/usr/include/c++/11/bits/basic_ios.h" + "/usr/include/c++/11/bits/basic_ios.tcc" + "/usr/include/c++/11/bits/basic_string.h" + "/usr/include/c++/11/bits/basic_string.tcc" + 
"/usr/include/c++/11/bits/char_traits.h" + "/usr/include/c++/11/bits/charconv.h" + "/usr/include/c++/11/bits/concept_check.h" + "/usr/include/c++/11/bits/cpp_type_traits.h" + "/usr/include/c++/11/bits/cxxabi_forced.h" + "/usr/include/c++/11/bits/cxxabi_init_exception.h" + "/usr/include/c++/11/bits/deque.tcc" + "/usr/include/c++/11/bits/enable_special_members.h" + "/usr/include/c++/11/bits/erase_if.h" + "/usr/include/c++/11/bits/exception.h" + "/usr/include/c++/11/bits/exception_defines.h" + "/usr/include/c++/11/bits/exception_ptr.h" + "/usr/include/c++/11/bits/forward_list.h" + "/usr/include/c++/11/bits/forward_list.tcc" + "/usr/include/c++/11/bits/functexcept.h" + "/usr/include/c++/11/bits/functional_hash.h" + "/usr/include/c++/11/bits/gslice.h" + "/usr/include/c++/11/bits/gslice_array.h" + "/usr/include/c++/11/bits/hash_bytes.h" + "/usr/include/c++/11/bits/hashtable.h" + "/usr/include/c++/11/bits/hashtable_policy.h" + "/usr/include/c++/11/bits/indirect_array.h" + "/usr/include/c++/11/bits/invoke.h" + "/usr/include/c++/11/bits/ios_base.h" + "/usr/include/c++/11/bits/istream.tcc" + "/usr/include/c++/11/bits/list.tcc" + "/usr/include/c++/11/bits/locale_classes.h" + "/usr/include/c++/11/bits/locale_classes.tcc" + "/usr/include/c++/11/bits/locale_facets.h" + "/usr/include/c++/11/bits/locale_facets.tcc" + "/usr/include/c++/11/bits/localefwd.h" + "/usr/include/c++/11/bits/mask_array.h" + "/usr/include/c++/11/bits/memoryfwd.h" + "/usr/include/c++/11/bits/move.h" + "/usr/include/c++/11/bits/nested_exception.h" + "/usr/include/c++/11/bits/ostream.tcc" + "/usr/include/c++/11/bits/ostream_insert.h" + "/usr/include/c++/11/bits/parse_numbers.h" + "/usr/include/c++/11/bits/postypes.h" + "/usr/include/c++/11/bits/predefined_ops.h" + "/usr/include/c++/11/bits/ptr_traits.h" + "/usr/include/c++/11/bits/range_access.h" + "/usr/include/c++/11/bits/refwrap.h" + "/usr/include/c++/11/bits/shared_ptr.h" + "/usr/include/c++/11/bits/shared_ptr_atomic.h" + 
"/usr/include/c++/11/bits/shared_ptr_base.h" + "/usr/include/c++/11/bits/slice_array.h" + "/usr/include/c++/11/bits/std_abs.h" + "/usr/include/c++/11/bits/std_function.h" + "/usr/include/c++/11/bits/std_mutex.h" + "/usr/include/c++/11/bits/stl_algo.h" + "/usr/include/c++/11/bits/stl_algobase.h" + "/usr/include/c++/11/bits/stl_bvector.h" + "/usr/include/c++/11/bits/stl_construct.h" + "/usr/include/c++/11/bits/stl_deque.h" + "/usr/include/c++/11/bits/stl_function.h" + "/usr/include/c++/11/bits/stl_heap.h" + "/usr/include/c++/11/bits/stl_iterator.h" + "/usr/include/c++/11/bits/stl_iterator_base_funcs.h" + "/usr/include/c++/11/bits/stl_iterator_base_types.h" + "/usr/include/c++/11/bits/stl_list.h" + "/usr/include/c++/11/bits/stl_map.h" + "/usr/include/c++/11/bits/stl_multimap.h" + "/usr/include/c++/11/bits/stl_multiset.h" + "/usr/include/c++/11/bits/stl_pair.h" + "/usr/include/c++/11/bits/stl_raw_storage_iter.h" + "/usr/include/c++/11/bits/stl_relops.h" + "/usr/include/c++/11/bits/stl_set.h" + "/usr/include/c++/11/bits/stl_tempbuf.h" + "/usr/include/c++/11/bits/stl_tree.h" + "/usr/include/c++/11/bits/stl_uninitialized.h" + "/usr/include/c++/11/bits/stl_vector.h" + "/usr/include/c++/11/bits/stream_iterator.h" + "/usr/include/c++/11/bits/streambuf.tcc" + "/usr/include/c++/11/bits/streambuf_iterator.h" + "/usr/include/c++/11/bits/stringfwd.h" + "/usr/include/c++/11/bits/uniform_int_dist.h" + "/usr/include/c++/11/bits/unique_lock.h" + "/usr/include/c++/11/bits/unique_ptr.h" + "/usr/include/c++/11/bits/unordered_map.h" + "/usr/include/c++/11/bits/unordered_set.h" + "/usr/include/c++/11/bits/uses_allocator.h" + "/usr/include/c++/11/bits/valarray_after.h" + "/usr/include/c++/11/bits/valarray_array.h" + "/usr/include/c++/11/bits/valarray_array.tcc" + "/usr/include/c++/11/bits/valarray_before.h" + "/usr/include/c++/11/bits/vector.tcc" + "/usr/include/c++/11/cassert" + "/usr/include/c++/11/cctype" + "/usr/include/c++/11/cerrno" + "/usr/include/c++/11/cfloat" + 
"/usr/include/c++/11/chrono" + "/usr/include/c++/11/climits" + "/usr/include/c++/11/clocale" + "/usr/include/c++/11/cmath" + "/usr/include/c++/11/condition_variable" + "/usr/include/c++/11/cstddef" + "/usr/include/c++/11/cstdint" + "/usr/include/c++/11/cstdio" + "/usr/include/c++/11/cstdlib" + "/usr/include/c++/11/cstring" + "/usr/include/c++/11/ctime" + "/usr/include/c++/11/cwchar" + "/usr/include/c++/11/cwctype" + "/usr/include/c++/11/cxxabi.h" + "/usr/include/c++/11/debug/assertions.h" + "/usr/include/c++/11/debug/debug.h" + "/usr/include/c++/11/deque" + "/usr/include/c++/11/exception" + "/usr/include/c++/11/ext/aligned_buffer.h" + "/usr/include/c++/11/ext/alloc_traits.h" + "/usr/include/c++/11/ext/atomicity.h" + "/usr/include/c++/11/ext/concurrence.h" + "/usr/include/c++/11/ext/new_allocator.h" + "/usr/include/c++/11/ext/numeric_traits.h" + "/usr/include/c++/11/ext/string_conversions.h" + "/usr/include/c++/11/ext/type_traits.h" + "/usr/include/c++/11/forward_list" + "/usr/include/c++/11/functional" + "/usr/include/c++/11/initializer_list" + "/usr/include/c++/11/ios" + "/usr/include/c++/11/iosfwd" + "/usr/include/c++/11/iostream" + "/usr/include/c++/11/istream" + "/usr/include/c++/11/iterator" + "/usr/include/c++/11/limits" + "/usr/include/c++/11/list" + "/usr/include/c++/11/map" + "/usr/include/c++/11/math.h" + "/usr/include/c++/11/memory" + "/usr/include/c++/11/mutex" + "/usr/include/c++/11/new" + "/usr/include/c++/11/ostream" + "/usr/include/c++/11/ratio" + "/usr/include/c++/11/set" + "/usr/include/c++/11/stdexcept" + "/usr/include/c++/11/stdlib.h" + "/usr/include/c++/11/streambuf" + "/usr/include/c++/11/string" + "/usr/include/c++/11/system_error" + "/usr/include/c++/11/tuple" + "/usr/include/c++/11/type_traits" + "/usr/include/c++/11/typeindex" + "/usr/include/c++/11/typeinfo" + "/usr/include/c++/11/unordered_map" + "/usr/include/c++/11/unordered_set" + "/usr/include/c++/11/utility" + "/usr/include/c++/11/valarray" + "/usr/include/c++/11/vector" + 
"/usr/include/c++/11/version" + "/usr/include/crypt.h" + "/usr/include/ctype.h" + "/usr/include/endian.h" + "/usr/include/errno.h" + "/usr/include/features-time64.h" + "/usr/include/features.h" + "/usr/include/inttypes.h" + "/usr/include/limits.h" + "/usr/include/linux/close_range.h" + "/usr/include/linux/errno.h" + "/usr/include/linux/limits.h" + "/usr/include/linux/posix_types.h" + "/usr/include/linux/stat.h" + "/usr/include/linux/stddef.h" + "/usr/include/linux/types.h" + "/usr/include/locale.h" + "/usr/include/math.h" + "/usr/include/pthread.h" + "/usr/include/sched.h" + "/usr/include/stdc-predef.h" + "/usr/include/stdint.h" + "/usr/include/stdio.h" + "/usr/include/stdlib.h" + "/usr/include/string.h" + "/usr/include/strings.h" + "/usr/include/time.h" + "/usr/include/unistd.h" + "/usr/include/wchar.h" + "/usr/include/wctype.h" + "/usr/include/x86_64-linux-gnu/asm/bitsperlong.h" + "/usr/include/x86_64-linux-gnu/asm/errno.h" + "/usr/include/x86_64-linux-gnu/asm/posix_types.h" + "/usr/include/x86_64-linux-gnu/asm/posix_types_64.h" + "/usr/include/x86_64-linux-gnu/asm/types.h" + "/usr/include/x86_64-linux-gnu/bits/atomic_wide_counter.h" + "/usr/include/x86_64-linux-gnu/bits/byteswap.h" + "/usr/include/x86_64-linux-gnu/bits/confname.h" + "/usr/include/x86_64-linux-gnu/bits/cpu-set.h" + "/usr/include/x86_64-linux-gnu/bits/endian.h" + "/usr/include/x86_64-linux-gnu/bits/endianness.h" + "/usr/include/x86_64-linux-gnu/bits/environments.h" + "/usr/include/x86_64-linux-gnu/bits/errno.h" + "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" + "/usr/include/x86_64-linux-gnu/bits/floatn.h" + "/usr/include/x86_64-linux-gnu/bits/flt-eval-method.h" + "/usr/include/x86_64-linux-gnu/bits/fp-fast.h" + "/usr/include/x86_64-linux-gnu/bits/fp-logb.h" + "/usr/include/x86_64-linux-gnu/bits/getopt_core.h" + "/usr/include/x86_64-linux-gnu/bits/getopt_posix.h" + "/usr/include/x86_64-linux-gnu/bits/iscanonical.h" + "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" + 
"/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" + "/usr/include/x86_64-linux-gnu/bits/local_lim.h" + "/usr/include/x86_64-linux-gnu/bits/locale.h" + "/usr/include/x86_64-linux-gnu/bits/long-double.h" + "/usr/include/x86_64-linux-gnu/bits/math-vector.h" + "/usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h" + "/usr/include/x86_64-linux-gnu/bits/mathcalls-narrow.h" + "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" + "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" + "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" + "/usr/include/x86_64-linux-gnu/bits/posix_opt.h" + "/usr/include/x86_64-linux-gnu/bits/pthread_stack_min-dynamic.h" + "/usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h" + "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" + "/usr/include/x86_64-linux-gnu/bits/sched.h" + "/usr/include/x86_64-linux-gnu/bits/select.h" + "/usr/include/x86_64-linux-gnu/bits/select2.h" + "/usr/include/x86_64-linux-gnu/bits/setjmp.h" + "/usr/include/x86_64-linux-gnu/bits/stat.h" + "/usr/include/x86_64-linux-gnu/bits/statx-generic.h" + "/usr/include/x86_64-linux-gnu/bits/statx.h" + "/usr/include/x86_64-linux-gnu/bits/stdint-intn.h" + "/usr/include/x86_64-linux-gnu/bits/stdint-uintn.h" + "/usr/include/x86_64-linux-gnu/bits/stdio.h" + "/usr/include/x86_64-linux-gnu/bits/stdio2.h" + "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" + "/usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h" + "/usr/include/x86_64-linux-gnu/bits/stdlib-float.h" + "/usr/include/x86_64-linux-gnu/bits/stdlib.h" + "/usr/include/x86_64-linux-gnu/bits/string_fortified.h" + "/usr/include/x86_64-linux-gnu/bits/strings_fortified.h" + "/usr/include/x86_64-linux-gnu/bits/struct_mutex.h" + "/usr/include/x86_64-linux-gnu/bits/struct_rwlock.h" + "/usr/include/x86_64-linux-gnu/bits/struct_stat.h" + "/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h" + "/usr/include/x86_64-linux-gnu/bits/time.h" + "/usr/include/x86_64-linux-gnu/bits/time64.h" + 
"/usr/include/x86_64-linux-gnu/bits/timesize.h" + "/usr/include/x86_64-linux-gnu/bits/timex.h" + "/usr/include/x86_64-linux-gnu/bits/types.h" + "/usr/include/x86_64-linux-gnu/bits/types/FILE.h" + "/usr/include/x86_64-linux-gnu/bits/types/__FILE.h" + "/usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/__locale_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/clock_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/clockid_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/error_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/locale_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/mbstate_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/sigset_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct___jmp_buf_tag.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_sched_param.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_statx.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_statx_timestamp.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h" + "/usr/include/x86_64-linux-gnu/bits/types/time_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/timer_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/wint_t.h" + "/usr/include/x86_64-linux-gnu/bits/typesizes.h" + "/usr/include/x86_64-linux-gnu/bits/uintn-identity.h" + "/usr/include/x86_64-linux-gnu/bits/uio_lim.h" + "/usr/include/x86_64-linux-gnu/bits/unistd.h" + "/usr/include/x86_64-linux-gnu/bits/unistd_ext.h" + "/usr/include/x86_64-linux-gnu/bits/waitflags.h" + 
"/usr/include/x86_64-linux-gnu/bits/waitstatus.h" + "/usr/include/x86_64-linux-gnu/bits/wchar.h" + "/usr/include/x86_64-linux-gnu/bits/wchar2.h" + "/usr/include/x86_64-linux-gnu/bits/wctype-wchar.h" + "/usr/include/x86_64-linux-gnu/bits/wordsize.h" + "/usr/include/x86_64-linux-gnu/bits/xopen_lim.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/atomic_word.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/c++allocator.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/c++config.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/c++locale.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/cpu_defines.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/ctype_base.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/ctype_inline.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/cxxabi_tweaks.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/error_constants.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/gthr-default.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/gthr.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/os_defines.h" + "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" + "/usr/include/x86_64-linux-gnu/gnu/stubs.h" + "/usr/include/x86_64-linux-gnu/sys/cdefs.h" + "/usr/include/x86_64-linux-gnu/sys/select.h" + "/usr/include/x86_64-linux-gnu/sys/single_threaded.h" + "/usr/include/x86_64-linux-gnu/sys/stat.h" + "/usr/include/x86_64-linux-gnu/sys/time.h" + "/usr/include/x86_64-linux-gnu/sys/types.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/float.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/limits.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/stdarg.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/stddef.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/stdint.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/syslimits.h" + "/usr/local/cuda/include/builtin_types.h" + "/usr/local/cuda/include/channel_descriptor.h" + "/usr/local/cuda/include/crt/common_functions.h" + "/usr/local/cuda/include/crt/cudacc_ext.h" + "/usr/local/cuda/include/crt/device_double_functions.h" + 
"/usr/local/cuda/include/crt/device_double_functions.hpp" + "/usr/local/cuda/include/crt/device_functions.h" + "/usr/local/cuda/include/crt/device_functions.hpp" + "/usr/local/cuda/include/crt/host_config.h" + "/usr/local/cuda/include/crt/host_defines.h" + "/usr/local/cuda/include/crt/math_functions.h" + "/usr/local/cuda/include/crt/math_functions.hpp" + "/usr/local/cuda/include/crt/sm_70_rt.h" + "/usr/local/cuda/include/crt/sm_80_rt.h" + "/usr/local/cuda/include/crt/sm_90_rt.h" + "/usr/local/cuda/include/cub/agent/agent_merge_sort.cuh" + "/usr/local/cuda/include/cub/agent/agent_radix_sort_downsweep.cuh" + "/usr/local/cuda/include/cub/agent/agent_radix_sort_histogram.cuh" + "/usr/local/cuda/include/cub/agent/agent_radix_sort_onesweep.cuh" + "/usr/local/cuda/include/cub/agent/agent_radix_sort_upsweep.cuh" + "/usr/local/cuda/include/cub/agent/agent_reduce.cuh" + "/usr/local/cuda/include/cub/agent/agent_reduce_by_key.cuh" + "/usr/local/cuda/include/cub/agent/agent_scan.cuh" + "/usr/local/cuda/include/cub/agent/agent_scan_by_key.cuh" + "/usr/local/cuda/include/cub/agent/agent_select_if.cuh" + "/usr/local/cuda/include/cub/agent/agent_unique_by_key.cuh" + "/usr/local/cuda/include/cub/agent/single_pass_scan_operators.cuh" + "/usr/local/cuda/include/cub/block/block_discontinuity.cuh" + "/usr/local/cuda/include/cub/block/block_exchange.cuh" + "/usr/local/cuda/include/cub/block/block_load.cuh" + "/usr/local/cuda/include/cub/block/block_merge_sort.cuh" + "/usr/local/cuda/include/cub/block/block_radix_rank.cuh" + "/usr/local/cuda/include/cub/block/block_radix_sort.cuh" + "/usr/local/cuda/include/cub/block/block_raking_layout.cuh" + "/usr/local/cuda/include/cub/block/block_reduce.cuh" + "/usr/local/cuda/include/cub/block/block_scan.cuh" + "/usr/local/cuda/include/cub/block/block_store.cuh" + "/usr/local/cuda/include/cub/block/radix_rank_sort_operations.cuh" + "/usr/local/cuda/include/cub/block/specializations/block_reduce_raking.cuh" + 
"/usr/local/cuda/include/cub/block/specializations/block_reduce_raking_commutative_only.cuh" + "/usr/local/cuda/include/cub/block/specializations/block_reduce_warp_reductions.cuh" + "/usr/local/cuda/include/cub/block/specializations/block_scan_raking.cuh" + "/usr/local/cuda/include/cub/block/specializations/block_scan_warp_scans.cuh" + "/usr/local/cuda/include/cub/config.cuh" + "/usr/local/cuda/include/cub/detail/choose_offset.cuh" + "/usr/local/cuda/include/cub/detail/cpp_compatibility.cuh" + "/usr/local/cuda/include/cub/detail/detect_cuda_runtime.cuh" + "/usr/local/cuda/include/cub/detail/device_synchronize.cuh" + "/usr/local/cuda/include/cub/detail/exec_check_disable.cuh" + "/usr/local/cuda/include/cub/detail/strong_load.cuh" + "/usr/local/cuda/include/cub/detail/strong_store.cuh" + "/usr/local/cuda/include/cub/detail/type_traits.cuh" + "/usr/local/cuda/include/cub/detail/uninitialized_copy.cuh" + "/usr/local/cuda/include/cub/device/device_merge_sort.cuh" + "/usr/local/cuda/include/cub/device/device_radix_sort.cuh" + "/usr/local/cuda/include/cub/device/device_reduce.cuh" + "/usr/local/cuda/include/cub/device/device_scan.cuh" + "/usr/local/cuda/include/cub/device/device_select.cuh" + "/usr/local/cuda/include/cub/device/dispatch/dispatch_merge_sort.cuh" + "/usr/local/cuda/include/cub/device/dispatch/dispatch_radix_sort.cuh" + "/usr/local/cuda/include/cub/device/dispatch/dispatch_reduce.cuh" + "/usr/local/cuda/include/cub/device/dispatch/dispatch_reduce_by_key.cuh" + "/usr/local/cuda/include/cub/device/dispatch/dispatch_scan.cuh" + "/usr/local/cuda/include/cub/device/dispatch/dispatch_scan_by_key.cuh" + "/usr/local/cuda/include/cub/device/dispatch/dispatch_select_if.cuh" + "/usr/local/cuda/include/cub/device/dispatch/dispatch_unique_by_key.cuh" + "/usr/local/cuda/include/cub/device/dispatch/tuning/tuning_reduce_by_key.cuh" + "/usr/local/cuda/include/cub/device/dispatch/tuning/tuning_scan.cuh" + 
"/usr/local/cuda/include/cub/device/dispatch/tuning/tuning_scan_by_key.cuh" + "/usr/local/cuda/include/cub/device/dispatch/tuning/tuning_select_if.cuh" + "/usr/local/cuda/include/cub/device/dispatch/tuning/tuning_unique_by_key.cuh" + "/usr/local/cuda/include/cub/grid/grid_even_share.cuh" + "/usr/local/cuda/include/cub/grid/grid_mapping.cuh" + "/usr/local/cuda/include/cub/grid/grid_queue.cuh" + "/usr/local/cuda/include/cub/iterator/arg_index_input_iterator.cuh" + "/usr/local/cuda/include/cub/iterator/cache_modified_input_iterator.cuh" + "/usr/local/cuda/include/cub/iterator/constant_input_iterator.cuh" + "/usr/local/cuda/include/cub/thread/thread_load.cuh" + "/usr/local/cuda/include/cub/thread/thread_operators.cuh" + "/usr/local/cuda/include/cub/thread/thread_reduce.cuh" + "/usr/local/cuda/include/cub/thread/thread_scan.cuh" + "/usr/local/cuda/include/cub/thread/thread_sort.cuh" + "/usr/local/cuda/include/cub/thread/thread_store.cuh" + "/usr/local/cuda/include/cub/util_arch.cuh" + "/usr/local/cuda/include/cub/util_compiler.cuh" + "/usr/local/cuda/include/cub/util_cpp_dialect.cuh" + "/usr/local/cuda/include/cub/util_debug.cuh" + "/usr/local/cuda/include/cub/util_deprecated.cuh" + "/usr/local/cuda/include/cub/util_device.cuh" + "/usr/local/cuda/include/cub/util_macro.cuh" + "/usr/local/cuda/include/cub/util_math.cuh" + "/usr/local/cuda/include/cub/util_namespace.cuh" + "/usr/local/cuda/include/cub/util_ptx.cuh" + "/usr/local/cuda/include/cub/util_type.cuh" + "/usr/local/cuda/include/cub/version.cuh" + "/usr/local/cuda/include/cub/warp/specializations/warp_exchange_shfl.cuh" + "/usr/local/cuda/include/cub/warp/specializations/warp_exchange_smem.cuh" + "/usr/local/cuda/include/cub/warp/specializations/warp_reduce_shfl.cuh" + "/usr/local/cuda/include/cub/warp/specializations/warp_reduce_smem.cuh" + "/usr/local/cuda/include/cub/warp/specializations/warp_scan_shfl.cuh" + "/usr/local/cuda/include/cub/warp/specializations/warp_scan_smem.cuh" + 
"/usr/local/cuda/include/cub/warp/warp_exchange.cuh" + "/usr/local/cuda/include/cub/warp/warp_reduce.cuh" + "/usr/local/cuda/include/cub/warp/warp_scan.cuh" + "/usr/local/cuda/include/cuda.h" + "/usr/local/cuda/include/cuda/std/detail/__config" + "/usr/local/cuda/include/cuda/std/detail/__pragma_pop" + "/usr/local/cuda/include/cuda/std/detail/__pragma_push" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__assert" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__availability" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/_One_of.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/__concept_macros.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/arithmetic.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/boolean_testable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/class_or_enum.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/common_reference_with.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/common_with.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/convertible_to.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/copyable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/derived_from.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/destructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/different_from.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/equality_comparable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/invocable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/movable.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/predicate.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/regular.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/relation.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/same_as.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/semiregular.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/swappable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__concepts/totally_ordered.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__config" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__cuda/chrono.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__cuda/climits_prelude.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__cuda/cstddef_prelude.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__cuda/cstdint_prelude.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__debug" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/binary_function.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/binary_negate.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/bind.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/bind_back.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/bind_front.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/binder1st.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/binder2nd.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/compose.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/default_searcher.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/function.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/hash.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/identity.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/invoke.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/is_transparent.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/mem_fn.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/mem_fun_ref.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/not_fn.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/operations.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/perfect_forward.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/pointer_to_binary_function.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/pointer_to_unary_function.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/reference_wrapper.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/unary_function.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/unary_negate.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/unwrap_ref.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional/weak_result_type.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__functional_base" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__fwd/array.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__fwd/get.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__fwd/hash.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__fwd/memory_resource.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__fwd/pair.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__fwd/string.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__fwd/tuple.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/access.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/advance.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/back_insert_iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/concepts.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/data.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/default_sentinel.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/distance.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/empty.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/erase_if_container.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/front_insert_iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/incrementable_traits.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/insert_iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/istream_iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/istreambuf_iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/iter_move.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/iterator_traits.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/move_iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/next.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/ostream_iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/ostreambuf_iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/prev.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/readable_traits.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/reverse_access.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/reverse_iterator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/size.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__iterator/wrap_iter.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__memory/addressof.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__memory/construct_at.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__memory/pointer_traits.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__memory/voidify.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__pragma_pop" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__pragma_push" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__tuple_dir/apply_cv.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__tuple_dir/make_tuple_types.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__tuple_dir/sfinae_helpers.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__tuple_dir/structured_bindings.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_element.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_indices.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_like.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_size.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_types.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_const.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_cv.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_lvalue_reference.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_pointer.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_rvalue_reference.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_volatile.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/aligned_storage.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/aligned_union.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/alignment_of.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/apply_cv.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/can_extract_key.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/common_reference.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/common_type.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/conditional.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/conjunction.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/copy_cv.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/copy_cvref.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/decay.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/dependent_type.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/disjunction.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/enable_if.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/extent.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/has_unique_object_representation.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/has_virtual_destructor.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/integral_constant.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_abstract.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_aggregate.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_allocator.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_arithmetic.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_array.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_base_of.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_bounded_array.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_callable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_char_like_type.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_class.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_compound.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_const.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_constant_evaluated.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_convertible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_copy_assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_copy_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_core_convertible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_default_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_destructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_empty.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_enum.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_final.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_floating_point.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_function.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_fundamental.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_implicitly_default_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_integral.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_literal_type.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_member_function_pointer.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_member_object_pointer.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_member_pointer.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_move_assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_move_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_convertible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_copy_assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_copy_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_default_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_destructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_move_assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_move_constructible.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_null_pointer.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_object.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_pod.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_pointer.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_polymorphic.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_primary_template.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_reference.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_reference_wrapper.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_referenceable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_same.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_scalar.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_scoped_enum.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_signed.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_signed_integer.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_standard_layout.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_swappable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivial.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivially_assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivially_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivially_copy_assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivially_copy_constructible.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivially_copyable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivially_default_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivially_destructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivially_move_assignable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_trivially_move_constructible.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_unbounded_array.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_union.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_unsigned.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_unsigned_integer.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_valid_expansion.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_void.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_volatile.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/lazy.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/make_32_64_or_128_bit.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/make_const_lvalue_ref.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/make_signed.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/make_unsigned.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/maybe_const.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/nat.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/negation.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/promote.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/rank.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/remove_all_extents.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/remove_const.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/remove_const_ref.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/remove_cv.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/remove_cvref.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/remove_extent.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/remove_pointer.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/remove_reference.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/remove_volatile.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/result_of.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/type_identity.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/type_list.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/underlying_type.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__type_traits/void_t.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__undef_macros" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/as_const.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/auto_cast.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/cmp.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/convert_to_integral.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/declval.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/exchange.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/forward.h" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/forward_like.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/in_place.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/integer_sequence.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/move.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/pair.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/piecewise_construct.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/priority_tag.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/rel_ops.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/swap.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/to_underlying.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__utility/unreachable.h" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/__verbose_abort" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/chrono" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/climits" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/concepts" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/cstddef" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/cstdint" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/cstdlib" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/ctime" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/functional" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/initializer_list" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/iosfwd" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/iterator" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/limits" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/ratio" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/tuple" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/type_traits" + 
"/usr/local/cuda/include/cuda/std/detail/libcxx/include/utility" + "/usr/local/cuda/include/cuda/std/detail/libcxx/include/version" + "/usr/local/cuda/include/cuda/std/functional" + "/usr/local/cuda/include/cuda/std/tuple" + "/usr/local/cuda/include/cuda/std/type_traits" + "/usr/local/cuda/include/cuda/std/utility" + "/usr/local/cuda/include/cuda_bf16.h" + "/usr/local/cuda/include/cuda_bf16.hpp" + "/usr/local/cuda/include/cuda_device_runtime_api.h" + "/usr/local/cuda/include/cuda_fp16.h" + "/usr/local/cuda/include/cuda_fp16.hpp" + "/usr/local/cuda/include/cuda_runtime.h" + "/usr/local/cuda/include/cuda_runtime_api.h" + "/usr/local/cuda/include/device_atomic_functions.h" + "/usr/local/cuda/include/device_launch_parameters.h" + "/usr/local/cuda/include/device_types.h" + "/usr/local/cuda/include/driver_functions.h" + "/usr/local/cuda/include/driver_types.h" + "/usr/local/cuda/include/library_types.h" + "/usr/local/cuda/include/nv/detail/__preprocessor" + "/usr/local/cuda/include/nv/detail/__target_macros" + "/usr/local/cuda/include/nv/target" + "/usr/local/cuda/include/sm_20_atomic_functions.h" + "/usr/local/cuda/include/sm_20_intrinsics.h" + "/usr/local/cuda/include/sm_30_intrinsics.h" + "/usr/local/cuda/include/sm_32_atomic_functions.h" + "/usr/local/cuda/include/sm_32_intrinsics.h" + "/usr/local/cuda/include/sm_35_atomic_functions.h" + "/usr/local/cuda/include/sm_35_intrinsics.h" + "/usr/local/cuda/include/sm_60_atomic_functions.h" + "/usr/local/cuda/include/sm_61_intrinsics.h" + "/usr/local/cuda/include/surface_indirect_functions.h" + "/usr/local/cuda/include/surface_types.h" + "/usr/local/cuda/include/texture_indirect_functions.h" + "/usr/local/cuda/include/texture_types.h" + "/usr/local/cuda/include/thrust/advance.h" + "/usr/local/cuda/include/thrust/copy.h" + "/usr/local/cuda/include/thrust/detail/advance.inl" + "/usr/local/cuda/include/thrust/detail/alignment.h" + "/usr/local/cuda/include/thrust/detail/allocator/allocator_traits.h" + 
"/usr/local/cuda/include/thrust/detail/allocator/allocator_traits.inl" + "/usr/local/cuda/include/thrust/detail/allocator/copy_construct_range.h" + "/usr/local/cuda/include/thrust/detail/allocator/copy_construct_range.inl" + "/usr/local/cuda/include/thrust/detail/allocator/default_construct_range.h" + "/usr/local/cuda/include/thrust/detail/allocator/default_construct_range.inl" + "/usr/local/cuda/include/thrust/detail/allocator/destroy_range.h" + "/usr/local/cuda/include/thrust/detail/allocator/destroy_range.inl" + "/usr/local/cuda/include/thrust/detail/allocator/fill_construct_range.h" + "/usr/local/cuda/include/thrust/detail/allocator/fill_construct_range.inl" + "/usr/local/cuda/include/thrust/detail/allocator/no_throw_allocator.h" + "/usr/local/cuda/include/thrust/detail/allocator/tagged_allocator.h" + "/usr/local/cuda/include/thrust/detail/allocator/tagged_allocator.inl" + "/usr/local/cuda/include/thrust/detail/allocator/temporary_allocator.h" + "/usr/local/cuda/include/thrust/detail/allocator/temporary_allocator.inl" + "/usr/local/cuda/include/thrust/detail/allocator_aware_execution_policy.h" + "/usr/local/cuda/include/thrust/detail/config.h" + "/usr/local/cuda/include/thrust/detail/config/compiler.h" + "/usr/local/cuda/include/thrust/detail/config/config.h" + "/usr/local/cuda/include/thrust/detail/config/cpp_compatibility.h" + "/usr/local/cuda/include/thrust/detail/config/cpp_dialect.h" + "/usr/local/cuda/include/thrust/detail/config/debug.h" + "/usr/local/cuda/include/thrust/detail/config/deprecated.h" + "/usr/local/cuda/include/thrust/detail/config/device_system.h" + "/usr/local/cuda/include/thrust/detail/config/exec_check_disable.h" + "/usr/local/cuda/include/thrust/detail/config/forceinline.h" + "/usr/local/cuda/include/thrust/detail/config/global_workarounds.h" + "/usr/local/cuda/include/thrust/detail/config/host_device.h" + "/usr/local/cuda/include/thrust/detail/config/host_system.h" + "/usr/local/cuda/include/thrust/detail/config/namespace.h" + 
"/usr/local/cuda/include/thrust/detail/config/simple_defines.h" + "/usr/local/cuda/include/thrust/detail/contiguous_storage.h" + "/usr/local/cuda/include/thrust/detail/contiguous_storage.inl" + "/usr/local/cuda/include/thrust/detail/copy.h" + "/usr/local/cuda/include/thrust/detail/copy.inl" + "/usr/local/cuda/include/thrust/detail/copy_if.h" + "/usr/local/cuda/include/thrust/detail/copy_if.inl" + "/usr/local/cuda/include/thrust/detail/cpp11_required.h" + "/usr/local/cuda/include/thrust/detail/cstdint.h" + "/usr/local/cuda/include/thrust/detail/dependencies_aware_execution_policy.h" + "/usr/local/cuda/include/thrust/detail/distance.inl" + "/usr/local/cuda/include/thrust/detail/execute_with_allocator.h" + "/usr/local/cuda/include/thrust/detail/execute_with_allocator_fwd.h" + "/usr/local/cuda/include/thrust/detail/execute_with_dependencies.h" + "/usr/local/cuda/include/thrust/detail/execution_policy.h" + "/usr/local/cuda/include/thrust/detail/extrema.inl" + "/usr/local/cuda/include/thrust/detail/fill.inl" + "/usr/local/cuda/include/thrust/detail/find.inl" + "/usr/local/cuda/include/thrust/detail/for_each.inl" + "/usr/local/cuda/include/thrust/detail/function.h" + "/usr/local/cuda/include/thrust/detail/functional.inl" + "/usr/local/cuda/include/thrust/detail/functional/actor.h" + "/usr/local/cuda/include/thrust/detail/functional/actor.inl" + "/usr/local/cuda/include/thrust/detail/functional/argument.h" + "/usr/local/cuda/include/thrust/detail/functional/composite.h" + "/usr/local/cuda/include/thrust/detail/functional/operators.h" + "/usr/local/cuda/include/thrust/detail/functional/operators/arithmetic_operators.h" + "/usr/local/cuda/include/thrust/detail/functional/operators/assignment_operator.h" + "/usr/local/cuda/include/thrust/detail/functional/operators/bitwise_operators.h" + "/usr/local/cuda/include/thrust/detail/functional/operators/compound_assignment_operators.h" + "/usr/local/cuda/include/thrust/detail/functional/operators/logical_operators.h" + 
"/usr/local/cuda/include/thrust/detail/functional/operators/operator_adaptors.h" + "/usr/local/cuda/include/thrust/detail/functional/operators/relational_operators.h" + "/usr/local/cuda/include/thrust/detail/functional/placeholder.h" + "/usr/local/cuda/include/thrust/detail/functional/value.h" + "/usr/local/cuda/include/thrust/detail/generate.inl" + "/usr/local/cuda/include/thrust/detail/get_iterator_value.h" + "/usr/local/cuda/include/thrust/detail/integer_math.h" + "/usr/local/cuda/include/thrust/detail/integer_traits.h" + "/usr/local/cuda/include/thrust/detail/internal_functional.h" + "/usr/local/cuda/include/thrust/detail/malloc_and_free.h" + "/usr/local/cuda/include/thrust/detail/memory_wrapper.h" + "/usr/local/cuda/include/thrust/detail/merge.inl" + "/usr/local/cuda/include/thrust/detail/minmax.h" + "/usr/local/cuda/include/thrust/detail/mpl/math.h" + "/usr/local/cuda/include/thrust/detail/numeric_traits.h" + "/usr/local/cuda/include/thrust/detail/pair.inl" + "/usr/local/cuda/include/thrust/detail/pointer.h" + "/usr/local/cuda/include/thrust/detail/pointer.inl" + "/usr/local/cuda/include/thrust/detail/preprocessor.h" + "/usr/local/cuda/include/thrust/detail/raw_pointer_cast.h" + "/usr/local/cuda/include/thrust/detail/raw_reference_cast.h" + "/usr/local/cuda/include/thrust/detail/reduce.inl" + "/usr/local/cuda/include/thrust/detail/reference.h" + "/usr/local/cuda/include/thrust/detail/reference_forward_declaration.h" + "/usr/local/cuda/include/thrust/detail/replace.inl" + "/usr/local/cuda/include/thrust/detail/reverse.inl" + "/usr/local/cuda/include/thrust/detail/scan.inl" + "/usr/local/cuda/include/thrust/detail/scatter.inl" + "/usr/local/cuda/include/thrust/detail/seq.h" + "/usr/local/cuda/include/thrust/detail/sequence.inl" + "/usr/local/cuda/include/thrust/detail/sort.inl" + "/usr/local/cuda/include/thrust/detail/static_assert.h" + "/usr/local/cuda/include/thrust/detail/swap.h" + "/usr/local/cuda/include/thrust/detail/swap.inl" + 
"/usr/local/cuda/include/thrust/detail/swap_ranges.inl" + "/usr/local/cuda/include/thrust/detail/tabulate.inl" + "/usr/local/cuda/include/thrust/detail/temporary_array.h" + "/usr/local/cuda/include/thrust/detail/temporary_array.inl" + "/usr/local/cuda/include/thrust/detail/temporary_buffer.h" + "/usr/local/cuda/include/thrust/detail/transform.inl" + "/usr/local/cuda/include/thrust/detail/transform_reduce.inl" + "/usr/local/cuda/include/thrust/detail/trivial_sequence.h" + "/usr/local/cuda/include/thrust/detail/tuple.inl" + "/usr/local/cuda/include/thrust/detail/tuple_meta_transform.h" + "/usr/local/cuda/include/thrust/detail/tuple_transform.h" + "/usr/local/cuda/include/thrust/detail/type_deduction.h" + "/usr/local/cuda/include/thrust/detail/type_traits.h" + "/usr/local/cuda/include/thrust/detail/type_traits/function_traits.h" + "/usr/local/cuda/include/thrust/detail/type_traits/has_member_function.h" + "/usr/local/cuda/include/thrust/detail/type_traits/has_nested_type.h" + "/usr/local/cuda/include/thrust/detail/type_traits/has_trivial_assign.h" + "/usr/local/cuda/include/thrust/detail/type_traits/is_call_possible.h" + "/usr/local/cuda/include/thrust/detail/type_traits/is_metafunction_defined.h" + "/usr/local/cuda/include/thrust/detail/type_traits/iterator/is_output_iterator.h" + "/usr/local/cuda/include/thrust/detail/type_traits/minimum_type.h" + "/usr/local/cuda/include/thrust/detail/type_traits/pointer_traits.h" + "/usr/local/cuda/include/thrust/detail/type_traits/result_of_adaptable_function.h" + "/usr/local/cuda/include/thrust/detail/uninitialized_fill.inl" + "/usr/local/cuda/include/thrust/detail/use_default.h" + "/usr/local/cuda/include/thrust/distance.h" + "/usr/local/cuda/include/thrust/execution_policy.h" + "/usr/local/cuda/include/thrust/extrema.h" + "/usr/local/cuda/include/thrust/fill.h" + "/usr/local/cuda/include/thrust/find.h" + "/usr/local/cuda/include/thrust/for_each.h" + "/usr/local/cuda/include/thrust/functional.h" + 
"/usr/local/cuda/include/thrust/generate.h" + "/usr/local/cuda/include/thrust/iterator/counting_iterator.h" + "/usr/local/cuda/include/thrust/iterator/detail/any_assign.h" + "/usr/local/cuda/include/thrust/iterator/detail/any_system_tag.h" + "/usr/local/cuda/include/thrust/iterator/detail/counting_iterator.inl" + "/usr/local/cuda/include/thrust/iterator/detail/device_system_tag.h" + "/usr/local/cuda/include/thrust/iterator/detail/distance_from_result.h" + "/usr/local/cuda/include/thrust/iterator/detail/host_system_tag.h" + "/usr/local/cuda/include/thrust/iterator/detail/is_iterator_category.h" + "/usr/local/cuda/include/thrust/iterator/detail/iterator_adaptor_base.h" + "/usr/local/cuda/include/thrust/iterator/detail/iterator_category_to_system.h" + "/usr/local/cuda/include/thrust/iterator/detail/iterator_category_to_traversal.h" + "/usr/local/cuda/include/thrust/iterator/detail/iterator_category_with_system_and_traversal.h" + "/usr/local/cuda/include/thrust/iterator/detail/iterator_facade_category.h" + "/usr/local/cuda/include/thrust/iterator/detail/iterator_traits.inl" + "/usr/local/cuda/include/thrust/iterator/detail/iterator_traversal_tags.h" + "/usr/local/cuda/include/thrust/iterator/detail/minimum_category.h" + "/usr/local/cuda/include/thrust/iterator/detail/minimum_system.h" + "/usr/local/cuda/include/thrust/iterator/detail/normal_iterator.h" + "/usr/local/cuda/include/thrust/iterator/detail/permutation_iterator_base.h" + "/usr/local/cuda/include/thrust/iterator/detail/reverse_iterator.inl" + "/usr/local/cuda/include/thrust/iterator/detail/reverse_iterator_base.h" + "/usr/local/cuda/include/thrust/iterator/detail/tagged_iterator.h" + "/usr/local/cuda/include/thrust/iterator/detail/transform_iterator.inl" + "/usr/local/cuda/include/thrust/iterator/detail/tuple_of_iterator_references.h" + "/usr/local/cuda/include/thrust/iterator/detail/universal_categories.h" + "/usr/local/cuda/include/thrust/iterator/detail/zip_iterator.inl" + 
"/usr/local/cuda/include/thrust/iterator/detail/zip_iterator_base.h" + "/usr/local/cuda/include/thrust/iterator/iterator_adaptor.h" + "/usr/local/cuda/include/thrust/iterator/iterator_categories.h" + "/usr/local/cuda/include/thrust/iterator/iterator_facade.h" + "/usr/local/cuda/include/thrust/iterator/iterator_traits.h" + "/usr/local/cuda/include/thrust/iterator/permutation_iterator.h" + "/usr/local/cuda/include/thrust/iterator/reverse_iterator.h" + "/usr/local/cuda/include/thrust/iterator/transform_iterator.h" + "/usr/local/cuda/include/thrust/iterator/zip_iterator.h" + "/usr/local/cuda/include/thrust/memory.h" + "/usr/local/cuda/include/thrust/merge.h" + "/usr/local/cuda/include/thrust/pair.h" + "/usr/local/cuda/include/thrust/reduce.h" + "/usr/local/cuda/include/thrust/replace.h" + "/usr/local/cuda/include/thrust/reverse.h" + "/usr/local/cuda/include/thrust/scan.h" + "/usr/local/cuda/include/thrust/scatter.h" + "/usr/local/cuda/include/thrust/sequence.h" + "/usr/local/cuda/include/thrust/sort.h" + "/usr/local/cuda/include/thrust/swap.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/adjacent_difference.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/assign_value.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/binary_search.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/copy.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/copy_if.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/count.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/execution_policy.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/extrema.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/find.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/for_each.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/get_value.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/iter_swap.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/malloc_and_free.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/merge.h" + 
"/usr/local/cuda/include/thrust/system/cpp/detail/par.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/partition.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/reduce.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/reduce_by_key.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/remove.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/scan.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/scan_by_key.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/set_operations.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/sort.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/swap_ranges.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/transform.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/unique.h" + "/usr/local/cuda/include/thrust/system/cpp/detail/unique_by_key.h" + "/usr/local/cuda/include/thrust/system/cpp/execution_policy.h" + "/usr/local/cuda/include/thrust/system/cuda/config.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/assign_value.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/cdp_dispatch.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/copy.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/copy_if.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/core/agent_launcher.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/core/alignment.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/core/triple_chevron_launch.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/core/util.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/cross_system.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/dispatch.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/error.inl" + "/usr/local/cuda/include/thrust/system/cuda/detail/execution_policy.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/extrema.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/fill.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/find.h" + 
"/usr/local/cuda/include/thrust/system/cuda/detail/for_each.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/generate.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/get_value.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/guarded_cuda_runtime_api.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/guarded_driver_types.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/internal/copy_cross_system.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/internal/copy_device_to_device.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/iter_swap.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/make_unsigned_special.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/malloc_and_free.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/merge.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/par.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/par_to_seq.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/parallel_for.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/reduce.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/reduce_by_key.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/replace.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/reverse.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/scan.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/scan_by_key.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/scatter.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/sort.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/swap_ranges.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/tabulate.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/temporary_buffer.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/transform.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/transform_reduce.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/uninitialized_copy.h" + 
"/usr/local/cuda/include/thrust/system/cuda/detail/uninitialized_fill.h" + "/usr/local/cuda/include/thrust/system/cuda/detail/util.h" + "/usr/local/cuda/include/thrust/system/cuda/error.h" + "/usr/local/cuda/include/thrust/system/cuda/execution_policy.h" + "/usr/local/cuda/include/thrust/system/detail/adl/assign_value.h" + "/usr/local/cuda/include/thrust/system/detail/adl/copy.h" + "/usr/local/cuda/include/thrust/system/detail/adl/copy_if.h" + "/usr/local/cuda/include/thrust/system/detail/adl/extrema.h" + "/usr/local/cuda/include/thrust/system/detail/adl/fill.h" + "/usr/local/cuda/include/thrust/system/detail/adl/find.h" + "/usr/local/cuda/include/thrust/system/detail/adl/for_each.h" + "/usr/local/cuda/include/thrust/system/detail/adl/generate.h" + "/usr/local/cuda/include/thrust/system/detail/adl/get_value.h" + "/usr/local/cuda/include/thrust/system/detail/adl/iter_swap.h" + "/usr/local/cuda/include/thrust/system/detail/adl/malloc_and_free.h" + "/usr/local/cuda/include/thrust/system/detail/adl/merge.h" + "/usr/local/cuda/include/thrust/system/detail/adl/reduce.h" + "/usr/local/cuda/include/thrust/system/detail/adl/reduce_by_key.h" + "/usr/local/cuda/include/thrust/system/detail/adl/replace.h" + "/usr/local/cuda/include/thrust/system/detail/adl/reverse.h" + "/usr/local/cuda/include/thrust/system/detail/adl/scan.h" + "/usr/local/cuda/include/thrust/system/detail/adl/scan_by_key.h" + "/usr/local/cuda/include/thrust/system/detail/adl/scatter.h" + "/usr/local/cuda/include/thrust/system/detail/adl/sequence.h" + "/usr/local/cuda/include/thrust/system/detail/adl/sort.h" + "/usr/local/cuda/include/thrust/system/detail/adl/swap_ranges.h" + "/usr/local/cuda/include/thrust/system/detail/adl/tabulate.h" + "/usr/local/cuda/include/thrust/system/detail/adl/temporary_buffer.h" + "/usr/local/cuda/include/thrust/system/detail/adl/transform.h" + "/usr/local/cuda/include/thrust/system/detail/adl/transform_reduce.h" + 
"/usr/local/cuda/include/thrust/system/detail/adl/uninitialized_fill.h" + "/usr/local/cuda/include/thrust/system/detail/bad_alloc.h" + "/usr/local/cuda/include/thrust/system/detail/errno.h" + "/usr/local/cuda/include/thrust/system/detail/error_category.inl" + "/usr/local/cuda/include/thrust/system/detail/error_code.inl" + "/usr/local/cuda/include/thrust/system/detail/error_condition.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/advance.h" + "/usr/local/cuda/include/thrust/system/detail/generic/advance.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/copy.h" + "/usr/local/cuda/include/thrust/system/detail/generic/copy.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/copy_if.h" + "/usr/local/cuda/include/thrust/system/detail/generic/copy_if.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/distance.h" + "/usr/local/cuda/include/thrust/system/detail/generic/distance.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/extrema.h" + "/usr/local/cuda/include/thrust/system/detail/generic/extrema.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/fill.h" + "/usr/local/cuda/include/thrust/system/detail/generic/find.h" + "/usr/local/cuda/include/thrust/system/detail/generic/find.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/for_each.h" + "/usr/local/cuda/include/thrust/system/detail/generic/generate.h" + "/usr/local/cuda/include/thrust/system/detail/generic/generate.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/memory.h" + "/usr/local/cuda/include/thrust/system/detail/generic/memory.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/merge.h" + "/usr/local/cuda/include/thrust/system/detail/generic/merge.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/reduce.h" + "/usr/local/cuda/include/thrust/system/detail/generic/reduce.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/reduce_by_key.h" + 
"/usr/local/cuda/include/thrust/system/detail/generic/reduce_by_key.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/replace.h" + "/usr/local/cuda/include/thrust/system/detail/generic/replace.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/reverse.h" + "/usr/local/cuda/include/thrust/system/detail/generic/reverse.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/scan.h" + "/usr/local/cuda/include/thrust/system/detail/generic/scan.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/scan_by_key.h" + "/usr/local/cuda/include/thrust/system/detail/generic/scan_by_key.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/scatter.h" + "/usr/local/cuda/include/thrust/system/detail/generic/scatter.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/select_system.h" + "/usr/local/cuda/include/thrust/system/detail/generic/select_system.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/select_system_exists.h" + "/usr/local/cuda/include/thrust/system/detail/generic/sequence.h" + "/usr/local/cuda/include/thrust/system/detail/generic/sequence.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/sort.h" + "/usr/local/cuda/include/thrust/system/detail/generic/sort.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/swap_ranges.h" + "/usr/local/cuda/include/thrust/system/detail/generic/swap_ranges.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/tabulate.h" + "/usr/local/cuda/include/thrust/system/detail/generic/tabulate.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/tag.h" + "/usr/local/cuda/include/thrust/system/detail/generic/temporary_buffer.h" + "/usr/local/cuda/include/thrust/system/detail/generic/temporary_buffer.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/transform.h" + "/usr/local/cuda/include/thrust/system/detail/generic/transform.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/transform_reduce.h" + 
"/usr/local/cuda/include/thrust/system/detail/generic/transform_reduce.inl" + "/usr/local/cuda/include/thrust/system/detail/generic/uninitialized_fill.h" + "/usr/local/cuda/include/thrust/system/detail/generic/uninitialized_fill.inl" + "/usr/local/cuda/include/thrust/system/detail/sequential/adjacent_difference.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/assign_value.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/binary_search.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/copy.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/copy.inl" + "/usr/local/cuda/include/thrust/system/detail/sequential/copy_backward.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/copy_if.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/execution_policy.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/extrema.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/fill.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/find.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/for_each.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/general_copy.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/generate.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/get_value.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/insertion_sort.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/iter_swap.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/malloc_and_free.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/merge.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/merge.inl" + "/usr/local/cuda/include/thrust/system/detail/sequential/partition.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/reduce.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/reduce_by_key.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/remove.h" + 
"/usr/local/cuda/include/thrust/system/detail/sequential/replace.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/reverse.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/scan.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/scan_by_key.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/scatter.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/sequence.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/set_operations.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/sort.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/sort.inl" + "/usr/local/cuda/include/thrust/system/detail/sequential/stable_merge_sort.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/stable_merge_sort.inl" + "/usr/local/cuda/include/thrust/system/detail/sequential/stable_primitive_sort.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/stable_primitive_sort.inl" + "/usr/local/cuda/include/thrust/system/detail/sequential/stable_radix_sort.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/stable_radix_sort.inl" + "/usr/local/cuda/include/thrust/system/detail/sequential/swap_ranges.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/tabulate.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/temporary_buffer.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/transform.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/transform_reduce.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/trivial_copy.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/uninitialized_fill.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/unique.h" + "/usr/local/cuda/include/thrust/system/detail/sequential/unique_by_key.h" + "/usr/local/cuda/include/thrust/system/detail/system_error.inl" + "/usr/local/cuda/include/thrust/system/error_code.h" + "/usr/local/cuda/include/thrust/system/system_error.h" + 
"/usr/local/cuda/include/thrust/system_error.h" + "/usr/local/cuda/include/thrust/tabulate.h" + "/usr/local/cuda/include/thrust/transform.h" + "/usr/local/cuda/include/thrust/transform_reduce.h" + "/usr/local/cuda/include/thrust/tuple.h" + "/usr/local/cuda/include/thrust/type_traits/integer_sequence.h" + "/usr/local/cuda/include/thrust/type_traits/is_contiguous_iterator.h" + "/usr/local/cuda/include/thrust/type_traits/is_trivially_relocatable.h" + "/usr/local/cuda/include/thrust/type_traits/logical_metafunctions.h" + "/usr/local/cuda/include/thrust/type_traits/remove_cvref.h" + "/usr/local/cuda/include/thrust/type_traits/void_t.h" + "/usr/local/cuda/include/thrust/uninitialized_fill.h" + "/usr/local/cuda/include/thrust/version.h" + "/usr/local/cuda/include/vector_functions.h" + "/usr/local/cuda/include/vector_functions.hpp" + "/usr/local/cuda/include/vector_types.h" + "/usr/local/envs/word/include/python3.8/Python.h" + "/usr/local/envs/word/include/python3.8/abstract.h" + "/usr/local/envs/word/include/python3.8/bltinmodule.h" + "/usr/local/envs/word/include/python3.8/boolobject.h" + "/usr/local/envs/word/include/python3.8/bytearrayobject.h" + "/usr/local/envs/word/include/python3.8/bytesobject.h" + "/usr/local/envs/word/include/python3.8/cellobject.h" + "/usr/local/envs/word/include/python3.8/ceval.h" + "/usr/local/envs/word/include/python3.8/classobject.h" + "/usr/local/envs/word/include/python3.8/code.h" + "/usr/local/envs/word/include/python3.8/codecs.h" + "/usr/local/envs/word/include/python3.8/compile.h" + "/usr/local/envs/word/include/python3.8/complexobject.h" + "/usr/local/envs/word/include/python3.8/context.h" + "/usr/local/envs/word/include/python3.8/cpython/abstract.h" + "/usr/local/envs/word/include/python3.8/cpython/dictobject.h" + "/usr/local/envs/word/include/python3.8/cpython/fileobject.h" + "/usr/local/envs/word/include/python3.8/cpython/initconfig.h" + "/usr/local/envs/word/include/python3.8/cpython/object.h" + 
"/usr/local/envs/word/include/python3.8/cpython/objimpl.h" + "/usr/local/envs/word/include/python3.8/cpython/pyerrors.h" + "/usr/local/envs/word/include/python3.8/cpython/pylifecycle.h" + "/usr/local/envs/word/include/python3.8/cpython/pymem.h" + "/usr/local/envs/word/include/python3.8/cpython/pystate.h" + "/usr/local/envs/word/include/python3.8/cpython/sysmodule.h" + "/usr/local/envs/word/include/python3.8/cpython/traceback.h" + "/usr/local/envs/word/include/python3.8/cpython/tupleobject.h" + "/usr/local/envs/word/include/python3.8/cpython/unicodeobject.h" + "/usr/local/envs/word/include/python3.8/descrobject.h" + "/usr/local/envs/word/include/python3.8/dictobject.h" + "/usr/local/envs/word/include/python3.8/dtoa.h" + "/usr/local/envs/word/include/python3.8/enumobject.h" + "/usr/local/envs/word/include/python3.8/eval.h" + "/usr/local/envs/word/include/python3.8/fileobject.h" + "/usr/local/envs/word/include/python3.8/fileutils.h" + "/usr/local/envs/word/include/python3.8/floatobject.h" + "/usr/local/envs/word/include/python3.8/frameobject.h" + "/usr/local/envs/word/include/python3.8/funcobject.h" + "/usr/local/envs/word/include/python3.8/genobject.h" + "/usr/local/envs/word/include/python3.8/import.h" + "/usr/local/envs/word/include/python3.8/intrcheck.h" + "/usr/local/envs/word/include/python3.8/iterobject.h" + "/usr/local/envs/word/include/python3.8/listobject.h" + "/usr/local/envs/word/include/python3.8/longintrepr.h" + "/usr/local/envs/word/include/python3.8/longobject.h" + "/usr/local/envs/word/include/python3.8/memoryobject.h" + "/usr/local/envs/word/include/python3.8/methodobject.h" + "/usr/local/envs/word/include/python3.8/modsupport.h" + "/usr/local/envs/word/include/python3.8/moduleobject.h" + "/usr/local/envs/word/include/python3.8/namespaceobject.h" + "/usr/local/envs/word/include/python3.8/object.h" + "/usr/local/envs/word/include/python3.8/objimpl.h" + "/usr/local/envs/word/include/python3.8/odictobject.h" + 
"/usr/local/envs/word/include/python3.8/osmodule.h" + "/usr/local/envs/word/include/python3.8/patchlevel.h" + "/usr/local/envs/word/include/python3.8/picklebufobject.h" + "/usr/local/envs/word/include/python3.8/pyarena.h" + "/usr/local/envs/word/include/python3.8/pycapsule.h" + "/usr/local/envs/word/include/python3.8/pyconfig.h" + "/usr/local/envs/word/include/python3.8/pyctype.h" + "/usr/local/envs/word/include/python3.8/pydebug.h" + "/usr/local/envs/word/include/python3.8/pyerrors.h" + "/usr/local/envs/word/include/python3.8/pyfpe.h" + "/usr/local/envs/word/include/python3.8/pyhash.h" + "/usr/local/envs/word/include/python3.8/pylifecycle.h" + "/usr/local/envs/word/include/python3.8/pymacconfig.h" + "/usr/local/envs/word/include/python3.8/pymacro.h" + "/usr/local/envs/word/include/python3.8/pymath.h" + "/usr/local/envs/word/include/python3.8/pymem.h" + "/usr/local/envs/word/include/python3.8/pyport.h" + "/usr/local/envs/word/include/python3.8/pystate.h" + "/usr/local/envs/word/include/python3.8/pystrcmp.h" + "/usr/local/envs/word/include/python3.8/pystrtod.h" + "/usr/local/envs/word/include/python3.8/pythonrun.h" + "/usr/local/envs/word/include/python3.8/pythread.h" + "/usr/local/envs/word/include/python3.8/pytime.h" + "/usr/local/envs/word/include/python3.8/rangeobject.h" + "/usr/local/envs/word/include/python3.8/setobject.h" + "/usr/local/envs/word/include/python3.8/sliceobject.h" + "/usr/local/envs/word/include/python3.8/structseq.h" + "/usr/local/envs/word/include/python3.8/sysmodule.h" + "/usr/local/envs/word/include/python3.8/traceback.h" + "/usr/local/envs/word/include/python3.8/tracemalloc.h" + "/usr/local/envs/word/include/python3.8/tupleobject.h" + "/usr/local/envs/word/include/python3.8/typeslots.h" + "/usr/local/envs/word/include/python3.8/unicodeobject.h" + "/usr/local/envs/word/include/python3.8/warnings.h" + "/usr/local/envs/word/include/python3.8/weakrefobject.h" +) + diff --git 
a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..39bb8d392ceafc30857efa09d242bfb4b7624d75 Binary files /dev/null and b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o differ diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.Release.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.Release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..3a020a372b868c208e0d5bbb7f694684f5dbd670 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.Release.cmake @@ -0,0 +1,314 @@ +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. + +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + + +########################################################################## +# This file runs the nvcc commands to produce the desired output file along with +# the dependency file needed by CMake to compute dependencies. In addition the +# file checks the output of each command and if the command fails it deletes the +# output files. + +# Input variables +# +# verbose:BOOL=<> OFF: Be as quiet as possible (default) +# ON : Describe each step +# +# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or +# RelWithDebInfo, but it should match one of the +# entries in CUDA_HOST_FLAGS. This is the build +# configuration used when compiling the code. If +# blank or unspecified Debug is assumed as this is +# what CMake does. +# +# generated_file:STRING=<> File to generate. This argument must be passed in. +# +# generated_cubin_file:STRING=<> File to generate. This argument must be passed +# in if build_cubin is true. 
+ +cmake_policy(PUSH) +cmake_policy(SET CMP0007 NEW) +if(NOT generated_file) + message(FATAL_ERROR "You must specify generated_file on the command line") +endif() + +# Set these up as variables to make reading the generated file easier +set(CMAKE_COMMAND "/usr/local/envs/word/bin/cmake") # path +set(source_file "/content/Word-As-Image/diffvg/scene.cpp") # path +set(NVCC_generated_dependency_file "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_scene.cpp.o.NVCC-depend") # path +set(cmake_dependency_file "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_scene.cpp.o.depend") # path +set(CUDA_make2cmake "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/make2cmake.cmake") # path +set(CUDA_parse_cubin "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/parse_cubin.cmake") # path +set(build_cubin OFF) # bool +set(CUDA_HOST_COMPILER "/usr/bin/cc") # path +# We won't actually use these variables for now, but we need to set this, in +# order to force this file to be run again if it changes. 
+set(generated_file_path "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//.") # path +set(generated_file_internal "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_scene.cpp.o") # path +set(generated_cubin_file_internal "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_scene.cpp.o.cubin.txt") # path + +set(CUDA_NVCC_EXECUTABLE "/usr/local/cuda/bin/nvcc") # path +set(CUDA_NVCC_FLAGS -std=c++11 ;; ) # list +# Build specific configuration flags +set(CUDA_NVCC_FLAGS_RELEASE ; ) +set(CUDA_NVCC_FLAGS_DEBUG ; ) +set(CUDA_NVCC_FLAGS_MINSIZEREL ; ) +set(CUDA_NVCC_FLAGS_RELWITHDEBINFO ; ) +set(nvcc_flags -m64;-Ddiffvg_EXPORTS) # list +set(CUDA_NVCC_INCLUDE_DIRS [==[/usr/local/cuda/include;/usr/local/envs/word/include/python3.8;/usr/local/include/python3.10;/usr/local/include/python3.10;/content/Word-As-Image/diffvg/pybind11/include;/usr/local/cuda/include]==]) # list (needs to be in lua quotes to address backslashes) +string(REPLACE "\\" "/" CUDA_NVCC_INCLUDE_DIRS "${CUDA_NVCC_INCLUDE_DIRS}") +set(CUDA_NVCC_COMPILE_DEFINITIONS [==[COMPILE_WITH_CUDA]==]) # list (needs to be in lua quotes see #16510 ). +set(format_flag "-c") # string +set(cuda_language_flag -x=cu) # list + +# Clean up list of include directories and add -I flags +list(REMOVE_DUPLICATES CUDA_NVCC_INCLUDE_DIRS) +set(CUDA_NVCC_INCLUDE_ARGS) +foreach(dir ${CUDA_NVCC_INCLUDE_DIRS}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. 
+ list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}") +endforeach() + +# Clean up list of compile definitions, add -D flags, and append to nvcc_flags +list(REMOVE_DUPLICATES CUDA_NVCC_COMPILE_DEFINITIONS) +foreach(def ${CUDA_NVCC_COMPILE_DEFINITIONS}) + list(APPEND nvcc_flags "-D${def}") +endforeach() + +if(build_cubin AND NOT generated_cubin_file) + message(FATAL_ERROR "You must specify generated_cubin_file on the command line") +endif() + +# This is the list of host compilation flags. It C or CXX should already have +# been chosen by FindCUDA.cmake. +set(CMAKE_HOST_FLAGS -DVERSION_INFO=\"0.0.1\" -fPIC) +set(CMAKE_HOST_FLAGS_RELEASE -O3 -DNDEBUG) +set(CMAKE_HOST_FLAGS_DEBUG -g) +set(CMAKE_HOST_FLAGS_MINSIZEREL -Os -DNDEBUG) +set(CMAKE_HOST_FLAGS_RELWITHDEBINFO -O2 -g -DNDEBUG) + +# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler +set(nvcc_host_compiler_flags "") +# If we weren't given a build_configuration, use Debug. +if(NOT build_configuration) + set(build_configuration Debug) +endif() +string(TOUPPER "${build_configuration}" build_configuration) +#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}") +foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. 
+ string(APPEND nvcc_host_compiler_flags ",\"${flag}\"") +endforeach() +if (nvcc_host_compiler_flags) + set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags}) +endif() +#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"") +# Add the build specific configuration flags +list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}}) + +# Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority +list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 ) +list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 ) +if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER ) + if (CUDA_HOST_COMPILER STREQUAL "" AND DEFINED CCBIN) + set(CCBIN -ccbin "${CCBIN}") + else() + set(CCBIN -ccbin "${CUDA_HOST_COMPILER}") + endif() +endif() + +# cuda_execute_process - Executes a command with optional command echo and status message. +# +# status - Status message to print if verbose is true +# command - COMMAND argument from the usual execute_process argument structure +# ARGN - Remaining arguments are the command with arguments +# +# CUDA_result - return value from running the command +# +# Make this a macro instead of a function, so that things like RESULT_VARIABLE +# and other return variables are present after executing the process. +macro(cuda_execute_process status command) + set(_command ${command}) + if(NOT "x${_command}" STREQUAL "xCOMMAND") + message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})") + endif() + if(verbose) + execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status}) + # Now we need to build up our command string. We are accounting for quotes + # and spaces, anything else is left up to the user to fix if they want to + # copy and paste a runnable command line. + set(cuda_execute_process_string) + foreach(arg ${ARGN}) + # If there are quotes, escape them, so they come through. 
+ string(REPLACE "\"" "\\\"" arg ${arg}) + # Args with spaces need quotes around them to get them to be parsed as a single argument. + if(arg MATCHES " ") + list(APPEND cuda_execute_process_string "\"${arg}\"") + else() + list(APPEND cuda_execute_process_string ${arg}) + endif() + endforeach() + # Echo the command + execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string}) + endif() + # Run the command + execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result ) +endmacro() + +# Delete the target file +cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${generated_file}" + ) + +# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag +# for dependency generation and hope for the best. +set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}") +set(CUDA_VERSION 12.2) +if(CUDA_VERSION VERSION_LESS "3.0") + # Note that this will remove all occurrences of -G. + list(REMOVE_ITEM depends_CUDA_NVCC_FLAGS "-G") +endif() + +# nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This +# can cause incorrect dependencies when #including files based on this macro which is +# defined in the generating passes of nvcc invocation. We will go ahead and manually +# define this for now until a future version fixes this bug. +set(CUDACC_DEFINE -D__CUDACC__) + +# Generate the dependency file +cuda_execute_process( + "Generating dependency file: ${NVCC_generated_dependency_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + -M + ${CUDACC_DEFINE} + "${source_file}" + -o "${NVCC_generated_dependency_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${depends_CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the cmake readable dependency file to a temp file. Don't put the +# quotes just around the filenames for the input_file and output_file variables. 
+# CMake will pass the quotes through and not be able to find the file. +cuda_execute_process( + "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:FILEPATH=${NVCC_generated_dependency_file}" + -D "output_file:FILEPATH=${cmake_dependency_file}.tmp" + -D "verbose=${verbose}" + -P "${CUDA_make2cmake}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Copy the file if it is different +cuda_execute_process( + "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Delete the temporary file +cuda_execute_process( + "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the code +cuda_execute_process( + "Generating ${generated_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${cuda_language_flag} + ${format_flag} -o "${generated_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + # Since nvcc can sometimes leave half done files make sure that we delete the output file. + cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${generated_file}" + ) + message(FATAL_ERROR "Error generating file ${generated_file}") +else() + if(verbose) + message("Generated ${generated_file} successfully.") + endif() +endif() + +# Cubin resource report commands. +if( build_cubin ) + # Run with -cubin to produce resource usage report. 
+ cuda_execute_process( + "Generating ${generated_cubin_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${CUDA_NVCC_FLAGS} + ${nvcc_flags} + ${CCBIN} + ${nvcc_host_compiler_flags} + -DNVCC + -cubin + -o "${generated_cubin_file}" + ${CUDA_NVCC_INCLUDE_ARGS} + ) + + # Execute the parser script. + cuda_execute_process( + "Executing the parser script" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:STRING=${generated_cubin_file}" + -P "${CUDA_parse_cubin}" + ) + +endif() + +cmake_policy(POP) diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.cmake.pre-gen b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.cmake.pre-gen new file mode 100644 index 0000000000000000000000000000000000000000..57ad740c31b899e7ab3aecf64d34f6ef7c695e8f --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.cmake.pre-gen @@ -0,0 +1,314 @@ +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. + +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + + +########################################################################## +# This file runs the nvcc commands to produce the desired output file along with +# the dependency file needed by CMake to compute dependencies. In addition the +# file checks the output of each command and if the command fails it deletes the +# output files. + +# Input variables +# +# verbose:BOOL=<> OFF: Be as quiet as possible (default) +# ON : Describe each step +# +# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or +# RelWithDebInfo, but it should match one of the +# entries in CUDA_HOST_FLAGS. This is the build +# configuration used when compiling the code. If +# blank or unspecified Debug is assumed as this is +# what CMake does. +# +# generated_file:STRING=<> File to generate. This argument must be passed in. +# +# generated_cubin_file:STRING=<> File to generate. This argument must be passed +# in if build_cubin is true. 
+ +cmake_policy(PUSH) +cmake_policy(SET CMP0007 NEW) +if(NOT generated_file) + message(FATAL_ERROR "You must specify generated_file on the command line") +endif() + +# Set these up as variables to make reading the generated file easier +set(CMAKE_COMMAND "/usr/local/envs/word/bin/cmake") # path +set(source_file "/content/Word-As-Image/diffvg/scene.cpp") # path +set(NVCC_generated_dependency_file "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_scene.cpp.o.NVCC-depend") # path +set(cmake_dependency_file "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//diffvg_generated_scene.cpp.o.depend") # path +set(CUDA_make2cmake "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/make2cmake.cmake") # path +set(CUDA_parse_cubin "/usr/local/envs/word/share/cmake-3.26/Modules/FindCUDA/parse_cubin.cmake") # path +set(build_cubin OFF) # bool +set(CUDA_HOST_COMPILER "/usr/bin/cc") # path +# We won't actually use these variables for now, but we need to set this, in +# order to force this file to be run again if it changes. 
+set(generated_file_path "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//.") # path +set(generated_file_internal "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_scene.cpp.o") # path +set(generated_cubin_file_internal "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir//./diffvg_generated_scene.cpp.o.cubin.txt") # path + +set(CUDA_NVCC_EXECUTABLE "/usr/local/cuda/bin/nvcc") # path +set(CUDA_NVCC_FLAGS -std=c++11 ;; ) # list +# Build specific configuration flags +set(CUDA_NVCC_FLAGS_RELEASE ; ) +set(CUDA_NVCC_FLAGS_DEBUG ; ) +set(CUDA_NVCC_FLAGS_MINSIZEREL ; ) +set(CUDA_NVCC_FLAGS_RELWITHDEBINFO ; ) +set(nvcc_flags -m64;-Ddiffvg_EXPORTS) # list +set(CUDA_NVCC_INCLUDE_DIRS [==[/usr/local/cuda/include;$]==]) # list (needs to be in lua quotes to address backslashes) +string(REPLACE "\\" "/" CUDA_NVCC_INCLUDE_DIRS "${CUDA_NVCC_INCLUDE_DIRS}") +set(CUDA_NVCC_COMPILE_DEFINITIONS [==[$]==]) # list (needs to be in lua quotes see #16510 ). +set(format_flag "-c") # string +set(cuda_language_flag -x=cu) # list + +# Clean up list of include directories and add -I flags +list(REMOVE_DUPLICATES CUDA_NVCC_INCLUDE_DIRS) +set(CUDA_NVCC_INCLUDE_ARGS) +foreach(dir ${CUDA_NVCC_INCLUDE_DIRS}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. + list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}") +endforeach() + +# Clean up list of compile definitions, add -D flags, and append to nvcc_flags +list(REMOVE_DUPLICATES CUDA_NVCC_COMPILE_DEFINITIONS) +foreach(def ${CUDA_NVCC_COMPILE_DEFINITIONS}) + list(APPEND nvcc_flags "-D${def}") +endforeach() + +if(build_cubin AND NOT generated_cubin_file) + message(FATAL_ERROR "You must specify generated_cubin_file on the command line") +endif() + +# This is the list of host compilation flags. It C or CXX should already have +# been chosen by FindCUDA.cmake. 
+set(CMAKE_HOST_FLAGS -DVERSION_INFO=\"0.0.1\" -fPIC) +set(CMAKE_HOST_FLAGS_RELEASE -O3 -DNDEBUG) +set(CMAKE_HOST_FLAGS_DEBUG -g) +set(CMAKE_HOST_FLAGS_MINSIZEREL -Os -DNDEBUG) +set(CMAKE_HOST_FLAGS_RELWITHDEBINFO -O2 -g -DNDEBUG) + +# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler +set(nvcc_host_compiler_flags "") +# If we weren't given a build_configuration, use Debug. +if(NOT build_configuration) + set(build_configuration Debug) +endif() +string(TOUPPER "${build_configuration}" build_configuration) +#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}") +foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. + string(APPEND nvcc_host_compiler_flags ",\"${flag}\"") +endforeach() +if (nvcc_host_compiler_flags) + set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags}) +endif() +#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"") +# Add the build specific configuration flags +list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}}) + +# Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority +list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 ) +list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 ) +if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER ) + if (CUDA_HOST_COMPILER STREQUAL "" AND DEFINED CCBIN) + set(CCBIN -ccbin "${CCBIN}") + else() + set(CCBIN -ccbin "${CUDA_HOST_COMPILER}") + endif() +endif() + +# cuda_execute_process - Executes a command with optional command echo and status message. 
+# +# status - Status message to print if verbose is true +# command - COMMAND argument from the usual execute_process argument structure +# ARGN - Remaining arguments are the command with arguments +# +# CUDA_result - return value from running the command +# +# Make this a macro instead of a function, so that things like RESULT_VARIABLE +# and other return variables are present after executing the process. +macro(cuda_execute_process status command) + set(_command ${command}) + if(NOT "x${_command}" STREQUAL "xCOMMAND") + message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})") + endif() + if(verbose) + execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status}) + # Now we need to build up our command string. We are accounting for quotes + # and spaces, anything else is left up to the user to fix if they want to + # copy and paste a runnable command line. + set(cuda_execute_process_string) + foreach(arg ${ARGN}) + # If there are quotes, escape them, so they come through. + string(REPLACE "\"" "\\\"" arg ${arg}) + # Args with spaces need quotes around them to get them to be parsed as a single argument. + if(arg MATCHES " ") + list(APPEND cuda_execute_process_string "\"${arg}\"") + else() + list(APPEND cuda_execute_process_string ${arg}) + endif() + endforeach() + # Echo the command + execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string}) + endif() + # Run the command + execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result ) +endmacro() + +# Delete the target file +cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${generated_file}" + ) + +# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag +# for dependency generation and hope for the best. +set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}") +set(CUDA_VERSION 12.2) +if(CUDA_VERSION VERSION_LESS "3.0") + # Note that this will remove all occurrences of -G. 
+ list(REMOVE_ITEM depends_CUDA_NVCC_FLAGS "-G") +endif() + +# nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This +# can cause incorrect dependencies when #including files based on this macro which is +# defined in the generating passes of nvcc invocation. We will go ahead and manually +# define this for now until a future version fixes this bug. +set(CUDACC_DEFINE -D__CUDACC__) + +# Generate the dependency file +cuda_execute_process( + "Generating dependency file: ${NVCC_generated_dependency_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + -M + ${CUDACC_DEFINE} + "${source_file}" + -o "${NVCC_generated_dependency_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${depends_CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the cmake readable dependency file to a temp file. Don't put the +# quotes just around the filenames for the input_file and output_file variables. +# CMake will pass the quotes through and not be able to find the file. 
+cuda_execute_process( + "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:FILEPATH=${NVCC_generated_dependency_file}" + -D "output_file:FILEPATH=${cmake_dependency_file}.tmp" + -D "verbose=${verbose}" + -P "${CUDA_make2cmake}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Copy the file if it is different +cuda_execute_process( + "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Delete the temporary file +cuda_execute_process( + "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the code +cuda_execute_process( + "Generating ${generated_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${cuda_language_flag} + ${format_flag} -o "${generated_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + # Since nvcc can sometimes leave half done files make sure that we delete the output file. + cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E rm -f "${generated_file}" + ) + message(FATAL_ERROR "Error generating file ${generated_file}") +else() + if(verbose) + message("Generated ${generated_file} successfully.") + endif() +endif() + +# Cubin resource report commands. +if( build_cubin ) + # Run with -cubin to produce resource usage report. 
+ cuda_execute_process( + "Generating ${generated_cubin_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${CUDA_NVCC_FLAGS} + ${nvcc_flags} + ${CCBIN} + ${nvcc_host_compiler_flags} + -DNVCC + -cubin + -o "${generated_cubin_file}" + ${CUDA_NVCC_INCLUDE_ARGS} + ) + + # Execute the parser script. + cuda_execute_process( + "Executing the parser script" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:STRING=${generated_cubin_file}" + -P "${CUDA_parse_cubin}" + ) + +endif() + +cmake_policy(POP) diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.depend b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.depend new file mode 100644 index 0000000000000000000000000000000000000000..0ddd3530e9e70e1d6245fe95fc9efa688d9f3b3d --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o.depend @@ -0,0 +1,292 @@ +# Generated by: make2cmake.cmake +SET(CUDA_NVCC_DEPEND + "/content/Word-As-Image/diffvg/aabb.h" + "/content/Word-As-Image/diffvg/atomic.h" + "/content/Word-As-Image/diffvg/color.h" + "/content/Word-As-Image/diffvg/cuda_utils.h" + "/content/Word-As-Image/diffvg/diffvg.h" + "/content/Word-As-Image/diffvg/filter.h" + "/content/Word-As-Image/diffvg/matrix.h" + "/content/Word-As-Image/diffvg/ptr.h" + "/content/Word-As-Image/diffvg/scene.cpp" + "/content/Word-As-Image/diffvg/scene.h" + "/content/Word-As-Image/diffvg/shape.h" + "/content/Word-As-Image/diffvg/vector.h" + "/usr/include/alloca.h" + "/usr/include/asm-generic/errno-base.h" + "/usr/include/asm-generic/errno.h" + "/usr/include/assert.h" + "/usr/include/c++/11/algorithm" + "/usr/include/c++/11/atomic" + "/usr/include/c++/11/backward/binders.h" + "/usr/include/c++/11/bits/algorithmfwd.h" + "/usr/include/c++/11/bits/alloc_traits.h" + "/usr/include/c++/11/bits/allocator.h" + "/usr/include/c++/11/bits/atomic_base.h" + 
"/usr/include/c++/11/bits/atomic_lockfree_defines.h" + "/usr/include/c++/11/bits/basic_ios.h" + "/usr/include/c++/11/bits/basic_ios.tcc" + "/usr/include/c++/11/bits/basic_string.h" + "/usr/include/c++/11/bits/basic_string.tcc" + "/usr/include/c++/11/bits/char_traits.h" + "/usr/include/c++/11/bits/charconv.h" + "/usr/include/c++/11/bits/concept_check.h" + "/usr/include/c++/11/bits/cpp_type_traits.h" + "/usr/include/c++/11/bits/cxxabi_forced.h" + "/usr/include/c++/11/bits/cxxabi_init_exception.h" + "/usr/include/c++/11/bits/exception.h" + "/usr/include/c++/11/bits/exception_defines.h" + "/usr/include/c++/11/bits/exception_ptr.h" + "/usr/include/c++/11/bits/functexcept.h" + "/usr/include/c++/11/bits/functional_hash.h" + "/usr/include/c++/11/bits/hash_bytes.h" + "/usr/include/c++/11/bits/ios_base.h" + "/usr/include/c++/11/bits/istream.tcc" + "/usr/include/c++/11/bits/locale_classes.h" + "/usr/include/c++/11/bits/locale_classes.tcc" + "/usr/include/c++/11/bits/locale_facets.h" + "/usr/include/c++/11/bits/locale_facets.tcc" + "/usr/include/c++/11/bits/localefwd.h" + "/usr/include/c++/11/bits/memoryfwd.h" + "/usr/include/c++/11/bits/move.h" + "/usr/include/c++/11/bits/nested_exception.h" + "/usr/include/c++/11/bits/ostream.tcc" + "/usr/include/c++/11/bits/ostream_insert.h" + "/usr/include/c++/11/bits/parse_numbers.h" + "/usr/include/c++/11/bits/postypes.h" + "/usr/include/c++/11/bits/predefined_ops.h" + "/usr/include/c++/11/bits/ptr_traits.h" + "/usr/include/c++/11/bits/range_access.h" + "/usr/include/c++/11/bits/std_abs.h" + "/usr/include/c++/11/bits/stl_algo.h" + "/usr/include/c++/11/bits/stl_algobase.h" + "/usr/include/c++/11/bits/stl_bvector.h" + "/usr/include/c++/11/bits/stl_construct.h" + "/usr/include/c++/11/bits/stl_function.h" + "/usr/include/c++/11/bits/stl_heap.h" + "/usr/include/c++/11/bits/stl_iterator.h" + "/usr/include/c++/11/bits/stl_iterator_base_funcs.h" + "/usr/include/c++/11/bits/stl_iterator_base_types.h" + "/usr/include/c++/11/bits/stl_numeric.h" + 
"/usr/include/c++/11/bits/stl_pair.h" + "/usr/include/c++/11/bits/stl_relops.h" + "/usr/include/c++/11/bits/stl_tempbuf.h" + "/usr/include/c++/11/bits/stl_uninitialized.h" + "/usr/include/c++/11/bits/stl_vector.h" + "/usr/include/c++/11/bits/streambuf.tcc" + "/usr/include/c++/11/bits/streambuf_iterator.h" + "/usr/include/c++/11/bits/stringfwd.h" + "/usr/include/c++/11/bits/uniform_int_dist.h" + "/usr/include/c++/11/bits/vector.tcc" + "/usr/include/c++/11/cassert" + "/usr/include/c++/11/cctype" + "/usr/include/c++/11/cerrno" + "/usr/include/c++/11/chrono" + "/usr/include/c++/11/clocale" + "/usr/include/c++/11/cmath" + "/usr/include/c++/11/cstddef" + "/usr/include/c++/11/cstdint" + "/usr/include/c++/11/cstdio" + "/usr/include/c++/11/cstdlib" + "/usr/include/c++/11/cstring" + "/usr/include/c++/11/ctime" + "/usr/include/c++/11/cwchar" + "/usr/include/c++/11/cwctype" + "/usr/include/c++/11/debug/assertions.h" + "/usr/include/c++/11/debug/debug.h" + "/usr/include/c++/11/exception" + "/usr/include/c++/11/ext/alloc_traits.h" + "/usr/include/c++/11/ext/atomicity.h" + "/usr/include/c++/11/ext/new_allocator.h" + "/usr/include/c++/11/ext/numeric_traits.h" + "/usr/include/c++/11/ext/string_conversions.h" + "/usr/include/c++/11/ext/type_traits.h" + "/usr/include/c++/11/initializer_list" + "/usr/include/c++/11/ios" + "/usr/include/c++/11/iosfwd" + "/usr/include/c++/11/iostream" + "/usr/include/c++/11/istream" + "/usr/include/c++/11/limits" + "/usr/include/c++/11/math.h" + "/usr/include/c++/11/new" + "/usr/include/c++/11/numeric" + "/usr/include/c++/11/ostream" + "/usr/include/c++/11/ratio" + "/usr/include/c++/11/stdexcept" + "/usr/include/c++/11/stdlib.h" + "/usr/include/c++/11/streambuf" + "/usr/include/c++/11/string" + "/usr/include/c++/11/system_error" + "/usr/include/c++/11/type_traits" + "/usr/include/c++/11/typeinfo" + "/usr/include/c++/11/utility" + "/usr/include/c++/11/vector" + "/usr/include/ctype.h" + "/usr/include/endian.h" + "/usr/include/errno.h" + 
"/usr/include/features-time64.h" + "/usr/include/features.h" + "/usr/include/limits.h" + "/usr/include/linux/errno.h" + "/usr/include/linux/limits.h" + "/usr/include/locale.h" + "/usr/include/math.h" + "/usr/include/pthread.h" + "/usr/include/sched.h" + "/usr/include/stdc-predef.h" + "/usr/include/stdint.h" + "/usr/include/stdio.h" + "/usr/include/stdlib.h" + "/usr/include/string.h" + "/usr/include/strings.h" + "/usr/include/time.h" + "/usr/include/wchar.h" + "/usr/include/wctype.h" + "/usr/include/x86_64-linux-gnu/asm/errno.h" + "/usr/include/x86_64-linux-gnu/bits/atomic_wide_counter.h" + "/usr/include/x86_64-linux-gnu/bits/byteswap.h" + "/usr/include/x86_64-linux-gnu/bits/cpu-set.h" + "/usr/include/x86_64-linux-gnu/bits/endian.h" + "/usr/include/x86_64-linux-gnu/bits/endianness.h" + "/usr/include/x86_64-linux-gnu/bits/errno.h" + "/usr/include/x86_64-linux-gnu/bits/floatn-common.h" + "/usr/include/x86_64-linux-gnu/bits/floatn.h" + "/usr/include/x86_64-linux-gnu/bits/flt-eval-method.h" + "/usr/include/x86_64-linux-gnu/bits/fp-fast.h" + "/usr/include/x86_64-linux-gnu/bits/fp-logb.h" + "/usr/include/x86_64-linux-gnu/bits/iscanonical.h" + "/usr/include/x86_64-linux-gnu/bits/libc-header-start.h" + "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" + "/usr/include/x86_64-linux-gnu/bits/local_lim.h" + "/usr/include/x86_64-linux-gnu/bits/locale.h" + "/usr/include/x86_64-linux-gnu/bits/long-double.h" + "/usr/include/x86_64-linux-gnu/bits/math-vector.h" + "/usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h" + "/usr/include/x86_64-linux-gnu/bits/mathcalls-narrow.h" + "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" + "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" + "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" + "/usr/include/x86_64-linux-gnu/bits/pthread_stack_min-dynamic.h" + "/usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h" + "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" + "/usr/include/x86_64-linux-gnu/bits/sched.h" + 
"/usr/include/x86_64-linux-gnu/bits/select.h" + "/usr/include/x86_64-linux-gnu/bits/select2.h" + "/usr/include/x86_64-linux-gnu/bits/setjmp.h" + "/usr/include/x86_64-linux-gnu/bits/stdint-intn.h" + "/usr/include/x86_64-linux-gnu/bits/stdint-uintn.h" + "/usr/include/x86_64-linux-gnu/bits/stdio.h" + "/usr/include/x86_64-linux-gnu/bits/stdio2.h" + "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" + "/usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h" + "/usr/include/x86_64-linux-gnu/bits/stdlib-float.h" + "/usr/include/x86_64-linux-gnu/bits/stdlib.h" + "/usr/include/x86_64-linux-gnu/bits/string_fortified.h" + "/usr/include/x86_64-linux-gnu/bits/strings_fortified.h" + "/usr/include/x86_64-linux-gnu/bits/struct_mutex.h" + "/usr/include/x86_64-linux-gnu/bits/struct_rwlock.h" + "/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h" + "/usr/include/x86_64-linux-gnu/bits/time.h" + "/usr/include/x86_64-linux-gnu/bits/time64.h" + "/usr/include/x86_64-linux-gnu/bits/timesize.h" + "/usr/include/x86_64-linux-gnu/bits/timex.h" + "/usr/include/x86_64-linux-gnu/bits/types.h" + "/usr/include/x86_64-linux-gnu/bits/types/FILE.h" + "/usr/include/x86_64-linux-gnu/bits/types/__FILE.h" + "/usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/__locale_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/clock_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/clockid_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/error_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/locale_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/mbstate_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/sigset_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h" + 
"/usr/include/x86_64-linux-gnu/bits/types/struct___jmp_buf_tag.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_sched_param.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h" + "/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h" + "/usr/include/x86_64-linux-gnu/bits/types/time_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/timer_t.h" + "/usr/include/x86_64-linux-gnu/bits/types/wint_t.h" + "/usr/include/x86_64-linux-gnu/bits/typesizes.h" + "/usr/include/x86_64-linux-gnu/bits/uintn-identity.h" + "/usr/include/x86_64-linux-gnu/bits/uio_lim.h" + "/usr/include/x86_64-linux-gnu/bits/waitflags.h" + "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" + "/usr/include/x86_64-linux-gnu/bits/wchar.h" + "/usr/include/x86_64-linux-gnu/bits/wchar2.h" + "/usr/include/x86_64-linux-gnu/bits/wctype-wchar.h" + "/usr/include/x86_64-linux-gnu/bits/wordsize.h" + "/usr/include/x86_64-linux-gnu/bits/xopen_lim.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/atomic_word.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/c++allocator.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/c++config.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/c++locale.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/cpu_defines.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/ctype_base.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/ctype_inline.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/error_constants.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/gthr-default.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/gthr.h" + "/usr/include/x86_64-linux-gnu/c++/11/bits/os_defines.h" + "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" + "/usr/include/x86_64-linux-gnu/gnu/stubs.h" + "/usr/include/x86_64-linux-gnu/sys/cdefs.h" + "/usr/include/x86_64-linux-gnu/sys/select.h" + "/usr/include/x86_64-linux-gnu/sys/single_threaded.h" + "/usr/include/x86_64-linux-gnu/sys/types.h" + 
"/usr/lib/gcc/x86_64-linux-gnu/11/include/limits.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/stdarg.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/stddef.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/stdint.h" + "/usr/lib/gcc/x86_64-linux-gnu/11/include/syslimits.h" + "/usr/local/cuda/include/builtin_types.h" + "/usr/local/cuda/include/channel_descriptor.h" + "/usr/local/cuda/include/crt/common_functions.h" + "/usr/local/cuda/include/crt/cudacc_ext.h" + "/usr/local/cuda/include/crt/device_double_functions.h" + "/usr/local/cuda/include/crt/device_double_functions.hpp" + "/usr/local/cuda/include/crt/device_functions.h" + "/usr/local/cuda/include/crt/device_functions.hpp" + "/usr/local/cuda/include/crt/host_config.h" + "/usr/local/cuda/include/crt/host_defines.h" + "/usr/local/cuda/include/crt/math_functions.h" + "/usr/local/cuda/include/crt/math_functions.hpp" + "/usr/local/cuda/include/crt/sm_70_rt.h" + "/usr/local/cuda/include/crt/sm_80_rt.h" + "/usr/local/cuda/include/crt/sm_90_rt.h" + "/usr/local/cuda/include/cuda.h" + "/usr/local/cuda/include/cuda_device_runtime_api.h" + "/usr/local/cuda/include/cuda_runtime.h" + "/usr/local/cuda/include/cuda_runtime_api.h" + "/usr/local/cuda/include/device_atomic_functions.h" + "/usr/local/cuda/include/device_launch_parameters.h" + "/usr/local/cuda/include/device_types.h" + "/usr/local/cuda/include/driver_functions.h" + "/usr/local/cuda/include/driver_types.h" + "/usr/local/cuda/include/library_types.h" + "/usr/local/cuda/include/sm_20_atomic_functions.h" + "/usr/local/cuda/include/sm_20_intrinsics.h" + "/usr/local/cuda/include/sm_30_intrinsics.h" + "/usr/local/cuda/include/sm_32_atomic_functions.h" + "/usr/local/cuda/include/sm_32_intrinsics.h" + "/usr/local/cuda/include/sm_35_atomic_functions.h" + "/usr/local/cuda/include/sm_35_intrinsics.h" + "/usr/local/cuda/include/sm_60_atomic_functions.h" + "/usr/local/cuda/include/sm_61_intrinsics.h" + "/usr/local/cuda/include/surface_indirect_functions.h" + 
"/usr/local/cuda/include/surface_types.h" + "/usr/local/cuda/include/texture_indirect_functions.h" + "/usr/local/cuda/include/texture_types.h" + "/usr/local/cuda/include/vector_functions.h" + "/usr/local/cuda/include/vector_functions.hpp" + "/usr/local/cuda/include/vector_types.h" +) + diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/flags.make b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/flags.make new file mode 100644 index 0000000000000000000000000000000000000000..f5103ae9e26e287267d646223d52de135b13b971 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/flags.make @@ -0,0 +1,10 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.26 + +# compile CXX with /usr/bin/c++ +CXX_DEFINES = -DCOMPILE_WITH_CUDA -Ddiffvg_EXPORTS + +CXX_INCLUDES = -I/usr/local/envs/word/include/python3.8 -I/usr/local/include/python3.10 -I/content/Word-As-Image/diffvg/pybind11/include -I/usr/local/cuda/include + +CXX_FLAGS = -DVERSION_INFO=\"0.0.1\" -O3 -std=gnu++11 -fPIC -Wall -g -O3 -fvisibility=hidden -Wno-unknown-pragmas + diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/link.txt b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/link.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d9e68dc8bb9c70e87f1ac84f61e11564f2400d2 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/link.txt @@ -0,0 +1 @@ +/usr/bin/c++ -fPIC -DVERSION_INFO=\"0.0.1\" -O3 -shared -o /content/Word-As-Image/diffvg/build/lib.linux-x86_64-cpython-38/diffvg.so CMakeFiles/diffvg.dir/atomic.cpp.o CMakeFiles/diffvg.dir/color.cpp.o CMakeFiles/diffvg.dir/parallel.cpp.o CMakeFiles/diffvg.dir/shape.cpp.o CMakeFiles/diffvg.dir/diffvg_generated_diffvg.cpp.o CMakeFiles/diffvg.dir/diffvg_generated_scene.cpp.o -L/usr/local/cuda/lib64/libcudart_static.a -L/content/Word-As-Image/diffvg/Threads::Threads 
-L/content/Word-As-Image/diffvg/dl -L/usr/lib/x86_64-linux-gnu/librt.a -Wl,-rpath,"\$ORIGIN" /usr/local/cuda/lib64/libcudart_static.a -ldl /usr/lib/x86_64-linux-gnu/librt.a diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/parallel.cpp.o b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/parallel.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..4b4ff44d05dd3311ff6f6e47e255ccaa4251b5cc Binary files /dev/null and b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/parallel.cpp.o differ diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/parallel.cpp.o.d b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/parallel.cpp.o.d new file mode 100644 index 0000000000000000000000000000000000000000..7755d326a8f73ea055d672fcd09e6639e11abd45 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/parallel.cpp.o.d @@ -0,0 +1,195 @@ +CMakeFiles/diffvg.dir/parallel.cpp.o: \ + /content/Word-As-Image/diffvg/parallel.cpp /usr/include/stdc-predef.h \ + /content/Word-As-Image/diffvg/parallel.h \ + /content/Word-As-Image/diffvg/vector.h \ + /content/Word-As-Image/diffvg/diffvg.h /usr/include/c++/11/cmath \ + /usr/include/x86_64-linux-gnu/c++/11/bits/c++config.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/os_defines.h \ + /usr/include/features.h /usr/include/features-time64.h \ + /usr/include/x86_64-linux-gnu/bits/wordsize.h \ + /usr/include/x86_64-linux-gnu/bits/timesize.h \ + /usr/include/x86_64-linux-gnu/sys/cdefs.h \ + /usr/include/x86_64-linux-gnu/bits/long-double.h \ + /usr/include/x86_64-linux-gnu/gnu/stubs.h \ + /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/cpu_defines.h \ + /usr/include/c++/11/bits/cpp_type_traits.h \ + /usr/include/c++/11/ext/type_traits.h /usr/include/math.h \ + /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \ + /usr/include/x86_64-linux-gnu/bits/types.h \ + 
/usr/include/x86_64-linux-gnu/bits/typesizes.h \ + /usr/include/x86_64-linux-gnu/bits/time64.h \ + /usr/include/x86_64-linux-gnu/bits/math-vector.h \ + /usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h \ + /usr/include/x86_64-linux-gnu/bits/floatn.h \ + /usr/include/x86_64-linux-gnu/bits/floatn-common.h \ + /usr/include/x86_64-linux-gnu/bits/flt-eval-method.h \ + /usr/include/x86_64-linux-gnu/bits/fp-logb.h \ + /usr/include/x86_64-linux-gnu/bits/fp-fast.h \ + /usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h \ + /usr/include/x86_64-linux-gnu/bits/mathcalls.h \ + /usr/include/x86_64-linux-gnu/bits/mathcalls-narrow.h \ + /usr/include/x86_64-linux-gnu/bits/iscanonical.h \ + /usr/include/c++/11/bits/std_abs.h /usr/include/stdlib.h \ + /usr/lib/gcc/x86_64-linux-gnu/11/include/stddef.h \ + /usr/include/x86_64-linux-gnu/bits/waitflags.h \ + /usr/include/x86_64-linux-gnu/bits/waitstatus.h \ + /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \ + /usr/include/x86_64-linux-gnu/sys/types.h \ + /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/time_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \ + /usr/include/x86_64-linux-gnu/bits/stdint-intn.h /usr/include/endian.h \ + /usr/include/x86_64-linux-gnu/bits/endian.h \ + /usr/include/x86_64-linux-gnu/bits/endianness.h \ + /usr/include/x86_64-linux-gnu/bits/byteswap.h \ + /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \ + /usr/include/x86_64-linux-gnu/sys/select.h \ + /usr/include/x86_64-linux-gnu/bits/select.h \ + /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \ + /usr/include/x86_64-linux-gnu/bits/select2.h \ + 
/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \ + /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \ + /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \ + /usr/include/x86_64-linux-gnu/bits/atomic_wide_counter.h \ + /usr/include/x86_64-linux-gnu/bits/struct_mutex.h \ + /usr/include/x86_64-linux-gnu/bits/struct_rwlock.h /usr/include/alloca.h \ + /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \ + /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \ + /usr/include/x86_64-linux-gnu/bits/stdlib.h /usr/include/c++/11/cstdint \ + /usr/lib/gcc/x86_64-linux-gnu/11/include/stdint.h /usr/include/stdint.h \ + /usr/include/x86_64-linux-gnu/bits/wchar.h \ + /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \ + /usr/include/c++/11/atomic /usr/include/c++/11/bits/atomic_base.h \ + /usr/include/c++/11/bits/atomic_lockfree_defines.h \ + /usr/include/c++/11/bits/move.h /usr/include/c++/11/type_traits \ + /usr/include/c++/11/iostream /usr/include/c++/11/ostream \ + /usr/include/c++/11/ios /usr/include/c++/11/iosfwd \ + /usr/include/c++/11/bits/stringfwd.h \ + /usr/include/c++/11/bits/memoryfwd.h /usr/include/c++/11/bits/postypes.h \ + /usr/include/c++/11/cwchar /usr/include/wchar.h \ + /usr/lib/gcc/x86_64-linux-gnu/11/include/stdarg.h \ + /usr/include/x86_64-linux-gnu/bits/types/wint_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/mbstate_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \ + /usr/include/x86_64-linux-gnu/bits/types/FILE.h \ + /usr/include/x86_64-linux-gnu/bits/wchar2.h \ + /usr/include/c++/11/exception /usr/include/c++/11/bits/exception.h \ + /usr/include/c++/11/bits/exception_ptr.h \ + /usr/include/c++/11/bits/exception_defines.h \ + /usr/include/c++/11/bits/cxxabi_init_exception.h \ + /usr/include/c++/11/typeinfo /usr/include/c++/11/bits/hash_bytes.h \ + /usr/include/c++/11/new /usr/include/c++/11/bits/nested_exception.h \ + /usr/include/c++/11/bits/char_traits.h \ + 
/usr/include/c++/11/bits/stl_algobase.h \ + /usr/include/c++/11/bits/functexcept.h \ + /usr/include/c++/11/ext/numeric_traits.h \ + /usr/include/c++/11/bits/stl_pair.h \ + /usr/include/c++/11/bits/stl_iterator_base_types.h \ + /usr/include/c++/11/bits/stl_iterator_base_funcs.h \ + /usr/include/c++/11/bits/concept_check.h \ + /usr/include/c++/11/debug/assertions.h \ + /usr/include/c++/11/bits/stl_iterator.h \ + /usr/include/c++/11/bits/ptr_traits.h /usr/include/c++/11/debug/debug.h \ + /usr/include/c++/11/bits/predefined_ops.h \ + /usr/include/c++/11/bits/localefwd.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/c++locale.h \ + /usr/include/c++/11/clocale /usr/include/locale.h \ + /usr/include/x86_64-linux-gnu/bits/locale.h /usr/include/c++/11/cctype \ + /usr/include/ctype.h /usr/include/c++/11/bits/ios_base.h \ + /usr/include/c++/11/ext/atomicity.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/gthr.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/gthr-default.h \ + /usr/include/pthread.h /usr/include/sched.h \ + /usr/include/x86_64-linux-gnu/bits/sched.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_sched_param.h \ + /usr/include/x86_64-linux-gnu/bits/cpu-set.h /usr/include/time.h \ + /usr/include/x86_64-linux-gnu/bits/time.h \ + /usr/include/x86_64-linux-gnu/bits/timex.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \ + /usr/include/x86_64-linux-gnu/bits/setjmp.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct___jmp_buf_tag.h \ + /usr/include/x86_64-linux-gnu/bits/pthread_stack_min-dynamic.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/atomic_word.h \ + /usr/include/x86_64-linux-gnu/sys/single_threaded.h \ + /usr/include/c++/11/bits/locale_classes.h /usr/include/c++/11/string \ + /usr/include/c++/11/bits/allocator.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/c++allocator.h \ + /usr/include/c++/11/ext/new_allocator.h \ + /usr/include/c++/11/bits/ostream_insert.h \ + 
/usr/include/c++/11/bits/cxxabi_forced.h \ + /usr/include/c++/11/bits/stl_function.h \ + /usr/include/c++/11/backward/binders.h \ + /usr/include/c++/11/bits/range_access.h \ + /usr/include/c++/11/initializer_list \ + /usr/include/c++/11/bits/basic_string.h \ + /usr/include/c++/11/ext/alloc_traits.h \ + /usr/include/c++/11/bits/alloc_traits.h \ + /usr/include/c++/11/bits/stl_construct.h \ + /usr/include/c++/11/ext/string_conversions.h /usr/include/c++/11/cstdlib \ + /usr/include/c++/11/cstdio /usr/include/stdio.h \ + /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \ + /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \ + /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \ + /usr/include/x86_64-linux-gnu/bits/stdio.h \ + /usr/include/x86_64-linux-gnu/bits/stdio2.h /usr/include/c++/11/cerrno \ + /usr/include/errno.h /usr/include/x86_64-linux-gnu/bits/errno.h \ + /usr/include/linux/errno.h /usr/include/x86_64-linux-gnu/asm/errno.h \ + /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h \ + /usr/include/x86_64-linux-gnu/bits/types/error_t.h \ + /usr/include/c++/11/bits/charconv.h \ + /usr/include/c++/11/bits/functional_hash.h \ + /usr/include/c++/11/bits/basic_string.tcc \ + /usr/include/c++/11/bits/locale_classes.tcc \ + /usr/include/c++/11/system_error \ + /usr/include/x86_64-linux-gnu/c++/11/bits/error_constants.h \ + /usr/include/c++/11/stdexcept /usr/include/c++/11/streambuf \ + /usr/include/c++/11/bits/streambuf.tcc \ + /usr/include/c++/11/bits/basic_ios.h \ + /usr/include/c++/11/bits/locale_facets.h /usr/include/c++/11/cwctype \ + /usr/include/wctype.h /usr/include/x86_64-linux-gnu/bits/wctype-wchar.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/ctype_base.h \ + /usr/include/c++/11/bits/streambuf_iterator.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/ctype_inline.h \ + /usr/include/c++/11/bits/locale_facets.tcc 
\ + /usr/include/c++/11/bits/basic_ios.tcc \ + /usr/include/c++/11/bits/ostream.tcc /usr/include/c++/11/istream \ + /usr/include/c++/11/bits/istream.tcc /usr/include/c++/11/mutex \ + /usr/include/c++/11/tuple /usr/include/c++/11/utility \ + /usr/include/c++/11/bits/stl_relops.h /usr/include/c++/11/array \ + /usr/include/c++/11/bits/uses_allocator.h \ + /usr/include/c++/11/bits/invoke.h /usr/include/c++/11/chrono \ + /usr/include/c++/11/ratio /usr/include/c++/11/limits \ + /usr/include/c++/11/ctime /usr/include/c++/11/bits/parse_numbers.h \ + /usr/include/c++/11/bits/std_mutex.h \ + /usr/include/c++/11/bits/unique_lock.h \ + /usr/include/c++/11/condition_variable \ + /usr/include/c++/11/bits/shared_ptr.h \ + /usr/include/c++/11/bits/shared_ptr_base.h \ + /usr/include/c++/11/bits/allocated_ptr.h \ + /usr/include/c++/11/bits/refwrap.h /usr/include/c++/11/bits/unique_ptr.h \ + /usr/include/c++/11/ext/aligned_buffer.h \ + /usr/include/c++/11/ext/concurrence.h /usr/include/c++/11/functional \ + /usr/include/c++/11/bits/std_function.h /usr/include/c++/11/cassert \ + /usr/include/assert.h /usr/include/c++/11/algorithm \ + /usr/include/c++/11/bits/stl_algo.h \ + /usr/include/c++/11/bits/algorithmfwd.h \ + /usr/include/c++/11/bits/stl_heap.h \ + /usr/include/c++/11/bits/stl_tempbuf.h \ + /usr/include/c++/11/bits/uniform_int_dist.h /usr/include/c++/11/list \ + /usr/include/c++/11/bits/stl_list.h /usr/include/c++/11/bits/list.tcc \ + /usr/include/c++/11/thread /usr/include/c++/11/bits/std_thread.h \ + /usr/include/c++/11/bits/this_thread_sleep.h /usr/include/c++/11/vector \ + /usr/include/c++/11/bits/stl_uninitialized.h \ + /usr/include/c++/11/bits/stl_vector.h \ + /usr/include/c++/11/bits/stl_bvector.h \ + /usr/include/c++/11/bits/vector.tcc diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/progress.make b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/progress.make new file mode 100644 index 
0000000000000000000000000000000000000000..2f823159ca7c26f106b77946180752712959afe6 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/progress.make @@ -0,0 +1,8 @@ +CMAKE_PROGRESS_1 = 1 +CMAKE_PROGRESS_2 = 2 +CMAKE_PROGRESS_3 = 3 +CMAKE_PROGRESS_4 = 4 +CMAKE_PROGRESS_5 = 5 +CMAKE_PROGRESS_6 = 6 +CMAKE_PROGRESS_7 = 7 + diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/shape.cpp.o b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/shape.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..4ec8baf5389e3c04d0aa0625d357033bd80e0c69 Binary files /dev/null and b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/shape.cpp.o differ diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/shape.cpp.o.d b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/shape.cpp.o.d new file mode 100644 index 0000000000000000000000000000000000000000..d70bd4c64718caa3539474d5353d613fbb471dfa --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/diffvg.dir/shape.cpp.o.d @@ -0,0 +1,167 @@ +CMakeFiles/diffvg.dir/shape.cpp.o: \ + /content/Word-As-Image/diffvg/shape.cpp /usr/include/stdc-predef.h \ + /content/Word-As-Image/diffvg/shape.h \ + /content/Word-As-Image/diffvg/diffvg.h /usr/include/c++/11/cmath \ + /usr/include/x86_64-linux-gnu/c++/11/bits/c++config.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/os_defines.h \ + /usr/include/features.h /usr/include/features-time64.h \ + /usr/include/x86_64-linux-gnu/bits/wordsize.h \ + /usr/include/x86_64-linux-gnu/bits/timesize.h \ + /usr/include/x86_64-linux-gnu/sys/cdefs.h \ + /usr/include/x86_64-linux-gnu/bits/long-double.h \ + /usr/include/x86_64-linux-gnu/gnu/stubs.h \ + /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/cpu_defines.h \ + /usr/include/c++/11/bits/cpp_type_traits.h \ + /usr/include/c++/11/ext/type_traits.h /usr/include/math.h \ + 
/usr/include/x86_64-linux-gnu/bits/libc-header-start.h \ + /usr/include/x86_64-linux-gnu/bits/types.h \ + /usr/include/x86_64-linux-gnu/bits/typesizes.h \ + /usr/include/x86_64-linux-gnu/bits/time64.h \ + /usr/include/x86_64-linux-gnu/bits/math-vector.h \ + /usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h \ + /usr/include/x86_64-linux-gnu/bits/floatn.h \ + /usr/include/x86_64-linux-gnu/bits/floatn-common.h \ + /usr/include/x86_64-linux-gnu/bits/flt-eval-method.h \ + /usr/include/x86_64-linux-gnu/bits/fp-logb.h \ + /usr/include/x86_64-linux-gnu/bits/fp-fast.h \ + /usr/include/x86_64-linux-gnu/bits/mathcalls-helper-functions.h \ + /usr/include/x86_64-linux-gnu/bits/mathcalls.h \ + /usr/include/x86_64-linux-gnu/bits/mathcalls-narrow.h \ + /usr/include/x86_64-linux-gnu/bits/iscanonical.h \ + /usr/include/c++/11/bits/std_abs.h /usr/include/stdlib.h \ + /usr/lib/gcc/x86_64-linux-gnu/11/include/stddef.h \ + /usr/include/x86_64-linux-gnu/bits/waitflags.h \ + /usr/include/x86_64-linux-gnu/bits/waitstatus.h \ + /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \ + /usr/include/x86_64-linux-gnu/sys/types.h \ + /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/time_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \ + /usr/include/x86_64-linux-gnu/bits/stdint-intn.h /usr/include/endian.h \ + /usr/include/x86_64-linux-gnu/bits/endian.h \ + /usr/include/x86_64-linux-gnu/bits/endianness.h \ + /usr/include/x86_64-linux-gnu/bits/byteswap.h \ + /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \ + /usr/include/x86_64-linux-gnu/sys/select.h \ + /usr/include/x86_64-linux-gnu/bits/select.h \ + /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \ + 
/usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \ + /usr/include/x86_64-linux-gnu/bits/select2.h \ + /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \ + /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \ + /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \ + /usr/include/x86_64-linux-gnu/bits/atomic_wide_counter.h \ + /usr/include/x86_64-linux-gnu/bits/struct_mutex.h \ + /usr/include/x86_64-linux-gnu/bits/struct_rwlock.h /usr/include/alloca.h \ + /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \ + /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \ + /usr/include/x86_64-linux-gnu/bits/stdlib.h /usr/include/c++/11/cstdint \ + /usr/lib/gcc/x86_64-linux-gnu/11/include/stdint.h /usr/include/stdint.h \ + /usr/include/x86_64-linux-gnu/bits/wchar.h \ + /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \ + /usr/include/c++/11/atomic /usr/include/c++/11/bits/atomic_base.h \ + /usr/include/c++/11/bits/atomic_lockfree_defines.h \ + /usr/include/c++/11/bits/move.h /usr/include/c++/11/type_traits \ + /content/Word-As-Image/diffvg/color.h \ + /content/Word-As-Image/diffvg/vector.h /usr/include/c++/11/iostream \ + /usr/include/c++/11/ostream /usr/include/c++/11/ios \ + /usr/include/c++/11/iosfwd /usr/include/c++/11/bits/stringfwd.h \ + /usr/include/c++/11/bits/memoryfwd.h /usr/include/c++/11/bits/postypes.h \ + /usr/include/c++/11/cwchar /usr/include/wchar.h \ + /usr/lib/gcc/x86_64-linux-gnu/11/include/stdarg.h \ + /usr/include/x86_64-linux-gnu/bits/types/wint_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/mbstate_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \ + /usr/include/x86_64-linux-gnu/bits/types/FILE.h \ + /usr/include/x86_64-linux-gnu/bits/wchar2.h \ + /usr/include/c++/11/exception /usr/include/c++/11/bits/exception.h \ + /usr/include/c++/11/bits/exception_ptr.h \ + /usr/include/c++/11/bits/exception_defines.h \ + /usr/include/c++/11/bits/cxxabi_init_exception.h \ + 
/usr/include/c++/11/typeinfo /usr/include/c++/11/bits/hash_bytes.h \ + /usr/include/c++/11/new /usr/include/c++/11/bits/nested_exception.h \ + /usr/include/c++/11/bits/char_traits.h \ + /usr/include/c++/11/bits/stl_algobase.h \ + /usr/include/c++/11/bits/functexcept.h \ + /usr/include/c++/11/ext/numeric_traits.h \ + /usr/include/c++/11/bits/stl_pair.h \ + /usr/include/c++/11/bits/stl_iterator_base_types.h \ + /usr/include/c++/11/bits/stl_iterator_base_funcs.h \ + /usr/include/c++/11/bits/concept_check.h \ + /usr/include/c++/11/debug/assertions.h \ + /usr/include/c++/11/bits/stl_iterator.h \ + /usr/include/c++/11/bits/ptr_traits.h /usr/include/c++/11/debug/debug.h \ + /usr/include/c++/11/bits/predefined_ops.h \ + /usr/include/c++/11/bits/localefwd.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/c++locale.h \ + /usr/include/c++/11/clocale /usr/include/locale.h \ + /usr/include/x86_64-linux-gnu/bits/locale.h /usr/include/c++/11/cctype \ + /usr/include/ctype.h /usr/include/c++/11/bits/ios_base.h \ + /usr/include/c++/11/ext/atomicity.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/gthr.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/gthr-default.h \ + /usr/include/pthread.h /usr/include/sched.h \ + /usr/include/x86_64-linux-gnu/bits/sched.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_sched_param.h \ + /usr/include/x86_64-linux-gnu/bits/cpu-set.h /usr/include/time.h \ + /usr/include/x86_64-linux-gnu/bits/time.h \ + /usr/include/x86_64-linux-gnu/bits/timex.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \ + /usr/include/x86_64-linux-gnu/bits/setjmp.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct___jmp_buf_tag.h \ + /usr/include/x86_64-linux-gnu/bits/pthread_stack_min-dynamic.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/atomic_word.h \ + /usr/include/x86_64-linux-gnu/sys/single_threaded.h \ + /usr/include/c++/11/bits/locale_classes.h /usr/include/c++/11/string \ + 
/usr/include/c++/11/bits/allocator.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/c++allocator.h \ + /usr/include/c++/11/ext/new_allocator.h \ + /usr/include/c++/11/bits/ostream_insert.h \ + /usr/include/c++/11/bits/cxxabi_forced.h \ + /usr/include/c++/11/bits/stl_function.h \ + /usr/include/c++/11/backward/binders.h \ + /usr/include/c++/11/bits/range_access.h \ + /usr/include/c++/11/initializer_list \ + /usr/include/c++/11/bits/basic_string.h \ + /usr/include/c++/11/ext/alloc_traits.h \ + /usr/include/c++/11/bits/alloc_traits.h \ + /usr/include/c++/11/bits/stl_construct.h \ + /usr/include/c++/11/ext/string_conversions.h /usr/include/c++/11/cstdlib \ + /usr/include/c++/11/cstdio /usr/include/stdio.h \ + /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \ + /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \ + /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \ + /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \ + /usr/include/x86_64-linux-gnu/bits/stdio.h \ + /usr/include/x86_64-linux-gnu/bits/stdio2.h /usr/include/c++/11/cerrno \ + /usr/include/errno.h /usr/include/x86_64-linux-gnu/bits/errno.h \ + /usr/include/linux/errno.h /usr/include/x86_64-linux-gnu/asm/errno.h \ + /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h \ + /usr/include/x86_64-linux-gnu/bits/types/error_t.h \ + /usr/include/c++/11/bits/charconv.h \ + /usr/include/c++/11/bits/functional_hash.h \ + /usr/include/c++/11/bits/basic_string.tcc \ + /usr/include/c++/11/bits/locale_classes.tcc \ + /usr/include/c++/11/system_error \ + /usr/include/x86_64-linux-gnu/c++/11/bits/error_constants.h \ + /usr/include/c++/11/stdexcept /usr/include/c++/11/streambuf \ + /usr/include/c++/11/bits/streambuf.tcc \ + /usr/include/c++/11/bits/basic_ios.h \ + /usr/include/c++/11/bits/locale_facets.h /usr/include/c++/11/cwctype \ + /usr/include/wctype.h /usr/include/x86_64-linux-gnu/bits/wctype-wchar.h \ + 
/usr/include/x86_64-linux-gnu/c++/11/bits/ctype_base.h \ + /usr/include/c++/11/bits/streambuf_iterator.h \ + /usr/include/x86_64-linux-gnu/c++/11/bits/ctype_inline.h \ + /usr/include/c++/11/bits/locale_facets.tcc \ + /usr/include/c++/11/bits/basic_ios.tcc \ + /usr/include/c++/11/bits/ostream.tcc /usr/include/c++/11/istream \ + /usr/include/c++/11/bits/istream.tcc /content/Word-As-Image/diffvg/ptr.h \ + /usr/include/c++/11/cstddef /content/Word-As-Image/diffvg/matrix.h diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/progress.marks b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/progress.marks new file mode 100644 index 0000000000000000000000000000000000000000..7f8f011eb73d6043d2e6db9d2c101195ae2801f2 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles/progress.marks @@ -0,0 +1 @@ +7 diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/Makefile b/diffvg/build/temp.linux-x86_64-cpython-38/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..364c2c6168a0ba41c11a1401db37790093de2d8c --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/Makefile @@ -0,0 +1,262 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.26 + +# Default target executed when no arguments are given to make. +default_target: all +.PHONY : default_target + +# Allow only one "make -f Makefile2" at a time, but pass parallelism. +.NOTPARALLEL: + +#============================================================================= +# Special targets provided by cmake. + +# Disable implicit rules so canonical targets will work. +.SUFFIXES: + +# Disable VCS-based implicit rules. +% : %,v + +# Disable VCS-based implicit rules. +% : RCS/% + +# Disable VCS-based implicit rules. +% : RCS/%,v + +# Disable VCS-based implicit rules. +% : SCCS/s.% + +# Disable VCS-based implicit rules. +% : s.% + +.SUFFIXES: .hpux_make_needs_suffix_list + +# Command-line flag to silence nested $(MAKE). 
+$(VERBOSE)MAKESILENT = -s + +#Suppress display of executed commands. +$(VERBOSE).SILENT: + +# A target that is always out of date. +cmake_force: +.PHONY : cmake_force + +#============================================================================= +# Set environment variables for the build. + +# The shell in which to execute make rules. +SHELL = /bin/sh + +# The CMake executable. +CMAKE_COMMAND = /usr/local/envs/word/bin/cmake + +# The command to remove a file. +RM = /usr/local/envs/word/bin/cmake -E rm -f + +# Escaping for special characters. +EQUALS = = + +# The top-level source directory on which CMake was run. +CMAKE_SOURCE_DIR = /content/Word-As-Image/diffvg + +# The top-level build directory on which CMake was run. +CMAKE_BINARY_DIR = /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 + +#============================================================================= +# Targets provided globally by CMake. + +# Special rule for the target edit_cache +edit_cache: + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake cache editor..." + /usr/local/envs/word/bin/ccmake -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) +.PHONY : edit_cache + +# Special rule for the target edit_cache +edit_cache/fast: edit_cache +.PHONY : edit_cache/fast + +# Special rule for the target rebuild_cache +rebuild_cache: + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake to regenerate build system..." 
+ /usr/local/envs/word/bin/cmake --regenerate-during-build -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) +.PHONY : rebuild_cache + +# Special rule for the target rebuild_cache +rebuild_cache/fast: rebuild_cache +.PHONY : rebuild_cache/fast + +# The main all target +all: cmake_check_build_system + $(CMAKE_COMMAND) -E cmake_progress_start /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38//CMakeFiles/progress.marks + $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 all + $(CMAKE_COMMAND) -E cmake_progress_start /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles 0 +.PHONY : all + +# The main clean target +clean: + $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 clean +.PHONY : clean + +# The main clean target +clean/fast: clean +.PHONY : clean/fast + +# Prepare targets for installation. +preinstall: all + $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 preinstall +.PHONY : preinstall + +# Prepare targets for installation. +preinstall/fast: + $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 preinstall +.PHONY : preinstall/fast + +# clear depends +depend: + $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1 +.PHONY : depend + +#============================================================================= +# Target rules for targets named diffvg + +# Build rule for target. +diffvg: cmake_check_build_system + $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 diffvg +.PHONY : diffvg + +# fast build rule for target. 
+diffvg/fast: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/build +.PHONY : diffvg/fast + +atomic.o: atomic.cpp.o +.PHONY : atomic.o + +# target to build an object file +atomic.cpp.o: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/atomic.cpp.o +.PHONY : atomic.cpp.o + +atomic.i: atomic.cpp.i +.PHONY : atomic.i + +# target to preprocess a source file +atomic.cpp.i: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/atomic.cpp.i +.PHONY : atomic.cpp.i + +atomic.s: atomic.cpp.s +.PHONY : atomic.s + +# target to generate assembly for a file +atomic.cpp.s: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/atomic.cpp.s +.PHONY : atomic.cpp.s + +color.o: color.cpp.o +.PHONY : color.o + +# target to build an object file +color.cpp.o: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/color.cpp.o +.PHONY : color.cpp.o + +color.i: color.cpp.i +.PHONY : color.i + +# target to preprocess a source file +color.cpp.i: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/color.cpp.i +.PHONY : color.cpp.i + +color.s: color.cpp.s +.PHONY : color.s + +# target to generate assembly for a file +color.cpp.s: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/color.cpp.s +.PHONY : color.cpp.s + +parallel.o: parallel.cpp.o +.PHONY : parallel.o + +# target to build an object file +parallel.cpp.o: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/parallel.cpp.o +.PHONY : parallel.cpp.o + +parallel.i: parallel.cpp.i +.PHONY : parallel.i + +# target to preprocess a source file +parallel.cpp.i: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/parallel.cpp.i +.PHONY : parallel.cpp.i + +parallel.s: parallel.cpp.s +.PHONY : parallel.s + +# target to generate assembly for a file +parallel.cpp.s: + $(MAKE) 
$(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/parallel.cpp.s +.PHONY : parallel.cpp.s + +shape.o: shape.cpp.o +.PHONY : shape.o + +# target to build an object file +shape.cpp.o: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/shape.cpp.o +.PHONY : shape.cpp.o + +shape.i: shape.cpp.i +.PHONY : shape.i + +# target to preprocess a source file +shape.cpp.i: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/shape.cpp.i +.PHONY : shape.cpp.i + +shape.s: shape.cpp.s +.PHONY : shape.s + +# target to generate assembly for a file +shape.cpp.s: + $(MAKE) $(MAKESILENT) -f CMakeFiles/diffvg.dir/build.make CMakeFiles/diffvg.dir/shape.cpp.s +.PHONY : shape.cpp.s + +# Help Target +help: + @echo "The following are some of the valid targets for this Makefile:" + @echo "... all (the default if no target is provided)" + @echo "... clean" + @echo "... depend" + @echo "... edit_cache" + @echo "... rebuild_cache" + @echo "... diffvg" + @echo "... atomic.o" + @echo "... atomic.i" + @echo "... atomic.s" + @echo "... color.o" + @echo "... color.i" + @echo "... color.s" + @echo "... parallel.o" + @echo "... parallel.i" + @echo "... parallel.s" + @echo "... shape.o" + @echo "... shape.i" + @echo "... shape.s" +.PHONY : help + + + +#============================================================================= +# Special targets to cleanup operation of make. + +# Special rule to run CMake to check the build system integrity. +# No rule that depends on this can have commands that come from listfiles +# because they might be regenerated. 
+cmake_check_build_system: + $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0 +.PHONY : cmake_check_build_system + diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/cmake_install.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/cmake_install.cmake new file mode 100644 index 0000000000000000000000000000000000000000..3a727f1f392bbb8614be2a329931d53e0645a704 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/cmake_install.cmake @@ -0,0 +1,60 @@ +# Install script for directory: /content/Word-As-Image/diffvg + +# Set the install prefix +if(NOT DEFINED CMAKE_INSTALL_PREFIX) + set(CMAKE_INSTALL_PREFIX "/usr/local") +endif() +string(REGEX REPLACE "/$" "" CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}") + +# Set the install configuration name. +if(NOT DEFINED CMAKE_INSTALL_CONFIG_NAME) + if(BUILD_TYPE) + string(REGEX REPLACE "^[^A-Za-z0-9_]+" "" + CMAKE_INSTALL_CONFIG_NAME "${BUILD_TYPE}") + else() + set(CMAKE_INSTALL_CONFIG_NAME "Release") + endif() + message(STATUS "Install configuration: \"${CMAKE_INSTALL_CONFIG_NAME}\"") +endif() + +# Set the component getting installed. +if(NOT CMAKE_INSTALL_COMPONENT) + if(COMPONENT) + message(STATUS "Install component: \"${COMPONENT}\"") + set(CMAKE_INSTALL_COMPONENT "${COMPONENT}") + else() + set(CMAKE_INSTALL_COMPONENT) + endif() +endif() + +# Install shared libraries without execute permission? +if(NOT DEFINED CMAKE_INSTALL_SO_NO_EXE) + set(CMAKE_INSTALL_SO_NO_EXE "1") +endif() + +# Is this installation the result of a crosscompile? +if(NOT DEFINED CMAKE_CROSSCOMPILING) + set(CMAKE_CROSSCOMPILING "FALSE") +endif() + +# Set default install directory permissions. +if(NOT DEFINED CMAKE_OBJDUMP) + set(CMAKE_OBJDUMP "/usr/bin/objdump") +endif() + +if(NOT CMAKE_INSTALL_LOCAL_ONLY) + # Include the install script for each subdirectory. 
+ include("/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/cmake_install.cmake") + +endif() + +if(CMAKE_INSTALL_COMPONENT) + set(CMAKE_INSTALL_MANIFEST "install_manifest_${CMAKE_INSTALL_COMPONENT}.txt") +else() + set(CMAKE_INSTALL_MANIFEST "install_manifest.txt") +endif() + +string(REPLACE ";" "\n" CMAKE_INSTALL_MANIFEST_CONTENT + "${CMAKE_INSTALL_MANIFEST_FILES}") +file(WRITE "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/${CMAKE_INSTALL_MANIFEST}" + "${CMAKE_INSTALL_MANIFEST_CONTENT}") diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/compile_commands.json b/diffvg/build/temp.linux-x86_64-cpython-38/compile_commands.json new file mode 100644 index 0000000000000000000000000000000000000000..3447d71f34581826d7404acec0630754d8d00650 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/compile_commands.json @@ -0,0 +1,26 @@ +[ +{ + "directory": "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38", + "command": "/usr/bin/c++ -DCOMPILE_WITH_CUDA -Ddiffvg_EXPORTS -I/usr/local/envs/word/include/python3.8 -I/usr/local/include/python3.10 -I/content/Word-As-Image/diffvg/pybind11/include -I/usr/local/cuda/include -DVERSION_INFO=\\\"0.0.1\\\" -O3 -std=gnu++11 -fPIC -Wall -g -O3 -fvisibility=hidden -Wno-unknown-pragmas -o CMakeFiles/diffvg.dir/atomic.cpp.o -c /content/Word-As-Image/diffvg/atomic.cpp", + "file": "/content/Word-As-Image/diffvg/atomic.cpp", + "output": "CMakeFiles/diffvg.dir/atomic.cpp.o" +}, +{ + "directory": "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38", + "command": "/usr/bin/c++ -DCOMPILE_WITH_CUDA -Ddiffvg_EXPORTS -I/usr/local/envs/word/include/python3.8 -I/usr/local/include/python3.10 -I/content/Word-As-Image/diffvg/pybind11/include -I/usr/local/cuda/include -DVERSION_INFO=\\\"0.0.1\\\" -O3 -std=gnu++11 -fPIC -Wall -g -O3 -fvisibility=hidden -Wno-unknown-pragmas -o CMakeFiles/diffvg.dir/color.cpp.o -c /content/Word-As-Image/diffvg/color.cpp", + "file": 
"/content/Word-As-Image/diffvg/color.cpp", + "output": "CMakeFiles/diffvg.dir/color.cpp.o" +}, +{ + "directory": "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38", + "command": "/usr/bin/c++ -DCOMPILE_WITH_CUDA -Ddiffvg_EXPORTS -I/usr/local/envs/word/include/python3.8 -I/usr/local/include/python3.10 -I/content/Word-As-Image/diffvg/pybind11/include -I/usr/local/cuda/include -DVERSION_INFO=\\\"0.0.1\\\" -O3 -std=gnu++11 -fPIC -Wall -g -O3 -fvisibility=hidden -Wno-unknown-pragmas -o CMakeFiles/diffvg.dir/parallel.cpp.o -c /content/Word-As-Image/diffvg/parallel.cpp", + "file": "/content/Word-As-Image/diffvg/parallel.cpp", + "output": "CMakeFiles/diffvg.dir/parallel.cpp.o" +}, +{ + "directory": "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38", + "command": "/usr/bin/c++ -DCOMPILE_WITH_CUDA -Ddiffvg_EXPORTS -I/usr/local/envs/word/include/python3.8 -I/usr/local/include/python3.10 -I/content/Word-As-Image/diffvg/pybind11/include -I/usr/local/cuda/include -DVERSION_INFO=\\\"0.0.1\\\" -O3 -std=gnu++11 -fPIC -Wall -g -O3 -fvisibility=hidden -Wno-unknown-pragmas -o CMakeFiles/diffvg.dir/shape.cpp.o -c /content/Word-As-Image/diffvg/shape.cpp", + "file": "/content/Word-As-Image/diffvg/shape.cpp", + "output": "CMakeFiles/diffvg.dir/shape.cpp.o" +} +] \ No newline at end of file diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/CMakeFiles/CMakeDirectoryInformation.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/CMakeFiles/CMakeDirectoryInformation.cmake new file mode 100644 index 0000000000000000000000000000000000000000..41d6fe421900fe4929471461b0f4d68fea43d19b --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/CMakeFiles/CMakeDirectoryInformation.cmake @@ -0,0 +1,16 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.26 + +# Relative path conversion top directories. 
+set(CMAKE_RELATIVE_PATH_TOP_SOURCE "/content/Word-As-Image/diffvg") +set(CMAKE_RELATIVE_PATH_TOP_BINARY "/content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38") + +# Force unix paths in dependencies. +set(CMAKE_FORCE_UNIX_PATHS 1) + + +# The C and CXX include file regular expressions for this directory. +set(CMAKE_C_INCLUDE_REGEX_SCAN "^.*$") +set(CMAKE_C_INCLUDE_REGEX_COMPLAIN "^$") +set(CMAKE_CXX_INCLUDE_REGEX_SCAN ${CMAKE_C_INCLUDE_REGEX_SCAN}) +set(CMAKE_CXX_INCLUDE_REGEX_COMPLAIN ${CMAKE_C_INCLUDE_REGEX_COMPLAIN}) diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/CMakeFiles/progress.marks b/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/CMakeFiles/progress.marks new file mode 100644 index 0000000000000000000000000000000000000000..573541ac9702dd3969c9bc859d2b91ec1f7e6e56 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/CMakeFiles/progress.marks @@ -0,0 +1 @@ +0 diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/Makefile b/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9fc45e996330300888cac8389db9773d4cdfbb89 --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/Makefile @@ -0,0 +1,140 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.26 + +# Default target executed when no arguments are given to make. +default_target: all +.PHONY : default_target + +# Allow only one "make -f Makefile2" at a time, but pass parallelism. +.NOTPARALLEL: + +#============================================================================= +# Special targets provided by cmake. + +# Disable implicit rules so canonical targets will work. +.SUFFIXES: + +# Disable VCS-based implicit rules. +% : %,v + +# Disable VCS-based implicit rules. +% : RCS/% + +# Disable VCS-based implicit rules. +% : RCS/%,v + +# Disable VCS-based implicit rules. 
+% : SCCS/s.% + +# Disable VCS-based implicit rules. +% : s.% + +.SUFFIXES: .hpux_make_needs_suffix_list + +# Command-line flag to silence nested $(MAKE). +$(VERBOSE)MAKESILENT = -s + +#Suppress display of executed commands. +$(VERBOSE).SILENT: + +# A target that is always out of date. +cmake_force: +.PHONY : cmake_force + +#============================================================================= +# Set environment variables for the build. + +# The shell in which to execute make rules. +SHELL = /bin/sh + +# The CMake executable. +CMAKE_COMMAND = /usr/local/envs/word/bin/cmake + +# The command to remove a file. +RM = /usr/local/envs/word/bin/cmake -E rm -f + +# Escaping for special characters. +EQUALS = = + +# The top-level source directory on which CMake was run. +CMAKE_SOURCE_DIR = /content/Word-As-Image/diffvg + +# The top-level build directory on which CMake was run. +CMAKE_BINARY_DIR = /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 + +#============================================================================= +# Targets provided globally by CMake. + +# Special rule for the target edit_cache +edit_cache: + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake cache editor..." + /usr/local/envs/word/bin/ccmake -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) +.PHONY : edit_cache + +# Special rule for the target edit_cache +edit_cache/fast: edit_cache +.PHONY : edit_cache/fast + +# Special rule for the target rebuild_cache +rebuild_cache: + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake to regenerate build system..." 
+ /usr/local/envs/word/bin/cmake --regenerate-during-build -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) +.PHONY : rebuild_cache + +# Special rule for the target rebuild_cache +rebuild_cache/fast: rebuild_cache +.PHONY : rebuild_cache/fast + +# The main all target +all: cmake_check_build_system + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 && $(CMAKE_COMMAND) -E cmake_progress_start /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/pybind11//CMakeFiles/progress.marks + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 && $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 pybind11/all + $(CMAKE_COMMAND) -E cmake_progress_start /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38/CMakeFiles 0 +.PHONY : all + +# The main clean target +clean: + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 && $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 pybind11/clean +.PHONY : clean + +# The main clean target +clean/fast: clean +.PHONY : clean/fast + +# Prepare targets for installation. +preinstall: all + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 && $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 pybind11/preinstall +.PHONY : preinstall + +# Prepare targets for installation. +preinstall/fast: + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 && $(MAKE) $(MAKESILENT) -f CMakeFiles/Makefile2 pybind11/preinstall +.PHONY : preinstall/fast + +# clear depends +depend: + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 && $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1 +.PHONY : depend + +# Help Target +help: + @echo "The following are some of the valid targets for this Makefile:" + @echo "... all (the default if no target is provided)" + @echo "... clean" + @echo "... depend" + @echo "... 
edit_cache" + @echo "... rebuild_cache" +.PHONY : help + + + +#============================================================================= +# Special targets to cleanup operation of make. + +# Special rule to run CMake to check the build system integrity. +# No rule that depends on this can have commands that come from listfiles +# because they might be regenerated. +cmake_check_build_system: + cd /content/Word-As-Image/diffvg/build/temp.linux-x86_64-cpython-38 && $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0 +.PHONY : cmake_check_build_system + diff --git a/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/cmake_install.cmake b/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/cmake_install.cmake new file mode 100644 index 0000000000000000000000000000000000000000..623d87851f12e32a31618cf7dff9b8a741ce275b --- /dev/null +++ b/diffvg/build/temp.linux-x86_64-cpython-38/pybind11/cmake_install.cmake @@ -0,0 +1,44 @@ +# Install script for directory: /content/Word-As-Image/diffvg/pybind11 + +# Set the install prefix +if(NOT DEFINED CMAKE_INSTALL_PREFIX) + set(CMAKE_INSTALL_PREFIX "/usr/local") +endif() +string(REGEX REPLACE "/$" "" CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}") + +# Set the install configuration name. +if(NOT DEFINED CMAKE_INSTALL_CONFIG_NAME) + if(BUILD_TYPE) + string(REGEX REPLACE "^[^A-Za-z0-9_]+" "" + CMAKE_INSTALL_CONFIG_NAME "${BUILD_TYPE}") + else() + set(CMAKE_INSTALL_CONFIG_NAME "Release") + endif() + message(STATUS "Install configuration: \"${CMAKE_INSTALL_CONFIG_NAME}\"") +endif() + +# Set the component getting installed. +if(NOT CMAKE_INSTALL_COMPONENT) + if(COMPONENT) + message(STATUS "Install component: \"${COMPONENT}\"") + set(CMAKE_INSTALL_COMPONENT "${COMPONENT}") + else() + set(CMAKE_INSTALL_COMPONENT) + endif() +endif() + +# Install shared libraries without execute permission? 
+if(NOT DEFINED CMAKE_INSTALL_SO_NO_EXE) + set(CMAKE_INSTALL_SO_NO_EXE "1") +endif() + +# Is this installation the result of a crosscompile? +if(NOT DEFINED CMAKE_CROSSCOMPILING) + set(CMAKE_CROSSCOMPILING "FALSE") +endif() + +# Set default install directory permissions. +if(NOT DEFINED CMAKE_OBJDUMP) + set(CMAKE_OBJDUMP "/usr/bin/objdump") +endif() + diff --git a/diffvg/cdf.h b/diffvg/cdf.h new file mode 100644 index 0000000000000000000000000000000000000000..48a64f897f2c230e3e0b5595de401dd644b8b777 --- /dev/null +++ b/diffvg/cdf.h @@ -0,0 +1,29 @@ +#pragma once + +#include "diffvg.h" + +DEVICE int sample(const float *cdf, int num_entries, float u, float *updated_u = nullptr) { + // Binary search the cdf + auto lb = 0; + auto len = num_entries - 1 - lb; + while (len > 0) { + auto half_len = len / 2; + auto mid = lb + half_len; + assert(mid >= 0 && mid < num_entries); + if (u < cdf[mid]) { + len = half_len; + } else { + lb = mid + 1; + len = len - half_len - 1; + } + } + lb = clamp(lb, 0, num_entries - 1); + if (updated_u != nullptr) { + if (lb > 0) { + *updated_u = (u - cdf[lb - 1]) / (cdf[lb] - cdf[lb - 1]); + } else { + *updated_u = u / cdf[lb]; + } + } + return lb; +} diff --git a/diffvg/cmake/FindTensorFlow.cmake b/diffvg/cmake/FindTensorFlow.cmake new file mode 100644 index 0000000000000000000000000000000000000000..b251b10538f69f3dce42370e840f167ea24fc4fc --- /dev/null +++ b/diffvg/cmake/FindTensorFlow.cmake @@ -0,0 +1,34 @@ +# https://github.com/PatWie/tensorflow-cmake/blob/master/cmake/modules/FindTensorFlow.cmake + +execute_process( + COMMAND python -c "exec(\"try:\\n import tensorflow as tf; print(tf.__version__); print(tf.__cxx11_abi_flag__);print(tf.sysconfig.get_include()); print(tf.sysconfig.get_lib())\\nexcept ImportError:\\n exit(1)\")" + OUTPUT_VARIABLE TF_INFORMATION_STRING + OUTPUT_STRIP_TRAILING_WHITESPACE + RESULT_VARIABLE retcode) + +if("${retcode}" STREQUAL "0") + string(REPLACE "\n" ";" TF_INFORMATION_LIST ${TF_INFORMATION_STRING}) + list(GET 
TF_INFORMATION_LIST 0 TF_DETECTED_VERSION) + list(GET TF_INFORMATION_LIST 1 TF_DETECTED_ABI) + list(GET TF_INFORMATION_LIST 2 TF_DETECTED_INCLUDE_DIR) + list(GET TF_INFORMATION_LIST 3 TF_DETECTED_LIBRARY_DIR) + if(WIN32) + find_library(TF_DETECTED_LIBRARY NAMES _pywrap_tensorflow_internal PATHS + ${TF_DETECTED_LIBRARY_DIR}/python) + else() + # For some reason my tensorflow doesn't have a .so file + list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES .so.1) + list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES .so.2) + find_library(TF_DETECTED_LIBRARY NAMES tensorflow_framework PATHS + ${TF_DETECTED_LIBRARY_DIR}) + endif() + set(TensorFlow_VERSION ${TF_DETECTED_VERSION}) + set(TensorFlow_ABI ${TF_DETECTED_ABI}) + set(TensorFlow_INCLUDE_DIR ${TF_DETECTED_INCLUDE_DIR}) + set(TensorFlow_LIBRARY ${TF_DETECTED_LIBRARY}) + if(TensorFlow_LIBRARY AND TensorFlow_INCLUDE_DIR) + set(TensorFlow_FOUND TRUE) + else() + set(TensorFlow_FOUND FALSE) + endif() +endif() diff --git a/diffvg/cmake/FindThrust.cmake b/diffvg/cmake/FindThrust.cmake new file mode 100644 index 0000000000000000000000000000000000000000..61eef297b996496f4222d6afb570fb5aa960781d --- /dev/null +++ b/diffvg/cmake/FindThrust.cmake @@ -0,0 +1,40 @@ +##============================================================================= +## +## Copyright (c) Kitware, Inc. +## All rights reserved. +## See LICENSE.txt for details. +## +## This software is distributed WITHOUT ANY WARRANTY; without even +## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +## PURPOSE. See the above copyright notice for more information. +## +## Copyright 2012 Sandia Corporation. +## Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +## the U.S. Government retains certain rights in this software. +## +##============================================================================= + +# +# FindThrust +# +# This module finds the Thrust header files and extrats their version. It +# sets the following variables. 
+# +# THRUST_INCLUDE_DIR - Include directory for thrust header files. (All header +# files will actually be in the thrust subdirectory.) +# THRUST_VERSION - Version of thrust in the form "major.minor.patch". +# + +find_path(THRUST_INCLUDE_DIR + HINTS /usr/include/cuda + /usr/local/include + /usr/local/cuda/include + ${CUDA_INCLUDE_DIRS} + ./thrust + ../thrust + NAMES thrust/version.h +) + +if (THRUST_INCLUDE_DIR) + set(THRUST_FOUND TRUE) +endif () \ No newline at end of file diff --git a/diffvg/color.cpp b/diffvg/color.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2a2e8abcee1dacefeaeb0268359737aec178bace --- /dev/null +++ b/diffvg/color.cpp @@ -0,0 +1,25 @@ +#include "color.h" + +void LinearGradient::copy_to(ptr stop_offsets, + ptr stop_colors) const { + float *o = stop_offsets.get(); + float *c = stop_colors.get(); + for (int i = 0; i < num_stops; i++) { + o[i] = this->stop_offsets[i]; + } + for (int i = 0; i < 4 * num_stops; i++) { + c[i] = this->stop_colors[i]; + } +} + +void RadialGradient::copy_to(ptr stop_offsets, + ptr stop_colors) const { + float *o = stop_offsets.get(); + float *c = stop_colors.get(); + for (int i = 0; i < num_stops; i++) { + o[i] = this->stop_offsets[i]; + } + for (int i = 0; i < 4 * num_stops; i++) { + c[i] = this->stop_colors[i]; + } +} diff --git a/diffvg/color.h b/diffvg/color.h new file mode 100644 index 0000000000000000000000000000000000000000..c787105636d42b4706110500982d0ce576eda47e --- /dev/null +++ b/diffvg/color.h @@ -0,0 +1,63 @@ +#pragma once + +#include "diffvg.h" +#include "vector.h" +#include "ptr.h" + +enum class ColorType { + Constant, + LinearGradient, + RadialGradient +}; + +struct Constant { + Vector4f color; + + ptr get_ptr() { + return ptr(this); + } +}; + +struct LinearGradient { + LinearGradient(const Vector2f &begin, + const Vector2f &end, + int num_stops, + ptr stop_offsets, + ptr stop_colors) + : begin(begin), end(end), num_stops(num_stops), + stop_offsets(stop_offsets.get()), 
stop_colors(stop_colors.get()) {} + + ptr get_ptr() { + return ptr(this); + } + + void copy_to(ptr stop_offset, + ptr stop_colors) const; + + Vector2f begin, end; + int num_stops; + float *stop_offsets; + float *stop_colors; // rgba +}; + +struct RadialGradient { + RadialGradient(const Vector2f ¢er, + const Vector2f &radius, + int num_stops, + ptr stop_offsets, + ptr stop_colors) + : center(center), radius(radius), num_stops(num_stops), + stop_offsets(stop_offsets.get()), stop_colors(stop_colors.get()) {} + + ptr get_ptr() { + return ptr(this); + } + + void copy_to(ptr stop_offset, + ptr stop_colors) const; + + Vector2f center, radius; + int num_stops; + float *stop_offsets; + float *stop_colors; // rgba +}; diff --git a/diffvg/compute_distance.h b/diffvg/compute_distance.h new file mode 100644 index 0000000000000000000000000000000000000000..c125641a9d720bd16be1428e205bd6c07c726bc5 --- /dev/null +++ b/diffvg/compute_distance.h @@ -0,0 +1,949 @@ +#pragma once + +#include "diffvg.h" +#include "edge_query.h" +#include "scene.h" +#include "shape.h" +#include "solve.h" +#include "vector.h" + +#include + +struct ClosestPointPathInfo { + int base_point_id; + int point_id; + float t_root; +}; + +DEVICE +inline +bool closest_point(const Circle &circle, const Vector2f &pt, + Vector2f *result) { + *result = circle.center + circle.radius * normalize(pt - circle.center); + return false; +} + +DEVICE +inline +bool closest_point(const Path &path, const BVHNode *bvh_nodes, const Vector2f &pt, float max_radius, + ClosestPointPathInfo *path_info, + Vector2f *result) { + auto min_dist = max_radius; + auto ret_pt = Vector2f{0, 0}; + auto found = false; + auto num_segments = path.num_base_points; + constexpr auto max_bvh_size = 128; + int bvh_stack[max_bvh_size]; + auto stack_size = 0; + bvh_stack[stack_size++] = 2 * num_segments - 2; + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto base_point_id = 
node.child0; + auto point_id = - node.child1 - 1; + assert(base_point_id < num_segments); + assert(point_id < path.num_points); + auto dist = 0.f; + auto closest_pt = Vector2f{0, 0}; + auto t_root = 0.f; + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + if (t < 0) { + dist = distance(p0, pt); + closest_pt = p0; + t_root = 0; + } else if (t > 1) { + dist = distance(p1, pt); + closest_pt = p1; + t_root = 1; + } else { + dist = distance(p0 + t * (p1 - p0), pt); + closest_pt = p0 + t * (p1 - p0); + t_root = t; + } + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + if (path.use_distance_approx) { + closest_pt = quadratic_closest_pt_approx(p0, p1, p2, pt, &t_root); + dist = distance(closest_pt, pt); + } else { + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2; + }; + auto pt0 = eval(0); + auto pt1 = eval(1); + auto dist0 = distance(pt0, pt); + auto dist1 = distance(pt1, pt); + { + dist = dist0; + closest_pt = pt0; + t_root = 0; + } + if (dist1 < dist) { + dist = dist1; + closest_pt = pt1; + t_root = 1; + } + // The curve is (1-t)^2p0 + 2(1-t)tp1 + t^2p2 + // = (p0-2p1+p2)t^2+(-2p0+2p1)t+p0 = q + // Want to solve (q - pt) dot q' = 0 + // q' = (p0-2p1+p2)t + (-p0+p1) + // Expanding (p0-2p1+p2)^2 t^3 + + // 3(p0-2p1+p2)(-p0+p1) t^2 + + // (2(-p0+p1)^2+(p0-2p1+p2)(p0-pt))t + + // 
(-p0+p1)(p0-pt) = 0 + auto A = sum((p0-2*p1+p2)*(p0-2*p1+p2)); + auto B = sum(3*(p0-2*p1+p2)*(-p0+p1)); + auto C = sum(2*(-p0+p1)*(-p0+p1)+(p0-2*p1+p2)*(p0-pt)); + auto D = sum((-p0+p1)*(p0-pt)); + float t[3]; + int num_sol = solve_cubic(A, B, C, D, t); + for (int j = 0; j < num_sol; j++) { + if (t[j] >= 0 && t[j] <= 1) { + auto p = eval(t[j]); + auto distp = distance(p, pt); + if (distp < dist) { + dist = distp; + closest_pt = p; + t_root = t[j]; + } + } + } + } + } else if (path.num_control_points[base_point_id] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]}; + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3; + }; + auto pt0 = eval(0); + auto pt1 = eval(1); + auto dist0 = distance(pt0, pt); + auto dist1 = distance(pt1, pt); + { + dist = dist0; + closest_pt = pt0; + t_root = 0; + } + if (dist1 < dist) { + dist = dist1; + closest_pt = pt1; + t_root = 1; + } + // The curve is (1 - t)^3 p0 + 3 * (1 - t)^2 t p1 + 3 * (1 - t) t^2 p2 + t^3 p3 + // = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0 + // Want to solve (q - pt) dot q' = 0 + // q' = 3*(-p0+3p1-3p2+p3)t^2 + 2*(3p0-6p1+3p2)t + (-3p0+3p1) + // Expanding + // 3*(-p0+3p1-3p2+p3)^2 t^5 + // 5*(-p0+3p1-3p2+p3)(3p0-6p1+3p2) t^4 + // 4*(-p0+3p1-3p2+p3)(-3p0+3p1) + 2*(3p0-6p1+3p2)^2 t^3 + // 3*(3p0-6p1+3p2)(-3p0+3p1) + 3*(-p0+3p1-3p2+p3)(p0-pt) t^2 + // (-3p0+3p1)^2+2(p0-pt)(3p0-6p1+3p2) t + // (p0-pt)(-3p0+3p1) + double A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)); + double B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)); + double C = 
4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)); + double D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt))); + double E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)); + double F = sum((p0-pt)*(-3*p0+3*p1)); + // normalize the polynomial + B /= A; + C /= A; + D /= A; + E /= A; + F /= A; + // Isolator Polynomials: + // https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.133.2233&rep=rep1&type=pdf + // x/5 + B/25 + // /----------------------------------------------------- + // 5x^4 + 4B x^3 + 3C x^2 + 2D x + E / x^5 + B x^4 + C x^3 + D x^2 + E x + F + // x^5 + 4B/5 x^4 + 3C/5 x^3 + 2D/5 x^2 + E/5 x + // ---------------------------------------------------- + // B/5 x^4 + 2C/5 x^3 + 3D/5 x^2 + 4E/5 x + F + // B/5 x^4 + 4B^2/25 x^3 + 3BC/25 x^2 + 2BD/25 x + BE/25 + // ---------------------------------------------------- + // (2C/5 - 4B^2/25)x^3 + (3D/5-3BC/25)x^2 + (4E/5-2BD/25) + (F-BE/25) + auto p1A = ((2 / 5.f) * C - (4 / 25.f) * B * B); + auto p1B = ((3 / 5.f) * D - (3 / 25.f) * B * C); + auto p1C = ((4 / 5.f) * E - (2 / 25.f) * B * D); + auto p1D = F - B * E / 25.f; + // auto q1A = 1 / 5.f; + // auto q1B = B / 25.f; + // x/5 + B/25 = 0 + // x = -B/5 + auto q_root = -B/5.f; + double p_roots[3]; + int num_sol = solve_cubic(p1A, p1B, p1C, p1D, p_roots); + float intervals[4]; + if (q_root >= 0 && q_root <= 1) { + intervals[0] = q_root; + } + for (int j = 0; j < num_sol; j++) { + intervals[j + 1] = p_roots[j]; + } + auto num_intervals = 1 + num_sol; + // sort intervals + for (int j = 1; j < num_intervals; j++) { + for (int k = j; k > 0 && intervals[k - 1] > intervals[k]; k--) { + auto tmp = intervals[k]; + intervals[k] = intervals[k - 1]; + intervals[k - 1] = tmp; + } + } + auto eval_polynomial = [&] (double t) { + return t*t*t*t*t+ + B*t*t*t*t+ + C*t*t*t+ + D*t*t+ + E*t+ + F; + }; + auto eval_polynomial_deriv = [&] (double t) { + return 5*t*t*t*t+ + 4*B*t*t*t+ + 3*C*t*t+ + 2*D*t+ + 
E; + }; + auto lower_bound = 0.f; + for (int j = 0; j < num_intervals + 1; j++) { + if (j < num_intervals && intervals[j] < 0.f) { + continue; + } + auto upper_bound = j < num_intervals ? + min(intervals[j], 1.f) : 1.f; + auto lb = lower_bound; + auto ub = upper_bound; + auto lb_eval = eval_polynomial(lb); + auto ub_eval = eval_polynomial(ub); + if (lb_eval * ub_eval > 0) { + // Doesn't have root + continue; + } + if (lb_eval > ub_eval) { + swap_(lb, ub); + } + auto t = 0.5f * (lb + ub); + auto num_iter = 20; + for (int it = 0; it < num_iter; it++) { + if (!(t >= lb && t <= ub)) { + t = 0.5f * (lb + ub); + } + auto value = eval_polynomial(t); + if (fabs(value) < 1e-5f || it == num_iter - 1) { + break; + } + // The derivative may not be entirely accurate, + // but the bisection is going to handle this + if (value > 0.f) { + ub = t; + } else { + lb = t; + } + auto derivative = eval_polynomial_deriv(t); + t -= value / derivative; + } + auto p = eval(t); + auto distp = distance(p, pt); + if (distp < dist) { + dist = distp; + closest_pt = p; + t_root = t; + } + if (upper_bound >= 1.f) { + break; + } + lower_bound = upper_bound; + } + } else { + assert(false); + } + if (dist < min_dist) { + min_dist = dist; + ret_pt = closest_pt; + path_info->base_point_id = base_point_id; + path_info->point_id = point_id; + path_info->t_root = t_root; + found = true; + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if (within_distance(b0, pt, min_dist)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (within_distance(b1, pt, min_dist)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_size); + } + } + if (found) { + assert(path_info->base_point_id < num_segments); + } + *result = ret_pt; + return found; +} + +DEVICE +inline +bool closest_point(const Rect &rect, const Vector2f &pt, + Vector2f *result) { + auto min_dist = 0.f; + auto closest_pt = 
Vector2f{0, 0}; + auto update = [&](const Vector2f &p0, const Vector2f &p1, bool first) { + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + if (t < 0) { + auto d = distance(p0, pt); + if (first || d < min_dist) { + min_dist = d; + closest_pt = p0; + } + } else if (t > 1) { + auto d = distance(p1, pt); + if (first || d < min_dist) { + min_dist = d; + closest_pt = p1; + } + } else { + auto p = p0 + t * (p1 - p0); + auto d = distance(p, pt); + if (first || d < min_dist) { + min_dist = d; + closest_pt = p0; + } + } + }; + auto left_top = rect.p_min; + auto right_top = Vector2f{rect.p_max.x, rect.p_min.y}; + auto left_bottom = Vector2f{rect.p_min.x, rect.p_max.y}; + auto right_bottom = rect.p_max; + update(left_top, left_bottom, true); + update(left_top, right_top, false); + update(right_top, right_bottom, false); + update(left_bottom, right_bottom, false); + *result = closest_pt; + return true; +} + +DEVICE +inline +bool closest_point(const Shape &shape, const BVHNode *bvh_nodes, const Vector2f &pt, float max_radius, + ClosestPointPathInfo *path_info, + Vector2f *result) { + switch (shape.type) { + case ShapeType::Circle: + return closest_point(*(const Circle *)shape.ptr, pt, result); + case ShapeType::Ellipse: + // https://www.geometrictools.com/Documentation/DistancePointEllipseEllipsoid.pdf + assert(false); + return false; + case ShapeType::Path: + return closest_point(*(const Path *)shape.ptr, bvh_nodes, pt, max_radius, path_info, result); + case ShapeType::Rect: + return closest_point(*(const Rect *)shape.ptr, pt, result); + } + assert(false); + return false; +} + +DEVICE +inline +bool compute_distance(const SceneData &scene, + int shape_group_id, + const Vector2f &pt, + float max_radius, + int *min_shape_id, + Vector2f *closest_pt_, + ClosestPointPathInfo *path_info, + float *result) { + const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; + // pt is in canvas space, transform it to shape's local space + auto 
local_pt = xform_pt(shape_group.canvas_to_shape, pt); + + constexpr auto max_bvh_stack_size = 64; + int bvh_stack[max_bvh_stack_size]; + auto stack_size = 0; + bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2; + const auto &bvh_nodes = scene.shape_groups_bvh_nodes[shape_group_id]; + + auto min_dist = max_radius; + auto found = false; + + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto shape_id = node.child0; + const auto &shape = scene.shapes[shape_id]; + ClosestPointPathInfo local_path_info{-1, -1}; + auto local_closest_pt = Vector2f{0, 0}; + if (closest_point(shape, scene.path_bvhs[shape_id], local_pt, max_radius, &local_path_info, &local_closest_pt)) { + auto closest_pt = xform_pt(shape_group.shape_to_canvas, local_closest_pt); + auto dist = distance(closest_pt, pt); + if (!found || dist < min_dist) { + found = true; + min_dist = dist; + if (min_shape_id != nullptr) { + *min_shape_id = shape_id; + } + if (closest_pt_ != nullptr) { + *closest_pt_ = closest_pt; + } + if (path_info != nullptr) { + *path_info = local_path_info; + } + } + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if (inside(b0, local_pt, max_radius)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (inside(b1, local_pt, max_radius)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_stack_size); + } + } + + *result = min_dist; + return found; +} + + +DEVICE +inline +void d_closest_point(const Circle &circle, + const Vector2f &pt, + const Vector2f &d_closest_pt, + Circle &d_circle, + Vector2f &d_pt) { + // return circle.center + circle.radius * normalize(pt - circle.center); + auto d_center = d_closest_pt * + (1 + d_normalize(pt - circle.center, circle.radius * d_closest_pt)); + atomic_add(&d_circle.center.x, d_center); + atomic_add(&d_circle.radius, dot(d_closest_pt, 
normalize(pt - circle.center))); +} + +DEVICE +inline +void d_closest_point(const Path &path, + const Vector2f &pt, + const Vector2f &d_closest_pt, + const ClosestPointPathInfo &path_info, + Path &d_path, + Vector2f &d_pt) { + auto base_point_id = path_info.base_point_id; + auto point_id = path_info.point_id; + auto min_t_root = path_info.t_root; + + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + auto d_p0 = Vector2f{0, 0}; + auto d_p1 = Vector2f{0, 0}; + if (t < 0) { + d_p0 += d_closest_pt; + } else if (t > 1) { + d_p1 += d_closest_pt; + } else { + auto d_p = d_closest_pt; + // p = p0 + t * (p1 - p0) + d_p0 += d_p * (1 - t); + d_p1 += d_p * t; + } + atomic_add(d_path.points + 2 * i0, d_p0); + atomic_add(d_path.points + 2 * i1, d_p1); + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + // auto eval = [&](float t) -> Vector2f { + // auto tt = 1 - t; + // return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2; + // }; + // auto dist0 = distance(eval(0), pt); + // auto dist1 = distance(eval(1), pt); + auto d_p0 = Vector2f{0, 0}; + auto d_p1 = Vector2f{0, 0}; + auto d_p2 = Vector2f{0, 0}; + auto t = min_t_root; + if (t == 0) { + d_p0 += d_closest_pt; + } else if (t == 1) { + d_p2 += d_closest_pt; + } else { + // The curve is (1-t)^2p0 + 2(1-t)tp1 + t^2p2 + // = (p0-2p1+p2)t^2+(-2p0+2p1)t+p0 = q + // Want to solve (q - pt) dot q' = 0 + // q' = 
(p0-2p1+p2)t + (-p0+p1) + // Expanding (p0-2p1+p2)^2 t^3 + + // 3(p0-2p1+p2)(-p0+p1) t^2 + + // (2(-p0+p1)^2+(p0-2p1+p2)(p0-pt))t + + // (-p0+p1)(p0-pt) = 0 + auto A = sum((p0-2*p1+p2)*(p0-2*p1+p2)); + auto B = sum(3*(p0-2*p1+p2)*(-p0+p1)); + auto C = sum(2*(-p0+p1)*(-p0+p1)+(p0-2*p1+p2)*(p0-pt)); + // auto D = sum((-p0+p1)*(p0-pt)); + auto d_p = d_closest_pt; + // p = eval(t) + auto tt = 1 - t; + // (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2 + auto d_tt = 2 * tt * dot(d_p, p0) + 2 * t * dot(d_p, p1); + auto d_t = -d_tt + 2 * tt * dot(d_p, p1) + 2 * t * dot(d_p, p2); + auto d_p0 = d_p * tt * tt; + auto d_p1 = 2 * d_p * tt * t; + auto d_p2 = d_p * t * t; + // implicit function theorem: dt/dA = -1/(p'(t)) * dp/dA + auto poly_deriv_t = 3 * A * t * t + 2 * B * t + C; + if (fabs(poly_deriv_t) > 1e-6f) { + auto d_A = - (d_t / poly_deriv_t) * t * t * t; + auto d_B = - (d_t / poly_deriv_t) * t * t; + auto d_C = - (d_t / poly_deriv_t) * t; + auto d_D = - (d_t / poly_deriv_t); + // A = sum((p0-2*p1+p2)*(p0-2*p1+p2)) + // B = sum(3*(p0-2*p1+p2)*(-p0+p1)) + // C = sum(2*(-p0+p1)*(-p0+p1)+(p0-2*p1+p2)*(p0-pt)) + // D = sum((-p0+p1)*(p0-pt)) + d_p0 += 2*d_A*(p0-2*p1+p2)+ + 3*d_B*((-p0+p1)-(p0-2*p1+p2))+ + 2*d_C*(-2*(-p0+p1))+ + d_C*((p0-pt)+(p0-2*p1+p2))+ + 2*d_D*(-(p0-pt)+(-p0+p1)); + d_p1 += (-2)*2*d_A*(p0-2*p1+p2)+ + 3*d_B*(-2*(-p0+p1)+(p0-2*p1+p2))+ + 2*d_C*(2*(-p0+p1))+ + d_C*((-2)*(p0-pt))+ + d_D*(p0-pt); + d_p2 += 2*d_A*(p0-2*p1+p2)+ + 3*d_B*(-p0+p1)+ + d_C*(p0-pt); + d_pt += d_C*(-(p0-2*p1+p2))+ + d_D*(-(-p0+p1)); + } + } + atomic_add(d_path.points + 2 * i0, d_p0); + atomic_add(d_path.points + 2 * i1, d_p1); + atomic_add(d_path.points + 2 * i2, d_p2); + } else if (path.num_control_points[base_point_id] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 
+ 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]}; + // auto eval = [&](float t) -> Vector2f { + // auto tt = 1 - t; + // return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3; + // }; + auto d_p0 = Vector2f{0, 0}; + auto d_p1 = Vector2f{0, 0}; + auto d_p2 = Vector2f{0, 0}; + auto d_p3 = Vector2f{0, 0}; + auto t = min_t_root; + if (t == 0) { + // closest_pt = p0 + d_p0 += d_closest_pt; + } else if (t == 1) { + // closest_pt = p1 + d_p3 += d_closest_pt; + } else { + // The curve is (1 - t)^3 p0 + 3 * (1 - t)^2 t p1 + 3 * (1 - t) t^2 p2 + t^3 p3 + // = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0 + // Want to solve (q - pt) dot q' = 0 + // q' = 3*(-p0+3p1-3p2+p3)t^2 + 2*(3p0-6p1+3p2)t + (-3p0+3p1) + // Expanding + // 3*(-p0+3p1-3p2+p3)^2 t^5 + // 5*(-p0+3p1-3p2+p3)(3p0-6p1+3p2) t^4 + // 4*(-p0+3p1-3p2+p3)(-3p0+3p1) + 2*(3p0-6p1+3p2)^2 t^3 + // 3*(3p0-6p1+3p2)(-3p0+3p1) + 3*(-p0+3p1-3p2+p3)(p0-pt) t^2 + // (-3p0+3p1)^2+2(p0-pt)(3p0-6p1+3p2) t + // (p0-pt)(-3p0+3p1) + double A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)); + double B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)); + double C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)); + double D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt))); + double E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)); + double F = sum((p0-pt)*(-3*p0+3*p1)); + B /= A; + C /= A; + D /= A; + E /= A; + F /= A; + // auto eval_polynomial = [&] (double t) { + // return t*t*t*t*t+ + // B*t*t*t*t+ + // C*t*t*t+ + // D*t*t+ + // E*t+ + // F; + // }; + auto eval_polynomial_deriv = [&] (double t) { + return 5*t*t*t*t+ + 4*B*t*t*t+ + 3*C*t*t+ + 2*D*t+ + E; + }; + + // auto p = eval(t); + auto d_p = d_closest_pt; + // (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3 + auto tt = 1 - t; + auto d_tt = 3 * tt * tt * dot(d_p, p0) + + 6 * 
tt * t * dot(d_p, p1) + + 3 * t * t * dot(d_p, p2); + auto d_t = -d_tt + + 3 * tt * tt * dot(d_p, p1) + + 6 * tt * t * dot(d_p, p2) + + 3 * t * t * dot(d_p, p3); + d_p0 += d_p * (tt * tt * tt); + d_p1 += d_p * (3 * tt * tt * t); + d_p2 += d_p * (3 * tt * t * t); + d_p3 += d_p * (t * t * t); + // implicit function theorem: dt/dA = -1/(p'(t)) * dp/dA + auto poly_deriv_t = eval_polynomial_deriv(t); + if (fabs(poly_deriv_t) > 1e-10f) { + auto d_B = -(d_t / poly_deriv_t) * t * t * t * t; + auto d_C = -(d_t / poly_deriv_t) * t * t * t; + auto d_D = -(d_t / poly_deriv_t) * t * t; + auto d_E = -(d_t / poly_deriv_t) * t; + auto d_F = -(d_t / poly_deriv_t); + // B = B' / A + // C = C' / A + // D = D' / A + // E = E' / A + // F = F' / A + auto d_A = -d_B * B / A + -d_C * C / A + -d_D * D / A + -d_E * E / A + -d_F * F / A; + d_B /= A; + d_C /= A; + d_D /= A; + d_E /= A; + d_F /= A; + { + double A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)) + 1e-3; + double B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)); + double C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)); + double D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt))); + double E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)); + double F = sum((p0-pt)*(-3*p0+3*p1)); + B /= A; + C /= A; + D /= A; + E /= A; + F /= A; + auto eval_polynomial = [&] (double t) { + return t*t*t*t*t+ + B*t*t*t*t+ + C*t*t*t+ + D*t*t+ + E*t+ + F; + }; + auto eval_polynomial_deriv = [&] (double t) { + return 5*t*t*t*t+ + 4*B*t*t*t+ + 3*C*t*t+ + 2*D*t+ + E; + }; + auto lb = t - 1e-2f; + auto ub = t + 1e-2f; + auto lb_eval = eval_polynomial(lb); + auto ub_eval = eval_polynomial(ub); + if (lb_eval > ub_eval) { + swap_(lb, ub); + } + auto t_ = 0.5f * (lb + ub); + auto num_iter = 20; + for (int it = 0; it < num_iter; it++) { + if (!(t_ >= lb && t_ <= ub)) { + t_ = 0.5f * (lb + ub); + } + auto value = eval_polynomial(t_); + if (fabs(value) < 1e-5f || it == num_iter - 1) { + 
break; + } + // The derivative may not be entirely accurate, + // but the bisection is going to handle this + if (value > 0.f) { + ub = t_; + } else { + lb = t_; + } + auto derivative = eval_polynomial_deriv(t); + t_ -= value / derivative; + } + } + // A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)) + d_p0 += d_A * 3 * (-1) * 2 * (-p0+3*p1-3*p2+p3); + d_p1 += d_A * 3 * 3 * 2 * (-p0+3*p1-3*p2+p3); + d_p2 += d_A * 3 * (-3) * 2 * (-p0+3*p1-3*p2+p3); + d_p3 += d_A * 3 * 1 * 2 * (-p0+3*p1-3*p2+p3); + // B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)) + d_p0 += d_B * 5 * ((-1) * (3*p0-6*p1+3*p2) + 3 * (-p0+3*p1-3*p2+p3)); + d_p1 += d_B * 5 * (3 * (3*p0-6*p1+3*p2) + (-6) * (-p0+3*p1-3*p2+p3)); + d_p2 += d_B * 5 * ((-3) * (3*p0-6*p1+3*p2) + 3 * (-p0+3*p1-3*p2+p3)); + d_p3 += d_B * 5 * (3*p0-6*p1+3*p2); + // C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)) + d_p0 += d_C * 4 * ((-1) * (-3*p0+3*p1) + (-3) * (-p0+3*p1-3*p2+p3)) + + d_C * 2 * (3 * 2 * (3*p0-6*p1+3*p2)); + d_p1 += d_C * 4 * (3 * (-3*p0+3*p1) + 3 * (-p0+3*p1-3*p2+p3)) + + d_C * 2 * ((-6) * 2 * (3*p0-6*p1+3*p2)); + d_p2 += d_C * 4 * ((-3) * (-3*p0+3*p1)) + + d_C * 2 * (3 * 2 * (3*p0-6*p1+3*p2)); + d_p3 += d_C * 4 * (-3*p0+3*p1); + // D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt))) + d_p0 += d_D * 3 * (3 * (-3*p0+3*p1) + (-3) * (3*p0-6*p1+3*p2)) + + d_D * 3 * ((-1) * (p0-pt) + 1 * (-p0+3*p1-3*p2+p3)); + d_p1 += d_D * 3 * ((-6) * (-3*p0+3*p1) + (3) * (3*p0-6*p1+3*p2)) + + d_D * 3 * (3 * (p0-pt)); + d_p2 += d_D * 3 * (3 * (-3*p0+3*p1)) + + d_D * 3 * ((-3) * (p0-pt)); + d_pt += d_D * 3 * ((-1) * (-p0+3*p1-3*p2+p3)); + // E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)) + d_p0 += d_E * ((-3) * 2 * (-3*p0+3*p1)) + + d_E * 2 * (1 * (3*p0-6*p1+3*p2) + 3 * (p0-pt)); + d_p1 += d_E * ( 3 * 2 * (-3*p0+3*p1)) + + d_E * 2 * ((-6) * (p0-pt)); + d_p2 += d_E * 2 * ( 3 * (p0-pt)); + d_pt += d_E * 2 * ((-1) * (3*p0-6*p1+3*p2)); + // F = 
sum((p0-pt)*(-3*p0+3*p1)) + d_p0 += d_F * (1 * (-3*p0+3*p1)) + + d_F * ((-3) * (p0-pt)); + d_p1 += d_F * (3 * (p0-pt)); + d_pt += d_F * ((-1) * (-3*p0+3*p1)); + } + } + atomic_add(d_path.points + 2 * i0, d_p0); + atomic_add(d_path.points + 2 * i1, d_p1); + atomic_add(d_path.points + 2 * i2, d_p2); + atomic_add(d_path.points + 2 * i3, d_p3); + } else { + assert(false); + } +} + +DEVICE +inline +void d_closest_point(const Rect &rect, + const Vector2f &pt, + const Vector2f &d_closest_pt, + Rect &d_rect, + Vector2f &d_pt) { + auto dist = [&](const Vector2f &p0, const Vector2f &p1) -> float { + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + if (t < 0) { + return distance(p0, pt); + } else if (t > 1) { + return distance(p1, pt); + } else { + return distance(p0 + t * (p1 - p0), pt); + } + // return 0; + }; + auto left_top = rect.p_min; + auto right_top = Vector2f{rect.p_max.x, rect.p_min.y}; + auto left_bottom = Vector2f{rect.p_min.x, rect.p_max.y}; + auto right_bottom = rect.p_max; + auto left_dist = dist(left_top, left_bottom); + auto top_dist = dist(left_top, right_top); + auto right_dist = dist(right_top, right_bottom); + auto bottom_dist = dist(left_bottom, right_bottom); + int min_id = 0; + auto min_dist = left_dist; + if (top_dist < min_dist) { min_dist = top_dist; min_id = 1; } + if (right_dist < min_dist) { min_dist = right_dist; min_id = 2; } + if (bottom_dist < min_dist) { min_dist = bottom_dist; min_id = 3; } + + auto d_update = [&](const Vector2f &p0, const Vector2f &p1, + const Vector2f &d_closest_pt, + Vector2f &d_p0, Vector2f &d_p1) { + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + if (t < 0) { + d_p0 += d_closest_pt; + } else if (t > 1) { + d_p1 += d_closest_pt; + } else { + // p = p0 + t * (p1 - p0) + auto d_p = d_closest_pt; + d_p0 += d_p * (1 - t); + d_p1 += d_p * t; + auto d_t = sum(d_p * (p1 - p0)); + // t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0) + auto d_numerator = d_t / 
dot(p1 - p0, p1 - p0); + auto d_denominator = d_t * (-t) / dot(p1 - p0, p1 - p0); + // numerator = dot(pt - p0, p1 - p0) + d_pt += (p1 - p0) * d_numerator; + d_p1 += (pt - p0) * d_numerator; + d_p0 += ((p0 - p1) + (p0 - pt)) * d_numerator; + // denominator = dot(p1 - p0, p1 - p0) + d_p1 += 2 * (p1 - p0) * d_denominator; + d_p0 += 2 * (p0 - p1) * d_denominator; + } + }; + auto d_left_top = Vector2f{0, 0}; + auto d_right_top = Vector2f{0, 0}; + auto d_left_bottom = Vector2f{0, 0}; + auto d_right_bottom = Vector2f{0, 0}; + if (min_id == 0) { + d_update(left_top, left_bottom, d_closest_pt, d_left_top, d_left_bottom); + } else if (min_id == 1) { + d_update(left_top, right_top, d_closest_pt, d_left_top, d_right_top); + } else if (min_id == 2) { + d_update(right_top, right_bottom, d_closest_pt, d_right_top, d_right_bottom); + } else { + assert(min_id == 3); + d_update(left_bottom, right_bottom, d_closest_pt, d_left_bottom, d_right_bottom); + } + auto d_p_min = Vector2f{0, 0}; + auto d_p_max = Vector2f{0, 0}; + // left_top = rect.p_min + // right_top = Vector2f{rect.p_max.x, rect.p_min.y} + // left_bottom = Vector2f{rect.p_min.x, rect.p_max.y} + // right_bottom = rect.p_max + d_p_min += d_left_top; + d_p_max.x += d_right_top.x; + d_p_min.y += d_right_top.y; + d_p_min.x += d_left_bottom.x; + d_p_max.y += d_left_bottom.y; + d_p_max += d_right_bottom; + atomic_add(d_rect.p_min, d_p_min); + atomic_add(d_rect.p_max, d_p_max); +} + +DEVICE +inline +void d_closest_point(const Shape &shape, + const Vector2f &pt, + const Vector2f &d_closest_pt, + const ClosestPointPathInfo &path_info, + Shape &d_shape, + Vector2f &d_pt) { + switch (shape.type) { + case ShapeType::Circle: + d_closest_point(*(const Circle *)shape.ptr, + pt, + d_closest_pt, + *(Circle *)d_shape.ptr, + d_pt); + break; + case ShapeType::Ellipse: + // https://www.geometrictools.com/Documentation/DistancePointEllipseEllipsoid.pdf + assert(false); + break; + case ShapeType::Path: + d_closest_point(*(const Path *)shape.ptr, 
+ pt, + d_closest_pt, + path_info, + *(Path *)d_shape.ptr, + d_pt); + break; + case ShapeType::Rect: + d_closest_point(*(const Rect *)shape.ptr, + pt, + d_closest_pt, + *(Rect *)d_shape.ptr, + d_pt); + break; + } +} + +DEVICE +inline +void d_compute_distance(const Matrix3x3f &canvas_to_shape, + const Matrix3x3f &shape_to_canvas, + const Shape &shape, + const Vector2f &pt, + const Vector2f &closest_pt, + const ClosestPointPathInfo &path_info, + float d_dist, + Matrix3x3f &d_shape_to_canvas, + Shape &d_shape, + float *d_translation) { + if (distance_squared(pt, closest_pt) < 1e-10f) { + // The derivative at distance=0 is undefined + return; + } + assert(isfinite(d_dist)); + // pt is in canvas space, transform it to shape's local space + auto local_pt = xform_pt(canvas_to_shape, pt); + auto local_closest_pt = xform_pt(canvas_to_shape, closest_pt); + // auto local_closest_pt = closest_point(shape, local_pt); + // auto closest_pt = xform_pt(shape_group.shape_to_canvas, local_closest_pt); + // auto dist = distance(closest_pt, pt); + auto d_pt = Vector2f{0, 0}; + auto d_closest_pt = Vector2f{0, 0}; + d_distance(closest_pt, pt, d_dist, d_closest_pt, d_pt); + assert(isfinite(d_pt)); + assert(isfinite(d_closest_pt)); + // auto closest_pt = xform_pt(shape_group.shape_to_canvas, local_closest_pt); + auto d_local_closest_pt = Vector2f{0, 0}; + auto d_shape_to_canvas_ = Matrix3x3f(); + d_xform_pt(shape_to_canvas, local_closest_pt, d_closest_pt, + d_shape_to_canvas_, d_local_closest_pt); + assert(isfinite(d_local_closest_pt)); + auto d_local_pt = Vector2f{0, 0}; + d_closest_point(shape, local_pt, d_local_closest_pt, path_info, d_shape, d_local_pt); + assert(isfinite(d_local_pt)); + auto d_canvas_to_shape = Matrix3x3f(); + d_xform_pt(canvas_to_shape, + pt, + d_local_pt, + d_canvas_to_shape, + d_pt); + // http://jack.valmadre.net/notes/2016/09/04/back-prop-differentials/#back-propagation-using-differentials + auto tc2s = transpose(canvas_to_shape); + d_shape_to_canvas_ += -tc2s * 
d_canvas_to_shape * tc2s; + atomic_add(&d_shape_to_canvas(0, 0), d_shape_to_canvas_); + if (d_translation != nullptr) { + atomic_add(d_translation, -d_pt); + } +} diff --git a/diffvg/cuda_utils.h b/diffvg/cuda_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..1e4609babc129a27397df72879bd6c8f55e71d1a --- /dev/null +++ b/diffvg/cuda_utils.h @@ -0,0 +1,53 @@ +#pragma once + +#ifdef __CUDACC__ + #include + #include +#endif +#include +#include +#include + +#ifdef __CUDACC__ +#define checkCuda(x) do { if((x)!=cudaSuccess) { \ + printf("CUDA Runtime Error: %s at %s:%d\n",\ + cudaGetErrorString(x),__FILE__,__LINE__);\ + exit(1);}} while(0) +#endif + +template +DEVICE +inline T infinity() { +#ifdef __CUDA_ARCH__ + const unsigned long long ieee754inf = 0x7ff0000000000000; + return __longlong_as_double(ieee754inf); +#else + return std::numeric_limits::infinity(); +#endif +} + +template <> +DEVICE +inline double infinity() { +#ifdef __CUDA_ARCH__ + return __longlong_as_double(0x7ff0000000000000ULL); +#else + return std::numeric_limits::infinity(); +#endif +} + +template <> +DEVICE +inline float infinity() { +#ifdef __CUDA_ARCH__ + return __int_as_float(0x7f800000); +#else + return std::numeric_limits::infinity(); +#endif +} + +inline void cuda_synchronize() { +#ifdef __CUDACC__ + checkCuda(cudaDeviceSynchronize()); +#endif +} diff --git a/diffvg/diffvg.cpp b/diffvg/diffvg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7346d24b758b135bdd402fdb67ea412f48419eb3 --- /dev/null +++ b/diffvg/diffvg.cpp @@ -0,0 +1,1792 @@ +#include "diffvg.h" +#include "aabb.h" +#include "shape.h" +#include "sample_boundary.h" +#include "atomic.h" +#include "cdf.h" +#include "compute_distance.h" +#include "cuda_utils.h" +#include "edge_query.h" +#include "filter.h" +#include "matrix.h" +#include "parallel.h" +#include "pcg.h" +#include "ptr.h" +#include "scene.h" +#include "vector.h" +#include "winding_number.h" +#include "within_distance.h" 
+#include +#include +#include +#include +#include + +namespace py = pybind11; + +struct Command { + int shape_group_id; + int shape_id; + int point_id; // Only used by path +}; + +DEVICE +bool is_inside(const SceneData &scene_data, + int shape_group_id, + const Vector2f &pt, + EdgeQuery *edge_query) { + const ShapeGroup &shape_group = scene_data.shape_groups[shape_group_id]; + // pt is in canvas space, transform it to shape's local space + auto local_pt = xform_pt(shape_group.canvas_to_shape, pt); + const auto &bvh_nodes = scene_data.shape_groups_bvh_nodes[shape_group_id]; + const AABB &bbox = bvh_nodes[2 * shape_group.num_shapes - 2].box; + if (!inside(bbox, local_pt)) { + return false; + } + auto winding_number = 0; + // Traverse the shape group BVH + constexpr auto max_bvh_stack_size = 64; + int bvh_stack[max_bvh_stack_size]; + auto stack_size = 0; + bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2; + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto shape_id = node.child0; + auto w = compute_winding_number( + scene_data.shapes[shape_id], scene_data.path_bvhs[shape_id], local_pt); + winding_number += w; + if (edge_query != nullptr) { + if (edge_query->shape_group_id == shape_group_id && + edge_query->shape_id == shape_id) { + if ((shape_group.use_even_odd_rule && abs(w) % 2 == 1) || + (!shape_group.use_even_odd_rule && w != 0)) { + edge_query->hit = true; + } + } + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if (inside(b0, local_pt)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (inside(b1, local_pt)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_stack_size); + } + } + if (shape_group.use_even_odd_rule) { + return abs(winding_number) % 2 == 1; + } else { + return winding_number != 0; + } +} + +DEVICE void 
accumulate_boundary_gradient(const Shape &shape, + float contrib, + float t, + const Vector2f &normal, + const BoundaryData &boundary_data, + Shape &d_shape, + const Matrix3x3f &shape_to_canvas, + const Vector2f &local_boundary_pt, + Matrix3x3f &d_shape_to_canvas) { + assert(isfinite(contrib)); + assert(isfinite(normal)); + // According to Reynold transport theorem, + // the Jacobian of the boundary integral is dot(velocity, normal), + // where the velocity depends on the variable being differentiated with. + if (boundary_data.is_stroke) { + auto has_path_thickness = false; + if (shape.type == ShapeType::Path) { + const Path &path = *(const Path *)shape.ptr; + has_path_thickness = path.thickness != nullptr; + } + // differentiate stroke width: velocity is the same as normal + if (has_path_thickness) { + Path *d_p = (Path*)d_shape.ptr; + auto base_point_id = boundary_data.path.base_point_id; + auto point_id = boundary_data.path.point_id; + auto t = boundary_data.path.t; + const Path &path = *(const Path *)shape.ptr; + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + // r = r0 + t * (r1 - r0) + atomic_add(&d_p->thickness[i0], (1 - t) * contrib); + atomic_add(&d_p->thickness[i1], ( t) * contrib); + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + // r = (1-t)^2r0 + 2(1-t)t r1 + t^2 r2 + atomic_add(&d_p->thickness[i0], square(1 - t) * contrib); + atomic_add(&d_p->thickness[i1], (2*(1-t)*t) * contrib); + atomic_add(&d_p->thickness[i2], (t*t) * contrib); + } else if (path.num_control_points[base_point_id] == 2) { + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + // r = (1-t)^3r0 + 3*(1-t)^2tr1 + 3*(1-t)t^2r2 + t^3r3 + atomic_add(&d_p->thickness[i0], cubic(1 - t) * contrib); + 
atomic_add(&d_p->thickness[i1], 3 * square(1 - t) * t * contrib); + atomic_add(&d_p->thickness[i2], 3 * (1 - t) * t * t * contrib); + atomic_add(&d_p->thickness[i3], t * t * t * contrib); + } else { + assert(false); + } + } else { + atomic_add(&d_shape.stroke_width, contrib); + } + } + switch (shape.type) { + case ShapeType::Circle: { + Circle *d_p = (Circle*)d_shape.ptr; + // velocity for the center is (1, 0) for x and (0, 1) for y + atomic_add(&d_p->center[0], normal * contrib); + // velocity for the radius is the same as the normal + atomic_add(&d_p->radius, contrib); + break; + } case ShapeType::Ellipse: { + Ellipse *d_p = (Ellipse*)d_shape.ptr; + // velocity for the center is (1, 0) for x and (0, 1) for y + atomic_add(&d_p->center[0], normal * contrib); + // velocity for the radius: + // x = center.x + r.x * cos(2pi * t) + // y = center.y + r.y * sin(2pi * t) + // for r.x: (cos(2pi * t), 0) + // for r.y: (0, sin(2pi * t)) + atomic_add(&d_p->radius.x, cos(2 * float(M_PI) * t) * normal.x * contrib); + atomic_add(&d_p->radius.y, sin(2 * float(M_PI) * t) * normal.y * contrib); + break; + } case ShapeType::Path: { + Path *d_p = (Path*)d_shape.ptr; + auto base_point_id = boundary_data.path.base_point_id; + auto point_id = boundary_data.path.point_id; + auto t = boundary_data.path.t; + const Path &path = *(const Path *)shape.ptr; + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + // pt = p0 + t * (p1 - p0) + // velocity for p0.x: (1 - t, 0) + // p0.y: ( 0, 1 - t) + // p1.x: ( t, 0) + // p1.y: ( 0, t) + atomic_add(&d_p->points[2 * i0 + 0], (1 - t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i0 + 1], (1 - t) * normal.y * contrib); + atomic_add(&d_p->points[2 * i1 + 0], ( t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i1 + 1], ( t) * normal.y * contrib); + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; 
+ auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + // pt = (1-t)^2p0 + 2(1-t)t p1 + t^2 p2 + // velocity for p0.x: ((1-t)^2, 0) + // p0.y: ( 0, (1-t)^2) + // p1.x: (2(1-t)t, 0) + // p1.y: ( 0, 2(1-t)t) + // p1.x: ( t^2, 0) + // p1.y: ( 0, t^2) + atomic_add(&d_p->points[2 * i0 + 0], square(1 - t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i0 + 1], square(1 - t) * normal.y * contrib); + atomic_add(&d_p->points[2 * i1 + 0], (2*(1-t)*t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i1 + 1], (2*(1-t)*t) * normal.y * contrib); + atomic_add(&d_p->points[2 * i2 + 0], (t*t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i2 + 1], (t*t) * normal.y * contrib); + } else if (path.num_control_points[base_point_id] == 2) { + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + // pt = (1-t)^3p0 + 3*(1-t)^2tp1 + 3*(1-t)t^2p2 + t^3p3 + // velocity for p0.x: ( (1-t)^3, 0) + // p0.y: ( 0, (1-t)^3) + // p1.x: (3*(1-t)^2t, 0) + // p1.y: ( 0, 3*(1-t)^2t) + // p2.x: (3*(1-t)t^2, 0) + // p2.y: ( 0, 3*(1-t)t^2) + // p2.x: ( t^3, 0) + // p2.y: ( 0, t^3) + atomic_add(&d_p->points[2 * i0 + 0], cubic(1 - t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i0 + 1], cubic(1 - t) * normal.y * contrib); + atomic_add(&d_p->points[2 * i1 + 0], 3 * square(1 - t) * t * normal.x * contrib); + atomic_add(&d_p->points[2 * i1 + 1], 3 * square(1 - t) * t * normal.y * contrib); + atomic_add(&d_p->points[2 * i2 + 0], 3 * (1 - t) * t * t * normal.x * contrib); + atomic_add(&d_p->points[2 * i2 + 1], 3 * (1 - t) * t * t * normal.y * contrib); + atomic_add(&d_p->points[2 * i3 + 0], t * t * t * normal.x * contrib); + atomic_add(&d_p->points[2 * i3 + 1], t * t * t * normal.y * contrib); + } else { + assert(false); + } + break; + } case ShapeType::Rect: { + Rect *d_p = (Rect*)d_shape.ptr; + // The velocity depends on the position of the boundary + if (normal == Vector2f{-1, 0}) { + // left + // 
velocity for p_min is (1, 0) for x and (0, 0) for y + atomic_add(&d_p->p_min.x, -contrib); + } else if (normal == Vector2f{1, 0}) { + // right + // velocity for p_max is (1, 0) for x and (0, 0) for y + atomic_add(&d_p->p_max.x, contrib); + } else if (normal == Vector2f{0, -1}) { + // top + // velocity for p_min is (0, 0) for x and (0, 1) for y + atomic_add(&d_p->p_min.y, -contrib); + } else if (normal == Vector2f{0, 1}) { + // bottom + // velocity for p_max is (0, 0) for x and (0, 1) for y + atomic_add(&d_p->p_max.y, contrib); + } else { + // incorrect normal assignment? + assert(false); + } + break; + } default: { + assert(false); + break; + } + } + // for shape_to_canvas we have the following relationship: + // boundary_pt = xform_pt(shape_to_canvas, local_pt) + // the velocity is the derivative of boundary_pt with respect to shape_to_canvas + // we can use reverse-mode AD to compute the dot product of the velocity and the Jacobian + // by passing the normal in d_xform_pt + auto d_shape_to_canvas_ = Matrix3x3f(); + auto d_local_boundary_pt = Vector2f{0, 0}; + d_xform_pt(shape_to_canvas, + local_boundary_pt, + normal * contrib, + d_shape_to_canvas_, + d_local_boundary_pt); + atomic_add(&d_shape_to_canvas(0, 0), d_shape_to_canvas_); +} + +DEVICE +Vector4f sample_color(const ColorType &color_type, + void *color, + const Vector2f &pt) { + switch (color_type) { + case ColorType::Constant: { + auto c = (const Constant*)color; + assert(isfinite(c->color)); + return c->color; + } case ColorType::LinearGradient: { + auto c = (const LinearGradient*)color; + // Project pt to (c->begin, c->end) + auto beg = c->begin; + auto end = c->end; + auto t = dot(pt - beg, end - beg) / max(dot(end - beg, end - beg), 1e-3f); + // Find the correponding stop: + if (t < c->stop_offsets[0]) { + return Vector4f{c->stop_colors[0], + c->stop_colors[1], + c->stop_colors[2], + c->stop_colors[3]}; + } + for (int i = 0; i < c->num_stops - 1; i++) { + auto offset_curr = c->stop_offsets[i]; + auto 
offset_next = c->stop_offsets[i + 1]; + assert(offset_next > offset_curr); + if (t >= offset_curr && t < offset_next) { + auto color_curr = Vector4f{ + c->stop_colors[4 * i + 0], + c->stop_colors[4 * i + 1], + c->stop_colors[4 * i + 2], + c->stop_colors[4 * i + 3]}; + auto color_next = Vector4f{ + c->stop_colors[4 * (i + 1) + 0], + c->stop_colors[4 * (i + 1) + 1], + c->stop_colors[4 * (i + 1) + 2], + c->stop_colors[4 * (i + 1) + 3]}; + auto tt = (t - offset_curr) / (offset_next - offset_curr); + assert(isfinite(tt)); + assert(isfinite(color_curr)); + assert(isfinite(color_next)); + return color_curr * (1 - tt) + color_next * tt; + } + } + return Vector4f{c->stop_colors[4 * (c->num_stops - 1) + 0], + c->stop_colors[4 * (c->num_stops - 1) + 1], + c->stop_colors[4 * (c->num_stops - 1) + 2], + c->stop_colors[4 * (c->num_stops - 1) + 3]}; + } case ColorType::RadialGradient: { + auto c = (const RadialGradient*)color; + // Distance from pt to center + auto offset = pt - c->center; + auto normalized_offset = offset / c->radius; + auto t = length(normalized_offset); + // Find the correponding stop: + if (t < c->stop_offsets[0]) { + return Vector4f{c->stop_colors[0], + c->stop_colors[1], + c->stop_colors[2], + c->stop_colors[3]}; + } + for (int i = 0; i < c->num_stops - 1; i++) { + auto offset_curr = c->stop_offsets[i]; + auto offset_next = c->stop_offsets[i + 1]; + assert(offset_next > offset_curr); + if (t >= offset_curr && t < offset_next) { + auto color_curr = Vector4f{ + c->stop_colors[4 * i + 0], + c->stop_colors[4 * i + 1], + c->stop_colors[4 * i + 2], + c->stop_colors[4 * i + 3]}; + auto color_next = Vector4f{ + c->stop_colors[4 * (i + 1) + 0], + c->stop_colors[4 * (i + 1) + 1], + c->stop_colors[4 * (i + 1) + 2], + c->stop_colors[4 * (i + 1) + 3]}; + auto tt = (t - offset_curr) / (offset_next - offset_curr); + assert(isfinite(tt)); + assert(isfinite(color_curr)); + assert(isfinite(color_next)); + return color_curr * (1 - tt) + color_next * tt; + } + } + return 
Vector4f{c->stop_colors[4 * (c->num_stops - 1) + 0], + c->stop_colors[4 * (c->num_stops - 1) + 1], + c->stop_colors[4 * (c->num_stops - 1) + 2], + c->stop_colors[4 * (c->num_stops - 1) + 3]}; + } default: { + assert(false); + } + } + return Vector4f{}; +} + +DEVICE +void d_sample_color(const ColorType &color_type, + void *color_ptr, + const Vector2f &pt, + const Vector4f &d_color, + void *d_color_ptr, + float *d_translation) { + switch (color_type) { + case ColorType::Constant: { + auto d_c = (Constant*)d_color_ptr; + atomic_add(&d_c->color[0], d_color); + return; + } case ColorType::LinearGradient: { + auto c = (const LinearGradient*)color_ptr; + auto d_c = (LinearGradient*)d_color_ptr; + // Project pt to (c->begin, c->end) + auto beg = c->begin; + auto end = c->end; + auto t = dot(pt - beg, end - beg) / max(dot(end - beg, end - beg), 1e-3f); + // Find the correponding stop: + if (t < c->stop_offsets[0]) { + atomic_add(&d_c->stop_colors[0], d_color); + return; + } + for (int i = 0; i < c->num_stops - 1; i++) { + auto offset_curr = c->stop_offsets[i]; + auto offset_next = c->stop_offsets[i + 1]; + assert(offset_next > offset_curr); + if (t >= offset_curr && t < offset_next) { + auto color_curr = Vector4f{ + c->stop_colors[4 * i + 0], + c->stop_colors[4 * i + 1], + c->stop_colors[4 * i + 2], + c->stop_colors[4 * i + 3]}; + auto color_next = Vector4f{ + c->stop_colors[4 * (i + 1) + 0], + c->stop_colors[4 * (i + 1) + 1], + c->stop_colors[4 * (i + 1) + 2], + c->stop_colors[4 * (i + 1) + 3]}; + auto tt = (t - offset_curr) / (offset_next - offset_curr); + // return color_curr * (1 - tt) + color_next * tt; + auto d_color_curr = d_color * (1 - tt); + auto d_color_next = d_color * tt; + auto d_tt = sum(d_color * (color_next - color_curr)); + auto d_offset_next = -d_tt * tt / (offset_next - offset_curr); + auto d_offset_curr = d_tt * ((tt - 1.f) / (offset_next - offset_curr)); + auto d_t = d_tt / (offset_next - offset_curr); + assert(isfinite(d_tt)); + 
atomic_add(&d_c->stop_colors[4 * i], d_color_curr); + atomic_add(&d_c->stop_colors[4 * (i + 1)], d_color_next); + atomic_add(&d_c->stop_offsets[i], d_offset_curr); + atomic_add(&d_c->stop_offsets[i + 1], d_offset_next); + // auto t = dot(pt - beg, end - beg) / max(dot(end - beg, end - beg), 1e-6f); + // l = max(dot(end - beg, end - beg), 1e-3f) + // t = dot(pt - beg, end - beg) / l; + auto l = max(dot(end - beg, end - beg), 1e-3f); + auto d_beg = d_t * (-(pt - beg)-(end - beg)) / l; + auto d_end = d_t * (pt - beg) / l; + auto d_l = -d_t * t / l; + if (dot(end - beg, end - beg) > 1e-3f) { + d_beg += 2 * d_l * (beg - end); + d_end += 2 * d_l * (end - beg); + } + atomic_add(&d_c->begin[0], d_beg); + atomic_add(&d_c->end[0], d_end); + if (d_translation != nullptr) { + atomic_add(d_translation, (d_beg + d_end)); + } + return; + } + } + atomic_add(&d_c->stop_colors[4 * (c->num_stops - 1)], d_color); + return; + } case ColorType::RadialGradient: { + auto c = (const RadialGradient*)color_ptr; + auto d_c = (RadialGradient*)d_color_ptr; + // Distance from pt to center + auto offset = pt - c->center; + auto normalized_offset = offset / c->radius; + auto t = length(normalized_offset); + // Find the correponding stop: + if (t < c->stop_offsets[0]) { + atomic_add(&d_c->stop_colors[0], d_color); + return; + } + for (int i = 0; i < c->num_stops - 1; i++) { + auto offset_curr = c->stop_offsets[i]; + auto offset_next = c->stop_offsets[i + 1]; + assert(offset_next > offset_curr); + if (t >= offset_curr && t < offset_next) { + auto color_curr = Vector4f{ + c->stop_colors[4 * i + 0], + c->stop_colors[4 * i + 1], + c->stop_colors[4 * i + 2], + c->stop_colors[4 * i + 3]}; + auto color_next = Vector4f{ + c->stop_colors[4 * (i + 1) + 0], + c->stop_colors[4 * (i + 1) + 1], + c->stop_colors[4 * (i + 1) + 2], + c->stop_colors[4 * (i + 1) + 3]}; + auto tt = (t - offset_curr) / (offset_next - offset_curr); + assert(isfinite(tt)); + // return color_curr * (1 - tt) + color_next * tt; + auto 
d_color_curr = d_color * (1 - tt); + auto d_color_next = d_color * tt; + auto d_tt = sum(d_color * (color_next - color_curr)); + auto d_offset_next = -d_tt * tt / (offset_next - offset_curr); + auto d_offset_curr = d_tt * ((tt - 1.f) / (offset_next - offset_curr)); + auto d_t = d_tt / (offset_next - offset_curr); + assert(isfinite(d_t)); + atomic_add(&d_c->stop_colors[4 * i], d_color_curr); + atomic_add(&d_c->stop_colors[4 * (i + 1)], d_color_next); + atomic_add(&d_c->stop_offsets[i], d_offset_curr); + atomic_add(&d_c->stop_offsets[i + 1], d_offset_next); + // offset = pt - c->center + // normalized_offset = offset / c->radius + // t = length(normalized_offset) + auto d_normalized_offset = d_length(normalized_offset, d_t); + auto d_offset = d_normalized_offset / c->radius; + auto d_radius = -d_normalized_offset * offset / (c->radius * c->radius); + auto d_center = -d_offset; + atomic_add(&d_c->center[0], d_center); + atomic_add(&d_c->radius[0], d_radius); + if (d_translation != nullptr) { + atomic_add(d_translation, d_center); + } + } + } + atomic_add(&d_c->stop_colors[4 * (c->num_stops - 1)], d_color); + return; + } default: { + assert(false); + } + } +} + +struct Fragment { + Vector3f color; + float alpha; + int group_id; + bool is_stroke; +}; + +struct PrefilterFragment { + Vector3f color; + float alpha; + int group_id; + bool is_stroke; + int shape_id; + float distance; + Vector2f closest_pt; + ClosestPointPathInfo path_info; + bool within_distance; +}; + +DEVICE +Vector4f sample_color(const SceneData &scene, + const Vector4f *background_color, + const Vector2f &screen_pt, + const Vector4f *d_color = nullptr, + EdgeQuery *edge_query = nullptr, + Vector4f *d_background_color = nullptr, + float *d_translation = nullptr) { + if (edge_query != nullptr) { + edge_query->hit = false; + } + + // screen_pt is in screen space ([0, 1), [0, 1)), + // need to transform to canvas space + auto pt = screen_pt; + pt.x *= scene.canvas_width; + pt.y *= scene.canvas_height; + 
    // Traverse the shape-group BVH and collect every stroke/fill fragment
    // covering pt, then composite them in group order (below).
    constexpr auto max_hit_shapes = 256;
    constexpr auto max_bvh_stack_size = 64;
    Fragment fragments[max_hit_shapes];
    int bvh_stack[max_bvh_stack_size];
    auto stack_size = 0;
    auto num_fragments = 0;
    // The BVH root is the last node: 2N-2 for N leaf shape groups.
    bvh_stack[stack_size++] = 2 * scene.num_shape_groups - 2;
    while (stack_size > 0) {
        const BVHNode &node = scene.bvh_nodes[bvh_stack[--stack_size]];
        if (node.child1 < 0) {
            // leaf
            auto group_id = node.child0;
            const ShapeGroup &shape_group = scene.shape_groups[group_id];
            if (shape_group.stroke_color != nullptr) {
                // Stroke hit: pt lies within the stroke distance of the outline.
                if (within_distance(scene, group_id, pt, edge_query)) {
                    auto color_alpha = sample_color(shape_group.stroke_color_type,
                                                    shape_group.stroke_color,
                                                    pt);
                    Fragment f;
                    f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]};
                    f.alpha = color_alpha[3];
                    f.group_id = group_id;
                    f.is_stroke = true;
                    assert(num_fragments < max_hit_shapes);
                    fragments[num_fragments++] = f;
                }
            }
            if (shape_group.fill_color != nullptr) {
                // Fill hit: pt is inside the shape.
                if (is_inside(scene, group_id, pt, edge_query)) {
                    auto color_alpha = sample_color(shape_group.fill_color_type,
                                                    shape_group.fill_color,
                                                    pt);
                    Fragment f;
                    f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]};
                    f.alpha = color_alpha[3];
                    f.group_id = group_id;
                    f.is_stroke = false;
                    assert(num_fragments < max_hit_shapes);
                    fragments[num_fragments++] = f;
                }
            }
        } else {
            // Inner node: descend into each child whose box, expanded by the
            // child's maximal stroke radius, contains pt.
            assert(node.child0 >= 0 && node.child1 >= 0);
            const AABB &b0 = scene.bvh_nodes[node.child0].box;
            if (inside(b0, pt, scene.bvh_nodes[node.child0].max_radius)) {
                bvh_stack[stack_size++] = node.child0;
            }
            const AABB &b1 = scene.bvh_nodes[node.child1].box;
            if (inside(b1, pt, scene.bvh_nodes[node.child1].max_radius)) {
                bvh_stack[stack_size++] = node.child1;
            }
            assert(stack_size <= max_bvh_stack_size);
        }
    }
    if (num_fragments <= 0) {
        // Nothing hit: the sample is pure background; route the full adjoint
        // to the background color if requested.
        if (background_color != nullptr) {
            if (d_background_color != nullptr) {
                *d_background_color = *d_color;
            }
            return *background_color;
        }
        return Vector4f{0, 0, 0, 0};
    }
    // Sort the fragments from back to front (i.e. increasing order of group id)
    // https://github.com/frigaut/yorick-imutil/blob/master/insort.c#L37
    for (int i = 1; i < num_fragments; i++) {
        // Insertion sort: fragment counts are small and typically nearly sorted.
        auto j = i;
        auto temp = fragments[j];
        while (j > 0 && fragments[j - 1].group_id > temp.group_id) {
            fragments[j] = fragments[j - 1];
            j--;
        }
        fragments[j] = temp;
    }
    // Blend the color
    Vector3f accum_color[max_hit_shapes];
    float accum_alpha[max_hit_shapes];
    // auto hit_opaque = false;
    auto first_alpha = 0.f;
    auto first_color = Vector3f{0, 0, 0};
    if (background_color != nullptr) {
        first_alpha = background_color->w;
        first_color = Vector3f{background_color->x,
                               background_color->y,
                               background_color->z};
    }
    for (int i = 0; i < num_fragments; i++) {
        // "over" compositing, accumulated per fragment so the backward pass
        // below can replay intermediate states from accum_color/accum_alpha.
        const Fragment &fragment = fragments[i];
        auto new_color = fragment.color;
        auto new_alpha = fragment.alpha;
        auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha;
        auto prev_color = i > 0 ? accum_color[i - 1] : first_color;
        if (edge_query != nullptr) {
            // Do we hit the target shape?
+ if (new_alpha >= 1.f && edge_query->hit) { + // A fully opaque shape in front of the target occludes it + edge_query->hit = false; + } + if (edge_query->shape_group_id == fragment.group_id) { + edge_query->hit = true; + } + } + // prev_color is alpha premultiplied, don't need to multiply with + // prev_alpha + accum_color[i] = prev_color * (1 - new_alpha) + new_alpha * new_color; + accum_alpha[i] = prev_alpha * (1 - new_alpha) + new_alpha; + } + auto final_color = accum_color[num_fragments - 1]; + auto final_alpha = accum_alpha[num_fragments - 1]; + if (final_alpha > 1e-6f) { + final_color /= final_alpha; + } + assert(isfinite(final_color)); + assert(isfinite(final_alpha)); + if (d_color != nullptr) { + // Backward pass + auto d_final_color = Vector3f{(*d_color)[0], (*d_color)[1], (*d_color)[2]}; + auto d_final_alpha = (*d_color)[3]; + auto d_curr_color = d_final_color; + auto d_curr_alpha = d_final_alpha; + if (final_alpha > 1e-6f) { + // final_color = curr_color / final_alpha + d_curr_color = d_final_color / final_alpha; + d_curr_alpha -= sum(d_final_color * final_color) / final_alpha; + } + assert(isfinite(*d_color)); + assert(isfinite(d_curr_color)); + assert(isfinite(d_curr_alpha)); + for (int i = num_fragments - 1; i >= 0; i--) { + // color[n] = prev_color * (1 - new_alpha) + new_alpha * new_color; + // alpha[n] = prev_alpha * (1 - new_alpha) + new_alpha; + auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha; + auto prev_color = i > 0 ? 
accum_color[i - 1] : first_color; + auto d_prev_alpha = d_curr_alpha * (1.f - fragments[i].alpha); + auto d_alpha_i = d_curr_alpha * (1.f - prev_alpha); + d_alpha_i += sum(d_curr_color * (fragments[i].color - prev_color)); + auto d_prev_color = d_curr_color * (1 - fragments[i].alpha); + auto d_color_i = d_curr_color * fragments[i].alpha; + auto group_id = fragments[i].group_id; + if (fragments[i].is_stroke) { + d_sample_color(scene.shape_groups[group_id].stroke_color_type, + scene.shape_groups[group_id].stroke_color, + pt, + Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i}, + scene.d_shape_groups[group_id].stroke_color, + d_translation); + } else { + d_sample_color(scene.shape_groups[group_id].fill_color_type, + scene.shape_groups[group_id].fill_color, + pt, + Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i}, + scene.d_shape_groups[group_id].fill_color, + d_translation); + } + d_curr_color = d_prev_color; + d_curr_alpha = d_prev_alpha; + } + if (d_background_color != nullptr) { + d_background_color->x += d_curr_color.x; + d_background_color->y += d_curr_color.y; + d_background_color->z += d_curr_color.z; + d_background_color->w += d_curr_alpha; + } + } + return Vector4f{final_color[0], final_color[1], final_color[2], final_alpha}; +} + +DEVICE +float sample_distance(const SceneData &scene, + const Vector2f &screen_pt, + float weight, + const float *d_dist = nullptr, + float *d_translation = nullptr) { + // screen_pt is in screen space ([0, 1), [0, 1)), + // need to transform to canvas space + auto pt = screen_pt; + pt.x *= scene.canvas_width; + pt.y *= scene.canvas_height; + // for each shape + auto min_group_id = -1; + auto min_distance = 0.f; + auto min_shape_id = -1; + auto closest_pt = Vector2f{0, 0}; + auto min_path_info = ClosestPointPathInfo{-1, -1, 0}; + for (int group_id = scene.num_shape_groups - 1; group_id >= 0; group_id--) { + auto s = -1; + auto p = Vector2f{0, 0}; + ClosestPointPathInfo local_path_info; + auto d = 
infinity(); + if (compute_distance(scene, group_id, pt, infinity(), &s, &p, &local_path_info, &d)) { + if (min_group_id == -1 || d < min_distance) { + min_distance = d; + min_group_id = group_id; + min_shape_id = s; + closest_pt = p; + min_path_info = local_path_info; + } + } + } + if (min_group_id == -1) { + return min_distance; + } + min_distance *= weight; + auto inside = false; + const ShapeGroup &shape_group = scene.shape_groups[min_group_id]; + if (shape_group.fill_color != nullptr) { + inside = is_inside(scene, + min_group_id, + pt, + nullptr); + if (inside) { + min_distance = -min_distance; + } + } + assert((min_group_id >= 0 && min_shape_id >= 0) || scene.num_shape_groups == 0); + if (d_dist != nullptr) { + auto d_abs_dist = inside ? -(*d_dist) : (*d_dist); + const ShapeGroup &shape_group = scene.shape_groups[min_group_id]; + const Shape &shape = scene.shapes[min_shape_id]; + ShapeGroup &d_shape_group = scene.d_shape_groups[min_group_id]; + Shape &d_shape = scene.d_shapes[min_shape_id]; + d_compute_distance(shape_group.canvas_to_shape, + shape_group.shape_to_canvas, + shape, + pt, + closest_pt, + min_path_info, + d_abs_dist, + d_shape_group.shape_to_canvas, + d_shape, + d_translation); + } + return min_distance; +} + +// Gather d_color from d_image inside the filter kernel, normalize by +// weight_image. 
DEVICE
Vector4f gather_d_color(const Filter &filter,
                        const float *d_color_image,
                        const float *weight_image,
                        int width,
                        int height,
                        const Vector2f &pt) {
    // Adjoint of the reconstruction-filter splat: every pixel within the
    // filter radius of pt received a weighted share of this sample's color,
    // so its gradient flows back to the sample with the same weight.
    auto x = int(pt.x);
    auto y = int(pt.y);
    auto radius = filter.radius;
    assert(radius > 0);
    auto ri = (int)ceil(radius);
    auto d_color = Vector4f{0, 0, 0, 0};
    for (int dy = -ri; dy <= ri; dy++) {
        for (int dx = -ri; dx <= ri; dx++) {
            auto xx = x + dx;
            auto yy = y + dy;
            if (xx >= 0 && xx < width && yy >= 0 && yy < height) {
                // Evaluate the filter at the pixel center relative to pt.
                auto xc = xx + 0.5f;
                auto yc = yy + 0.5f;
                auto filter_weight =
                    compute_filter_weight(filter, xc - pt.x, yc - pt.y);
                // pixel = \sum weight * color / \sum weight
                auto weight_sum = weight_image[yy * width + xx];
                if (weight_sum > 0) {
                    d_color += (filter_weight / weight_sum) * Vector4f{
                        d_color_image[4 * (yy * width + xx) + 0],
                        d_color_image[4 * (yy * width + xx) + 1],
                        d_color_image[4 * (yy * width + xx) + 2],
                        d_color_image[4 * (yy * width + xx) + 3],
                    };
                }
            }
        }
    }
    return d_color;
}

DEVICE
float smoothstep(float d) {
    // Cubic Hermite smoothstep of d over [-1, 1]; used to prefilter
    // (antialias) shape boundaries by signed distance.
    auto t = clamp((d + 1.f) / 2.f, 0.f, 1.f);
    return t * t * (3 - 2 * t);
}

DEVICE
float d_smoothstep(float d, float d_ret) {
    // Derivative of smoothstep(d) w.r.t. d, scaled by the upstream
    // adjoint d_ret.
    if (d < -1.f || d > 1.f) {
        // Outside the transition band the function is constant (clamped).
        return 0.f;
    }
    auto t = (d + 1.f) / 2.f;
    // ret = t * t * (3 - 2 * t)
    //     = 3 * t * t - 2 * t * t * t
    auto d_t = d_ret * (6 * t - 6 * t * t);
    // Chain rule through t = (d + 1) / 2.
    return d_t / 2.f;
}

DEVICE
Vector4f sample_color_prefiltered(const SceneData &scene,
                                  const Vector4f *background_color,
                                  const Vector2f &screen_pt,
                                  const Vector4f *d_color = nullptr,
                                  Vector4f *d_background_color = nullptr,
                                  float *d_translation = nullptr) {
    // screen_pt is in screen space ([0, 1), [0, 1)),
    // need to transform to canvas space
    auto pt = screen_pt;
    pt.x *= scene.canvas_width;
    pt.y *= scene.canvas_height;
    constexpr auto max_hit_shapes = 64;
    constexpr auto max_bvh_stack_size = 64;
    PrefilterFragment fragments[max_hit_shapes];
    int bvh_stack[max_bvh_stack_size];
    auto
stack_size = 0; + auto num_fragments = 0; + bvh_stack[stack_size++] = 2 * scene.num_shape_groups - 2; + while (stack_size > 0) { + const BVHNode &node = scene.bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto group_id = node.child0; + const ShapeGroup &shape_group = scene.shape_groups[group_id]; + if (shape_group.stroke_color != nullptr) { + auto min_shape_id = -1; + auto closest_pt = Vector2f{0, 0}; + auto local_path_info = ClosestPointPathInfo{-1, -1, 0}; + auto d = infinity(); + compute_distance(scene, group_id, pt, infinity(), + &min_shape_id, &closest_pt, &local_path_info, &d); + assert(min_shape_id != -1); + const auto &shape = scene.shapes[min_shape_id]; + auto w = smoothstep(fabs(d) + shape.stroke_width) - + smoothstep(fabs(d) - shape.stroke_width); + if (w > 0) { + auto color_alpha = sample_color(shape_group.stroke_color_type, + shape_group.stroke_color, + pt); + color_alpha[3] *= w; + + PrefilterFragment f; + f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]}; + f.alpha = color_alpha[3]; + f.group_id = group_id; + f.shape_id = min_shape_id; + f.distance = d; + f.closest_pt = closest_pt; + f.is_stroke = true; + f.path_info = local_path_info; + f.within_distance = true; + assert(num_fragments < max_hit_shapes); + fragments[num_fragments++] = f; + } + } + if (shape_group.fill_color != nullptr) { + auto min_shape_id = -1; + auto closest_pt = Vector2f{0, 0}; + auto local_path_info = ClosestPointPathInfo{-1, -1, 0}; + auto d = infinity(); + auto found = compute_distance(scene, + group_id, + pt, + 1.f, + &min_shape_id, + &closest_pt, + &local_path_info, + &d); + auto inside = is_inside(scene, group_id, pt, nullptr); + if (found || inside) { + if (!inside) { + d = -d; + } + auto w = smoothstep(d); + if (w > 0) { + auto color_alpha = sample_color(shape_group.fill_color_type, + shape_group.fill_color, + pt); + color_alpha[3] *= w; + + PrefilterFragment f; + f.color = Vector3f{color_alpha[0], color_alpha[1], 
color_alpha[2]}; + f.alpha = color_alpha[3]; + f.group_id = group_id; + f.shape_id = min_shape_id; + f.distance = d; + f.closest_pt = closest_pt; + f.is_stroke = false; + f.path_info = local_path_info; + f.within_distance = found; + assert(num_fragments < max_hit_shapes); + fragments[num_fragments++] = f; + } + } + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = scene.bvh_nodes[node.child0].box; + if (inside(b0, pt, scene.bvh_nodes[node.child0].max_radius)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = scene.bvh_nodes[node.child1].box; + if (inside(b1, pt, scene.bvh_nodes[node.child1].max_radius)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_stack_size); + } + } + if (num_fragments <= 0) { + if (background_color != nullptr) { + if (d_background_color != nullptr) { + *d_background_color = *d_color; + } + return *background_color; + } + return Vector4f{0, 0, 0, 0}; + } + // Sort the fragments from back to front (i.e. increasing order of group id) + // https://github.com/frigaut/yorick-imutil/blob/master/insort.c#L37 + for (int i = 1; i < num_fragments; i++) { + auto j = i; + auto temp = fragments[j]; + while (j > 0 && fragments[j - 1].group_id > temp.group_id) { + fragments[j] = fragments[j - 1]; + j--; + } + fragments[j] = temp; + } + // Blend the color + Vector3f accum_color[max_hit_shapes]; + float accum_alpha[max_hit_shapes]; + auto first_alpha = 0.f; + auto first_color = Vector3f{0, 0, 0}; + if (background_color != nullptr) { + first_alpha = background_color->w; + first_color = Vector3f{background_color->x, + background_color->y, + background_color->z}; + } + for (int i = 0; i < num_fragments; i++) { + const PrefilterFragment &fragment = fragments[i]; + auto new_color = fragment.color; + auto new_alpha = fragment.alpha; + auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha; + auto prev_color = i > 0 ? 
accum_color[i - 1] : first_color; + // prev_color is alpha premultiplied, don't need to multiply with + // prev_alpha + accum_color[i] = prev_color * (1 - new_alpha) + new_alpha * new_color; + accum_alpha[i] = prev_alpha * (1 - new_alpha) + new_alpha; + } + auto final_color = accum_color[num_fragments - 1]; + auto final_alpha = accum_alpha[num_fragments - 1]; + if (final_alpha > 1e-6f) { + final_color /= final_alpha; + } + assert(isfinite(final_color)); + assert(isfinite(final_alpha)); + if (d_color != nullptr) { + // Backward pass + auto d_final_color = Vector3f{(*d_color)[0], (*d_color)[1], (*d_color)[2]}; + auto d_final_alpha = (*d_color)[3]; + auto d_curr_color = d_final_color; + auto d_curr_alpha = d_final_alpha; + if (final_alpha > 1e-6f) { + // final_color = curr_color / final_alpha + d_curr_color = d_final_color / final_alpha; + d_curr_alpha -= sum(d_final_color * final_color) / final_alpha; + } + assert(isfinite(*d_color)); + assert(isfinite(d_curr_color)); + assert(isfinite(d_curr_alpha)); + for (int i = num_fragments - 1; i >= 0; i--) { + // color[n] = prev_color * (1 - new_alpha) + new_alpha * new_color; + // alpha[n] = prev_alpha * (1 - new_alpha) + new_alpha; + auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha; + auto prev_color = i > 0 ? 
accum_color[i - 1] : first_color; + auto d_prev_alpha = d_curr_alpha * (1.f - fragments[i].alpha); + auto d_alpha_i = d_curr_alpha * (1.f - prev_alpha); + d_alpha_i += sum(d_curr_color * (fragments[i].color - prev_color)); + auto d_prev_color = d_curr_color * (1 - fragments[i].alpha); + auto d_color_i = d_curr_color * fragments[i].alpha; + auto group_id = fragments[i].group_id; + if (fragments[i].is_stroke) { + const auto &shape = scene.shapes[fragments[i].shape_id]; + auto d = fragments[i].distance; + auto abs_d_plus_width = fabs(d) + shape.stroke_width; + auto abs_d_minus_width = fabs(d) - shape.stroke_width; + auto w = smoothstep(abs_d_plus_width) - + smoothstep(abs_d_minus_width); + if (w != 0) { + auto d_w = w > 0 ? (fragments[i].alpha / w) * d_alpha_i : 0.f; + d_alpha_i *= w; + + // Backprop to color + d_sample_color(scene.shape_groups[group_id].stroke_color_type, + scene.shape_groups[group_id].stroke_color, + pt, + Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i}, + scene.d_shape_groups[group_id].stroke_color, + d_translation); + + auto d_abs_d_plus_width = d_smoothstep(abs_d_plus_width, d_w); + auto d_abs_d_minus_width = -d_smoothstep(abs_d_minus_width, d_w); + + auto d_d = d_abs_d_plus_width + d_abs_d_minus_width; + if (d < 0) { + d_d = -d_d; + } + auto d_stroke_width = d_abs_d_plus_width - d_abs_d_minus_width; + + const auto &shape_group = scene.shape_groups[group_id]; + ShapeGroup &d_shape_group = scene.d_shape_groups[group_id]; + Shape &d_shape = scene.d_shapes[fragments[i].shape_id]; + if (fabs(d_d) > 1e-10f) { + d_compute_distance(shape_group.canvas_to_shape, + shape_group.shape_to_canvas, + shape, + pt, + fragments[i].closest_pt, + fragments[i].path_info, + d_d, + d_shape_group.shape_to_canvas, + d_shape, + d_translation); + } + atomic_add(&d_shape.stroke_width, d_stroke_width); + } + } else { + const auto &shape = scene.shapes[fragments[i].shape_id]; + auto d = fragments[i].distance; + auto w = smoothstep(d); + if (w != 0) { + // 
color_alpha[3] = color_alpha[3] * w; + auto d_w = w > 0 ? (fragments[i].alpha / w) * d_alpha_i : 0.f; + d_alpha_i *= w; + + d_sample_color(scene.shape_groups[group_id].fill_color_type, + scene.shape_groups[group_id].fill_color, + pt, + Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i}, + scene.d_shape_groups[group_id].fill_color, + d_translation); + + // w = smoothstep(d) + auto d_d = d_smoothstep(d, d_w); + if (d < 0) { + d_d = -d_d; + } + + const auto &shape_group = scene.shape_groups[group_id]; + ShapeGroup &d_shape_group = scene.d_shape_groups[group_id]; + Shape &d_shape = scene.d_shapes[fragments[i].shape_id]; + if (fabs(d_d) > 1e-10f && fragments[i].within_distance) { + d_compute_distance(shape_group.canvas_to_shape, + shape_group.shape_to_canvas, + shape, + pt, + fragments[i].closest_pt, + fragments[i].path_info, + d_d, + d_shape_group.shape_to_canvas, + d_shape, + d_translation); + } + } + } + d_curr_color = d_prev_color; + d_curr_alpha = d_prev_alpha; + } + if (d_background_color != nullptr) { + d_background_color->x += d_curr_color.x; + d_background_color->y += d_curr_color.y; + d_background_color->z += d_curr_color.z; + d_background_color->w += d_curr_alpha; + } + } + return Vector4f{final_color[0], final_color[1], final_color[2], final_alpha}; +} + +struct weight_kernel { + DEVICE void operator()(int idx) { + auto rng_state = init_pcg32(idx, seed); + // height * width * num_samples_y * num_samples_x + auto sx = idx % num_samples_x; + auto sy = (idx / num_samples_x) % num_samples_y; + auto x = (idx / (num_samples_x * num_samples_y)) % width; + auto y = (idx / (num_samples_x * num_samples_y * width)); + assert(y < height); + auto rx = next_pcg32_float(&rng_state); + auto ry = next_pcg32_float(&rng_state); + if (use_prefiltering) { + rx = ry = 0.5f; + } + auto pt = Vector2f{x + ((float)sx + rx) / num_samples_x, + y + ((float)sy + ry) / num_samples_y}; + auto radius = scene.filter->radius; + assert(radius >= 0); + auto ri = (int)ceil(radius); + 
for (int dy = -ri; dy <= ri; dy++) { + for (int dx = -ri; dx <= ri; dx++) { + auto xx = x + dx; + auto yy = y + dy; + if (xx >= 0 && xx < width && yy >= 0 && yy < height) { + auto xc = xx + 0.5f; + auto yc = yy + 0.5f; + auto filter_weight = compute_filter_weight(*scene.filter, + xc - pt.x, + yc - pt.y); + atomic_add(weight_image[yy * width + xx], filter_weight); + } + } + } + } + + SceneData scene; + float *weight_image; + int width; + int height; + int num_samples_x; + int num_samples_y; + uint64_t seed; + bool use_prefiltering; +}; + +// We use a "mega kernel" for rendering +struct render_kernel { + DEVICE void operator()(int idx) { + // height * width * num_samples_y * num_samples_x + auto pt = Vector2f{0, 0}; + auto x = 0; + auto y = 0; + if (eval_positions == nullptr) { + auto rng_state = init_pcg32(idx, seed); + auto sx = idx % num_samples_x; + auto sy = (idx / num_samples_x) % num_samples_y; + x = (idx / (num_samples_x * num_samples_y)) % width; + y = (idx / (num_samples_x * num_samples_y * width)); + assert(x < width && y < height); + auto rx = next_pcg32_float(&rng_state); + auto ry = next_pcg32_float(&rng_state); + if (use_prefiltering) { + rx = ry = 0.5f; + } + pt = Vector2f{x + ((float)sx + rx) / num_samples_x, + y + ((float)sy + ry) / num_samples_y}; + } else { + pt = Vector2f{eval_positions[2 * idx], + eval_positions[2 * idx + 1]}; + x = int(pt.x); + y = int(pt.y); + } + + // normalize pt to [0, 1] + auto npt = pt; + npt.x /= width; + npt.y /= height; + auto num_samples = num_samples_x * num_samples_y; + if (render_image != nullptr || d_render_image != nullptr) { + Vector4f d_color = Vector4f{0, 0, 0, 0}; + if (d_render_image != nullptr) { + // Gather d_color from d_render_image inside the filter kernel + // normalize using weight_image + d_color = gather_d_color(*scene.filter, + d_render_image, + weight_image, + width, + height, + pt); + } + auto color = Vector4f{0, 0, 0, 0}; + if (use_prefiltering) { + color = sample_color_prefiltered(scene, + 
background_image != nullptr ? (const Vector4f*)&background_image[4 * ((y * width) + x)] : nullptr, + npt, + d_render_image != nullptr ? &d_color : nullptr, + d_background_image != nullptr ? (Vector4f*)&d_background_image[4 * ((y * width) + x)] : nullptr, + d_translation != nullptr ? &d_translation[2 * (y * width + x)] : nullptr); + } else { + color = sample_color(scene, + background_image != nullptr ? (const Vector4f*)&background_image[4 * ((y * width) + x)] : nullptr, + npt, + d_render_image != nullptr ? &d_color : nullptr, + nullptr, + d_background_image != nullptr ? (Vector4f*)&d_background_image[4 * ((y * width) + x)] : nullptr, + d_translation != nullptr ? &d_translation[2 * (y * width + x)] : nullptr); + } + assert(isfinite(color)); + // Splat color onto render_image + auto radius = scene.filter->radius; + assert(radius >= 0); + auto ri = (int)ceil(radius); + for (int dy = -ri; dy <= ri; dy++) { + for (int dx = -ri; dx <= ri; dx++) { + auto xx = x + dx; + auto yy = y + dy; + if (xx >= 0 && xx < width && yy >= 0 && yy < height && + weight_image[yy * width + xx] > 0) { + auto weight_sum = weight_image[yy * width + xx]; + auto xc = xx + 0.5f; + auto yc = yy + 0.5f; + auto filter_weight = compute_filter_weight(*scene.filter, + xc - pt.x, + yc - pt.y); + auto weighted_color = filter_weight * color / weight_sum; + if (render_image != nullptr) { + atomic_add(render_image[4 * (yy * width + xx) + 0], + weighted_color[0]); + atomic_add(render_image[4 * (yy * width + xx) + 1], + weighted_color[1]); + atomic_add(render_image[4 * (yy * width + xx) + 2], + weighted_color[2]); + atomic_add(render_image[4 * (yy * width + xx) + 3], + weighted_color[3]); + } + if (d_render_image != nullptr) { + // Backprop to filter_weight + // pixel = \sum weight * color / \sum weight + auto d_pixel = Vector4f{ + d_render_image[4 * (yy * width + xx) + 0], + d_render_image[4 * (yy * width + xx) + 1], + d_render_image[4 * (yy * width + xx) + 2], + d_render_image[4 * (yy * width + xx) + 3], + }; 
+ auto d_weight = + (dot(d_pixel, color) * weight_sum - + filter_weight * dot(d_pixel, color) * (weight_sum - filter_weight)) / + square(weight_sum); + d_compute_filter_weight(*scene.filter, + xc - pt.x, + yc - pt.y, + d_weight, + scene.d_filter); + } + } + } + } + } + if (sdf_image != nullptr || d_sdf_image != nullptr) { + float d_dist = 0.f; + if (d_sdf_image != nullptr) { + if (eval_positions == nullptr) { + d_dist = d_sdf_image[y * width + x]; + } else { + d_dist = d_sdf_image[idx]; + } + } + auto weight = eval_positions == nullptr ? 1.f / num_samples : 1.f; + auto dist = sample_distance(scene, npt, weight, + d_sdf_image != nullptr ? &d_dist : nullptr, + d_translation != nullptr ? &d_translation[2 * (y * width + x)] : nullptr); + if (sdf_image != nullptr) { + if (eval_positions == nullptr) { + atomic_add(sdf_image[y * width + x], dist); + } else { + atomic_add(sdf_image[idx], dist); + } + } + } + } + + SceneData scene; + float *background_image; + float *render_image; + float *weight_image; + float *sdf_image; + float *d_background_image; + float *d_render_image; + float *d_sdf_image; + float *d_translation; + int width; + int height; + int num_samples_x; + int num_samples_y; + uint64_t seed; + bool use_prefiltering; + float *eval_positions; +}; + +struct BoundarySample { + Vector2f pt; + Vector2f local_pt; + Vector2f normal; + int shape_group_id; + int shape_id; + float t; + BoundaryData data; + float pdf; +}; + +struct sample_boundary_kernel { + DEVICE void operator()(int idx) { + boundary_samples[idx].pt = Vector2f{0, 0}; + boundary_samples[idx].shape_id = -1; + boundary_ids[idx] = idx; + morton_codes[idx] = 0; + + auto rng_state = init_pcg32(idx, seed); + auto u = next_pcg32_float(&rng_state); + // Sample a shape + auto sample_id = sample(scene.sample_shapes_cdf, + scene.num_total_shapes, + u); + assert(sample_id >= 0 && sample_id < scene.num_total_shapes); + auto shape_id = scene.sample_shape_id[sample_id]; + assert(shape_id >= 0 && shape_id < 
scene.num_shapes); + auto shape_group_id = scene.sample_group_id[sample_id]; + assert(shape_group_id >= 0 && shape_group_id < scene.num_shape_groups); + auto shape_pmf = scene.sample_shapes_pmf[shape_id]; + if (shape_pmf <= 0) { + return; + } + // Sample a point on the boundary of the shape + auto boundary_pdf = 0.f; + auto normal = Vector2f{0, 0}; + auto t = next_pcg32_float(&rng_state); + BoundaryData boundary_data; + const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; + auto local_boundary_pt = sample_boundary( + scene, shape_group_id, shape_id, + t, normal, boundary_pdf, boundary_data); + if (boundary_pdf <= 0) { + return; + } + + // local_boundary_pt & normal are in shape's local space, + // transform them to canvas space + auto boundary_pt = xform_pt(shape_group.shape_to_canvas, local_boundary_pt); + normal = xform_normal(shape_group.canvas_to_shape, normal); + // Normalize boundary_pt to [0, 1) + boundary_pt.x /= scene.canvas_width; + boundary_pt.y /= scene.canvas_height; + + boundary_samples[idx].pt = boundary_pt; + boundary_samples[idx].local_pt = local_boundary_pt; + boundary_samples[idx].normal = normal; + boundary_samples[idx].shape_group_id = shape_group_id; + boundary_samples[idx].shape_id = shape_id; + boundary_samples[idx].t = t; + boundary_samples[idx].data = boundary_data; + boundary_samples[idx].pdf = shape_pmf * boundary_pdf; + TVector2 p_i{boundary_pt.x * 1023, boundary_pt.y * 1023}; + morton_codes[idx] = (expand_bits(p_i.x) << 1u) | + (expand_bits(p_i.y) << 0u); + } + + SceneData scene; + uint64_t seed; + BoundarySample *boundary_samples; + int *boundary_ids; + uint32_t *morton_codes; +}; + +struct render_edge_kernel { + DEVICE void operator()(int idx) { + auto bid = boundary_ids[idx]; + if (boundary_samples[bid].shape_id == -1) { + return; + } + auto boundary_pt = boundary_samples[bid].pt; + auto local_boundary_pt = boundary_samples[bid].local_pt; + auto normal = boundary_samples[bid].normal; + auto shape_group_id = 
boundary_samples[bid].shape_group_id; + auto shape_id = boundary_samples[bid].shape_id; + auto t = boundary_samples[bid].t; + auto boundary_data = boundary_samples[bid].data; + auto pdf = boundary_samples[bid].pdf; + + const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; + + auto bx = int(boundary_pt.x * width); + auto by = int(boundary_pt.y * height); + if (bx < 0 || bx >= width || by < 0 || by >= height) { + return; + } + + // Sample the two sides of the boundary + auto inside_query = EdgeQuery{shape_group_id, shape_id, false}; + auto outside_query = EdgeQuery{shape_group_id, shape_id, false}; + auto color_inside = sample_color(scene, + background_image != nullptr ? (const Vector4f *)&background_image[4 * ((by * width) + bx)] : nullptr, + boundary_pt - 1e-4f * normal, + nullptr, &inside_query); + auto color_outside = sample_color(scene, + background_image != nullptr ? (const Vector4f *)&background_image[4 * ((by * width) + bx)] : nullptr, + boundary_pt + 1e-4f * normal, + nullptr, &outside_query); + if (!inside_query.hit && !outside_query.hit) { + // occluded + return; + } + if (!inside_query.hit) { + normal = -normal; + swap_(inside_query, outside_query); + swap_(color_inside, color_outside); + } + // Boundary point in screen space + auto sboundary_pt = boundary_pt; + sboundary_pt.x *= width; + sboundary_pt.y *= height; + auto d_color = gather_d_color(*scene.filter, + d_render_image, + weight_image, + width, + height, + sboundary_pt); + // Normalization factor + d_color /= float(scene.canvas_width * scene.canvas_height); + + assert(isfinite(d_color)); + assert(isfinite(pdf) && pdf > 0); + auto contrib = dot(color_inside - color_outside, d_color) / pdf; + ShapeGroup &d_shape_group = scene.d_shape_groups[shape_group_id]; + accumulate_boundary_gradient(scene.shapes[shape_id], + contrib, t, normal, boundary_data, scene.d_shapes[shape_id], + shape_group.shape_to_canvas, local_boundary_pt, d_shape_group.shape_to_canvas); + // Don't need to backprop to 
filter weights: + // \int f'(x) g(x) dx doesn't contain discontinuities + // if f is continuous, even if g is discontinuous + if (d_translation != nullptr) { + // According to Reynold transport theorem, + // the Jacobian of the boundary integral is dot(velocity, normal) + // The velocity of the object translating x is (1, 0) + // The velocity of the object translating y is (0, 1) + atomic_add(&d_translation[2 * (by * width + bx) + 0], normal.x * contrib); + atomic_add(&d_translation[2 * (by * width + bx) + 1], normal.y * contrib); + } + } + + SceneData scene; + const float *background_image; + const BoundarySample *boundary_samples; + const int *boundary_ids; + float *weight_image; + float *d_render_image; + float *d_translation; + int width; + int height; + int num_samples_x; + int num_samples_y; +}; + +void render(std::shared_ptr scene, + ptr background_image, + ptr render_image, + ptr render_sdf, + int width, + int height, + int num_samples_x, + int num_samples_y, + uint64_t seed, + ptr d_background_image, + ptr d_render_image, + ptr d_render_sdf, + ptr d_translation, + bool use_prefiltering, + ptr eval_positions, + int num_eval_positions) { +#ifdef __NVCC__ + int old_device_id = -1; + if (scene->use_gpu) { + checkCuda(cudaGetDevice(&old_device_id)); + if (scene->gpu_index != -1) { + checkCuda(cudaSetDevice(scene->gpu_index)); + } + } +#endif + parallel_init(); + + float *weight_image = nullptr; + // Allocate and zero the weight image + if (scene->use_gpu) { +#ifdef __CUDACC__ + if (eval_positions.get() == nullptr) { + checkCuda(cudaMallocManaged(&weight_image, width * height * sizeof(float))); + cudaMemset(weight_image, 0, width * height * sizeof(float)); + } +#else + assert(false); +#endif + } else { + if (eval_positions.get() == nullptr) { + weight_image = (float*)malloc(width * height * sizeof(float)); + memset(weight_image, 0, width * height * sizeof(float)); + } + } + + if (render_image.get() != nullptr || d_render_image.get() != nullptr || + 
render_sdf.get() != nullptr || d_render_sdf.get() != nullptr) { + if (weight_image != nullptr) { + parallel_for(weight_kernel{ + get_scene_data(*scene.get()), + weight_image, + width, + height, + num_samples_x, + num_samples_y, + seed + }, width * height * num_samples_x * num_samples_y, scene->use_gpu); + } + + auto num_samples = eval_positions.get() == nullptr ? + width * height * num_samples_x * num_samples_y : num_eval_positions; + parallel_for(render_kernel{ + get_scene_data(*scene.get()), + background_image.get(), + render_image.get(), + weight_image, + render_sdf.get(), + d_background_image.get(), + d_render_image.get(), + d_render_sdf.get(), + d_translation.get(), + width, + height, + num_samples_x, + num_samples_y, + seed, + use_prefiltering, + eval_positions.get() + }, num_samples, scene->use_gpu); + } + + // Boundary sampling + if (!use_prefiltering && d_render_image.get() != nullptr) { + auto num_samples = width * height * num_samples_x * num_samples_y; + BoundarySample *boundary_samples = nullptr; + int *boundary_ids = nullptr; // for sorting + uint32_t *morton_codes = nullptr; // for sorting + // Allocate boundary samples + if (scene->use_gpu) { +#ifdef __CUDACC__ + checkCuda(cudaMallocManaged(&boundary_samples, + num_samples * sizeof(BoundarySample))); + checkCuda(cudaMallocManaged(&boundary_ids, + num_samples * sizeof(int))); + checkCuda(cudaMallocManaged(&morton_codes, + num_samples * sizeof(uint32_t))); +#else + assert(false); + #endif + } else { + boundary_samples = (BoundarySample*)malloc( + num_samples * sizeof(BoundarySample)); + boundary_ids = (int*)malloc( + num_samples * sizeof(int)); + morton_codes = (uint32_t*)malloc( + num_samples * sizeof(uint32_t)); + } + + // Edge sampling + // We sort the boundary samples for better thread coherency + parallel_for(sample_boundary_kernel{ + get_scene_data(*scene.get()), + seed, + boundary_samples, + boundary_ids, + morton_codes + }, num_samples, scene->use_gpu); + if (scene->use_gpu) { + 
thrust::sort_by_key(thrust::device, morton_codes, morton_codes + num_samples, boundary_ids); + } else { + // Don't need to sort for CPU, we are not using SIMD hardware anyway. + // thrust::sort_by_key(thrust::host, morton_codes, morton_codes + num_samples, boundary_ids); + } + parallel_for(render_edge_kernel{ + get_scene_data(*scene.get()), + background_image.get(), + boundary_samples, + boundary_ids, + weight_image, + d_render_image.get(), + d_translation.get(), + width, + height, + num_samples_x, + num_samples_y + }, num_samples, scene->use_gpu); + if (scene->use_gpu) { +#ifdef __CUDACC__ + checkCuda(cudaFree(boundary_samples)); + checkCuda(cudaFree(boundary_ids)); + checkCuda(cudaFree(morton_codes)); +#else + assert(false); +#endif + } else { + free(boundary_samples); + free(boundary_ids); + free(morton_codes); + } + } + + // Clean up weight image + if (scene->use_gpu) { +#ifdef __CUDACC__ + checkCuda(cudaFree(weight_image)); +#else + assert(false); +#endif + } else { + free(weight_image); + } + + if (scene->use_gpu) { + cuda_synchronize(); + } + + parallel_cleanup(); +#ifdef __NVCC__ + if (old_device_id != -1) { + checkCuda(cudaSetDevice(old_device_id)); + } +#endif +} + +PYBIND11_MODULE(diffvg, m) { + m.doc() = "Differential Vector Graphics"; + + py::class_>(m, "void_ptr") + .def(py::init()) + .def("as_size_t", &ptr::as_size_t); + py::class_>(m, "float_ptr") + .def(py::init()); + py::class_>(m, "int_ptr") + .def(py::init()); + + py::class_(m, "Vector2f") + .def(py::init()) + .def_readwrite("x", &Vector2f::x) + .def_readwrite("y", &Vector2f::y); + + py::class_(m, "Vector3f") + .def(py::init()) + .def_readwrite("x", &Vector3f::x) + .def_readwrite("y", &Vector3f::y) + .def_readwrite("z", &Vector3f::z); + + py::class_(m, "Vector4f") + .def(py::init()) + .def_readwrite("x", &Vector4f::x) + .def_readwrite("y", &Vector4f::y) + .def_readwrite("z", &Vector4f::z) + .def_readwrite("w", &Vector4f::w); + + py::enum_(m, "ShapeType") + .value("circle", ShapeType::Circle) + 
.value("ellipse", ShapeType::Ellipse) + .value("path", ShapeType::Path) + .value("rect", ShapeType::Rect); + + py::class_(m, "Circle") + .def(py::init()) + .def("get_ptr", &Circle::get_ptr) + .def_readonly("radius", &Circle::radius) + .def_readonly("center", &Circle::center); + + py::class_(m, "Ellipse") + .def(py::init()) + .def("get_ptr", &Ellipse::get_ptr) + .def_readonly("radius", &Ellipse::radius) + .def_readonly("center", &Ellipse::center); + + py::class_(m, "Path") + .def(py::init, ptr, ptr, int, int, bool, bool>()) + .def("get_ptr", &Path::get_ptr) + .def("has_thickness", &Path::has_thickness) + .def("copy_to", &Path::copy_to) + .def_readonly("num_points", &Path::num_points); + + py::class_(m, "Rect") + .def(py::init()) + .def("get_ptr", &Rect::get_ptr) + .def_readonly("p_min", &Rect::p_min) + .def_readonly("p_max", &Rect::p_max); + + py::enum_(m, "ColorType") + .value("constant", ColorType::Constant) + .value("linear_gradient", ColorType::LinearGradient) + .value("radial_gradient", ColorType::RadialGradient); + + py::class_(m, "Constant") + .def(py::init()) + .def("get_ptr", &Constant::get_ptr) + .def_readonly("color", &Constant::color); + + py::class_(m, "LinearGradient") + .def(py::init, ptr>()) + .def("get_ptr", &LinearGradient::get_ptr) + .def("copy_to", &LinearGradient::copy_to) + .def_readonly("begin", &LinearGradient::begin) + .def_readonly("end", &LinearGradient::end) + .def_readonly("num_stops", &LinearGradient::num_stops); + + py::class_(m, "RadialGradient") + .def(py::init, ptr>()) + .def("get_ptr", &RadialGradient::get_ptr) + .def("copy_to", &RadialGradient::copy_to) + .def_readonly("center", &RadialGradient::center) + .def_readonly("radius", &RadialGradient::radius) + .def_readonly("num_stops", &RadialGradient::num_stops); + + py::class_(m, "Shape") + .def(py::init, float>()) + .def("as_circle", &Shape::as_circle) + .def("as_ellipse", &Shape::as_ellipse) + .def("as_path", &Shape::as_path) + .def("as_rect", &Shape::as_rect) + 
.def_readonly("type", &Shape::type) + .def_readonly("stroke_width", &Shape::stroke_width); + + py::class_(m, "ShapeGroup") + .def(py::init, + int, + ColorType, + ptr, + ColorType, + ptr, + bool, + ptr>()) + .def("fill_color_as_constant", &ShapeGroup::fill_color_as_constant) + .def("fill_color_as_linear_gradient", &ShapeGroup::fill_color_as_linear_gradient) + .def("fill_color_as_radial_gradient", &ShapeGroup::fill_color_as_radial_gradient) + .def("stroke_color_as_constant", &ShapeGroup::stroke_color_as_constant) + .def("stroke_color_as_linear_gradient", &ShapeGroup::stroke_color_as_linear_gradient) + .def("stroke_color_as_radial_gradient", &ShapeGroup::stroke_color_as_radial_gradient) + .def("has_fill_color", &ShapeGroup::has_fill_color) + .def("has_stroke_color", &ShapeGroup::has_stroke_color) + .def("copy_to", &ShapeGroup::copy_to) + .def_readonly("fill_color_type", &ShapeGroup::fill_color_type) + .def_readonly("stroke_color_type", &ShapeGroup::stroke_color_type); + + py::enum_(m, "FilterType") + .value("box", FilterType::Box) + .value("tent", FilterType::Tent) + .value("parabolic", FilterType::RadialParabolic) + .value("hann", FilterType::Hann); + + py::class_(m, "Filter") + .def(py::init()); + + py::class_>(m, "Scene") + .def(py::init &, + const std::vector &, + const Filter &, + bool, + int>()) + .def("get_d_shape", &Scene::get_d_shape) + .def("get_d_shape_group", &Scene::get_d_shape_group) + .def("get_d_filter_radius", &Scene::get_d_filter_radius) + .def_readonly("num_shapes", &Scene::num_shapes) + .def_readonly("num_shape_groups", &Scene::num_shape_groups); + + m.def("render", &render, ""); +} diff --git a/diffvg/diffvg.egg-info/PKG-INFO b/diffvg/diffvg.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..236d4b1509e79707d9ce3619703d90ccf33bc376 --- /dev/null +++ b/diffvg/diffvg.egg-info/PKG-INFO @@ -0,0 +1,6 @@ +Metadata-Version: 2.1 +Name: diffvg +Version: 0.0.1 +Summary: Differentiable Vector Graphics +License-File: LICENSE 
+Requires-Dist: svgpathtools diff --git a/diffvg/diffvg.egg-info/SOURCES.txt b/diffvg/diffvg.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..752cbfbfbd9d52c38eac1908048da98b2b68e4f5 --- /dev/null +++ b/diffvg/diffvg.egg-info/SOURCES.txt @@ -0,0 +1,20 @@ +LICENSE +README.md +pyproject.toml +setup.py +diffvg.egg-info/PKG-INFO +diffvg.egg-info/SOURCES.txt +diffvg.egg-info/dependency_links.txt +diffvg.egg-info/not-zip-safe +diffvg.egg-info/requires.txt +diffvg.egg-info/top_level.txt +pydiffvg/__init__.py +pydiffvg/color.py +pydiffvg/device.py +pydiffvg/image.py +pydiffvg/optimize_svg.py +pydiffvg/parse_svg.py +pydiffvg/pixel_filter.py +pydiffvg/render_pytorch.py +pydiffvg/save_svg.py +pydiffvg/shape.py \ No newline at end of file diff --git a/diffvg/diffvg.egg-info/dependency_links.txt b/diffvg/diffvg.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/diffvg/diffvg.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/diffvg/diffvg.egg-info/not-zip-safe b/diffvg/diffvg.egg-info/not-zip-safe new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/diffvg/diffvg.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/diffvg/diffvg.egg-info/requires.txt b/diffvg/diffvg.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..0df5e35e6887113d18490524d5f09d620f937e7e --- /dev/null +++ b/diffvg/diffvg.egg-info/requires.txt @@ -0,0 +1 @@ +svgpathtools diff --git a/diffvg/diffvg.egg-info/top_level.txt b/diffvg/diffvg.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..51094cb281c21cb47ce49a1fca992ca8408242fd --- /dev/null +++ b/diffvg/diffvg.egg-info/top_level.txt @@ -0,0 +1,2 @@ +diffvg +pydiffvg diff --git a/diffvg/diffvg.h b/diffvg/diffvg.h new file mode 100644 index 
0000000000000000000000000000000000000000..400e4dc3f60d89061fe3842e09688f130d49c557 --- /dev/null +++ b/diffvg/diffvg.h @@ -0,0 +1,156 @@ +#pragma once + +#ifdef __NVCC__ + #define DEVICE __device__ __host__ +#else + #define DEVICE +#endif + +#ifndef __NVCC__ + #include + namespace { + inline float fmodf(float a, float b) { + return std::fmod(a, b); + } + inline double fmod(double a, double b) { + return std::fmod(a, b); + } + } + using std::isfinite; +#endif + +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + +#include +#include + +// We use Real for most of the internal computation. +// However, for PyTorch interfaces, Optix Prime and Embree queries +// we use float +using Real = float; + +template +DEVICE +inline T square(const T &x) { + return x * x; +} + +template +DEVICE +inline T cubic(const T &x) { + return x * x * x; +} + +template +DEVICE +inline T clamp(const T &v, const T &lo, const T &hi) { + if (v < lo) return lo; + else if (v > hi) return hi; + else return v; +} + +DEVICE +inline int modulo(int a, int b) { + auto r = a % b; + return (r < 0) ? r+b : r; +} + +DEVICE +inline float modulo(float a, float b) { + float r = ::fmodf(a, b); + return (r < 0.0f) ? r+b : r; +} + +DEVICE +inline double modulo(double a, double b) { + double r = ::fmod(a, b); + return (r < 0.0) ? r+b : r; +} + +template +DEVICE +inline T max(const T &a, const T &b) { + return a > b ? a : b; +} + +template +DEVICE +inline T min(const T &a, const T &b) { + return a < b ? a : b; +} + +/// Return ceil(x/y) for integers x and y +inline int idiv_ceil(int x, int y) { + return (x + y-1) / y; +} + +template +DEVICE +inline void swap_(T &a, T &b) { + T tmp = a; + a = b; + b = tmp; +} + +inline double log2(double x) { + return log(x) / log(Real(2)); +} + +template +DEVICE +inline T safe_acos(const T &x) { + if (x >= 1) return T(0); + else if(x <= -1) return T(M_PI); + return acos(x); +} + +// For Morton code computation. This can be made faster. 
+DEVICE +inline uint32_t expand_bits(uint32_t x) { + // Insert one zero after every bit given a 10-bit integer + constexpr uint64_t mask = 0x1u; + // We start from LSB (bit 31) + auto result = (x & (mask << 0u)); + result |= ((x & (mask << 1u)) << 1u); + result |= ((x & (mask << 2u)) << 2u); + result |= ((x & (mask << 3u)) << 3u); + result |= ((x & (mask << 4u)) << 4u); + result |= ((x & (mask << 5u)) << 5u); + result |= ((x & (mask << 6u)) << 6u); + result |= ((x & (mask << 7u)) << 7u); + result |= ((x & (mask << 8u)) << 8u); + result |= ((x & (mask << 9u)) << 9u); + return result; +} + +// DEVICE +// inline int clz(uint64_t x) { +// #ifdef __CUDA_ARCH__ +// return __clzll(x); +// #else +// // TODO: use _BitScanReverse in windows +// return x == 0 ? 64 : __builtin_clzll(x); +// #endif +// } + +// DEVICE +// inline int ffs(uint8_t x) { +// #ifdef __CUDA_ARCH__ +// return __ffs(x); +// #else +// // TODO: use _BitScanReverse in windows +// return __builtin_ffs(x); +// #endif +// } + +// DEVICE +// inline int popc(uint8_t x) { +// #ifdef __CUDA_ARCH__ +// return __popc(x); +// #else +// // TODO: use _popcnt in windows +// return __builtin_popcount(x); +// #endif +// } diff --git a/diffvg/dist/diffvg-0.0.1-py3.8-linux-x86_64.egg b/diffvg/dist/diffvg-0.0.1-py3.8-linux-x86_64.egg new file mode 100644 index 0000000000000000000000000000000000000000..543c6a0703fa65e3959e795a1b15cbe6c496dc54 --- /dev/null +++ b/diffvg/dist/diffvg-0.0.1-py3.8-linux-x86_64.egg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9d97720b8efaf9fd4f9efe001715a2c138c3ae7c0535bfb5eaee4835ac0de68 +size 1628207 diff --git a/diffvg/edge_query.h b/diffvg/edge_query.h new file mode 100644 index 0000000000000000000000000000000000000000..57f233a3203c1ea8d6b73f6624036578483442bb --- /dev/null +++ b/diffvg/edge_query.h @@ -0,0 +1,7 @@ +#pragma once + +struct EdgeQuery { + int shape_group_id; + int shape_id; + bool hit; // Do we hit the specified shape_group_id & shape_id? 
+}; diff --git a/diffvg/filter.h b/diffvg/filter.h new file mode 100644 index 0000000000000000000000000000000000000000..2dd0b62acb83e94da89696e9a8024c4b919f6749 --- /dev/null +++ b/diffvg/filter.h @@ -0,0 +1,106 @@ +#pragma once + +#include "diffvg.h" +#include "atomic.h" + +enum class FilterType { + Box, + Tent, + RadialParabolic, // 4/3(1 - (d/r)) + Hann // https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows +}; + +struct Filter { + FilterType type; + float radius; +}; + +struct DFilter { + float radius; +}; + +DEVICE +inline +float compute_filter_weight(const Filter &filter, + float dx, + float dy) { + if (fabs(dx) > filter.radius || fabs(dy) > filter.radius) { + return 0; + } + if (filter.type == FilterType::Box) { + return 1.f / square(2 * filter.radius); + } else if (filter.type == FilterType::Tent) { + return (filter.radius - fabs(dx)) * (filter.radius - fabs(dy)) / + square(square(filter.radius)); + } else if (filter.type == FilterType::RadialParabolic) { + return (4.f / 3.f) * (1 - square(dx / filter.radius)) * + (4.f / 3.f) * (1 - square(dy / filter.radius)); + } else { + assert(filter.type == FilterType::Hann); + // normalize dx, dy to [0, 1] + auto ndx = (dx / (2*filter.radius)) + 0.5f; + auto ndy = (dy / (2*filter.radius)) + 0.5f; + // the normalization factor is R^2 + return 0.5f * (1.f - cos(float(2 * M_PI) * ndx)) * + 0.5f * (1.f - cos(float(2 * M_PI) * ndy)) / + square(filter.radius); + } +} + +DEVICE +inline +void d_compute_filter_weight(const Filter &filter, + float dx, + float dy, + float d_return, + DFilter *d_filter) { + if (filter.type == FilterType::Box) { + // return 1.f / square(2 * filter.radius); + atomic_add(d_filter->radius, + d_return * (-2) * 2 * filter.radius / cubic(2 * filter.radius)); + } else if (filter.type == FilterType::Tent) { + // return (filer.radius - fabs(dx)) * (filer.radius - fabs(dy)) / + // square(square(filter.radius)); + auto fx = filter.radius - fabs(dx); + auto fy = filter.radius - fabs(dy); + 
auto norm = 1 / square(filter.radius); + auto d_fx = d_return * fy * norm; + auto d_fy = d_return * fx * norm; + auto d_norm = d_return * fx * fy; + atomic_add(d_filter->radius, + d_fx + d_fy + (-4) * d_norm / pow(filter.radius, 5)); + } else if (filter.type == FilterType::RadialParabolic) { + // return (4.f / 3.f) * (1 - square(dx / filter.radius)) * + // (4.f / 3.f) * (1 - square(dy / filter.radius)); + // auto d_square_x = d_return * (-4.f / 3.f); + // auto d_square_y = d_return * (-4.f / 3.f); + auto r3 = filter.radius * filter.radius * filter.radius; + auto d_radius = -(2 * square(dx) + 2 * square(dy)) / r3; + atomic_add(d_filter->radius, d_radius); + } else { + assert(filter.type == FilterType::Hann); + // // normalize dx, dy to [0, 1] + // auto ndx = (dx / (2*filter.radius)) + 0.5f; + // auto ndy = (dy / (2*filter.radius)) + 0.5f; + // // the normalization factor is R^2 + // return 0.5f * (1.f - cos(float(2 * M_PI) * ndx)) * + // 0.5f * (1.f - cos(float(2 * M_PI) * ndy)) / + // square(filter.radius); + + // normalize dx, dy to [0, 1] + auto ndx = (dx / (2*filter.radius)) + 0.5f; + auto ndy = (dy / (2*filter.radius)) + 0.5f; + auto fx = 0.5f * (1.f - cos(float(2*M_PI) * ndx)); + auto fy = 0.5f * (1.f - cos(float(2*M_PI) * ndy)); + auto norm = 1 / square(filter.radius); + auto d_fx = d_return * fy * norm; + auto d_fy = d_return * fx * norm; + auto d_norm = d_return * fx * fy; + auto d_ndx = d_fx * 0.5f * sin(float(2*M_PI) * ndx) * float(2*M_PI); + auto d_ndy = d_fy * 0.5f * sin(float(2*M_PI) * ndy) * float(2*M_PI); + atomic_add(d_filter->radius, + d_ndx * (-2*dx / square(2*filter.radius)) + + d_ndy * (-2*dy / square(2*filter.radius)) + + (-2) * d_norm / cubic(filter.radius)); + } +} diff --git a/diffvg/matrix.h b/diffvg/matrix.h new file mode 100644 index 0000000000000000000000000000000000000000..b53f484e2abf613c6d0c1b36890a332d778f24b5 --- /dev/null +++ b/diffvg/matrix.h @@ -0,0 +1,544 @@ +#pragma once + +#include "diffvg.h" +#include "vector.h" +#include + 
+template +struct TMatrix3x3 { + DEVICE + TMatrix3x3() { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + data[i][j] = T(0); + } + } + } + + template + DEVICE + TMatrix3x3(T2 *arr) { + data[0][0] = arr[0]; + data[0][1] = arr[1]; + data[0][2] = arr[2]; + data[1][0] = arr[3]; + data[1][1] = arr[4]; + data[1][2] = arr[5]; + data[2][0] = arr[6]; + data[2][1] = arr[7]; + data[2][2] = arr[8]; + } + DEVICE + TMatrix3x3(T v00, T v01, T v02, + T v10, T v11, T v12, + T v20, T v21, T v22) { + data[0][0] = v00; + data[0][1] = v01; + data[0][2] = v02; + data[1][0] = v10; + data[1][1] = v11; + data[1][2] = v12; + data[2][0] = v20; + data[2][1] = v21; + data[2][2] = v22; + } + + DEVICE + const T& operator()(int i, int j) const { + return data[i][j]; + } + DEVICE + T& operator()(int i, int j) { + return data[i][j]; + } + DEVICE + static TMatrix3x3 identity() { + TMatrix3x3 m(1, 0, 0, + 0, 1, 0, + 0, 0, 1); + return m; + } + + T data[3][3]; +}; + +using Matrix3x3 = TMatrix3x3; +using Matrix3x3f = TMatrix3x3; + +template +struct TMatrix4x4 { + DEVICE TMatrix4x4() { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + data[i][j] = T(0); + } + } + } + + template + DEVICE TMatrix4x4(const T2 *arr) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + data[i][j] = (T)arr[i * 4 + j]; + } + } + } + + template + DEVICE TMatrix4x4(const TMatrix4x4 &m) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + data[i][j] = T(m.data[i][j]); + } + } + } + + template + DEVICE TMatrix4x4(T2 v00, T2 v01, T2 v02, T2 v03, + T2 v10, T2 v11, T2 v12, T2 v13, + T2 v20, T2 v21, T2 v22, T2 v23, + T2 v30, T2 v31, T2 v32, T2 v33) { + data[0][0] = (T)v00; + data[0][1] = (T)v01; + data[0][2] = (T)v02; + data[0][3] = (T)v03; + data[1][0] = (T)v10; + data[1][1] = (T)v11; + data[1][2] = (T)v12; + data[1][3] = (T)v13; + data[2][0] = (T)v20; + data[2][1] = (T)v21; + data[2][2] = (T)v22; + data[2][3] = (T)v23; + data[3][0] = (T)v30; + data[3][1] = (T)v31; + 
data[3][2] = (T)v32; + data[3][3] = (T)v33; + } + + DEVICE + const T& operator()(int i, int j) const { + return data[i][j]; + } + + DEVICE + T& operator()(int i, int j) { + return data[i][j]; + } + + DEVICE + static TMatrix4x4 identity() { + TMatrix4x4 m(1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0, + 0, 0, 0, 1); + return m; + } + + T data[4][4]; +}; + +using Matrix4x4 = TMatrix4x4; +using Matrix4x4f = TMatrix4x4; + +template +DEVICE +inline auto operator+(const TMatrix3x3 &m0, const TMatrix3x3 &m1) -> TMatrix3x3 { + TMatrix3x3 m; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + m(i, j) = m0(i, j) + m1(i, j); + } + } + return m; +} + +template +DEVICE +inline auto operator-(const TMatrix3x3 &m0, const TMatrix3x3 &m1) -> TMatrix3x3 { + TMatrix3x3 m; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + m(i, j) = m0(i, j) - m1(i, j); + } + } + return m; +} + +template +DEVICE +inline auto operator*(const TMatrix3x3 &m0, const TMatrix3x3 &m1) -> TMatrix3x3 { + TMatrix3x3 ret; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + ret(i, j) = T(0); + for (int k = 0; k < 3; k++) { + ret(i, j) += m0(i, k) * m1(k, j); + } + } + } + return ret; +} + +template +DEVICE +inline auto operator*(const TVector3 &v, const TMatrix3x3 &m) -> TVector3 { + TVector3 ret; + for (int i = 0; i < 3; i++) { + ret[i] = T(0); + for (int j = 0; j < 3; j++) { + ret[i] += v[j] * m(j, i); + } + } + return ret; +} + +template +DEVICE +inline auto operator*(const TMatrix3x3 &m, const TVector3 &v) -> TVector3 { + TVector3 ret; + for (int i = 0; i < 3; i++) { + ret[i] = 0.f; + for (int j = 0; j < 3; j++) { + ret[i] += m(i, j) * v[j]; + } + } + return ret; +} + +template +DEVICE +inline auto inverse(const TMatrix3x3 &m) -> TMatrix3x3 { + // computes the inverse of a matrix m + auto det = m(0, 0) * (m(1, 1) * m(2, 2) - m(2, 1) * m(1, 2)) - + m(0, 1) * (m(1, 0) * m(2, 2) - m(1, 2) * m(2, 0)) + + m(0, 2) * (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0)); + + auto invdet = 1 / 
det; + + auto m_inv = TMatrix3x3{}; + m_inv(0, 0) = (m(1, 1) * m(2, 2) - m(2, 1) * m(1, 2)) * invdet; + m_inv(0, 1) = (m(0, 2) * m(2, 1) - m(0, 1) * m(2, 2)) * invdet; + m_inv(0, 2) = (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1)) * invdet; + m_inv(1, 0) = (m(1, 2) * m(2, 0) - m(1, 0) * m(2, 2)) * invdet; + m_inv(1, 1) = (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0)) * invdet; + m_inv(1, 2) = (m(1, 0) * m(0, 2) - m(0, 0) * m(1, 2)) * invdet; + m_inv(2, 0) = (m(1, 0) * m(2, 1) - m(2, 0) * m(1, 1)) * invdet; + m_inv(2, 1) = (m(2, 0) * m(0, 1) - m(0, 0) * m(2, 1)) * invdet; + m_inv(2, 2) = (m(0, 0) * m(1, 1) - m(1, 0) * m(0, 1)) * invdet; + return m_inv; +} + +template +DEVICE +inline auto operator+(const TMatrix4x4 &m0, const TMatrix4x4 &m1) -> TMatrix4x4 { + TMatrix4x4 m; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m(i, j) = m0(i, j) + m1(i, j); + } + } + return m; +} + +template +DEVICE +TMatrix3x3 transpose(const TMatrix3x3 &m) { + return TMatrix3x3(m(0, 0), m(1, 0), m(2, 0), + m(0, 1), m(1, 1), m(2, 1), + m(0, 2), m(1, 2), m(2, 2)); +} + +template +DEVICE +TMatrix4x4 transpose(const TMatrix4x4 &m) { + return TMatrix4x4(m(0, 0), m(1, 0), m(2, 0), m(3, 0), + m(0, 1), m(1, 1), m(2, 1), m(3, 1), + m(0, 2), m(1, 2), m(2, 2), m(3, 2), + m(0, 3), m(1, 3), m(2, 3), m(3, 3)); +} + +template +DEVICE +inline TMatrix3x3 operator-(const TMatrix3x3 &m0) { + TMatrix3x3 m; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + m(i, j) = -m0(i, j); + } + } + return m; +} + +template +DEVICE +inline TMatrix4x4 operator-(const TMatrix4x4 &m0) { + TMatrix4x4 m; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m(i, j) = -m0(i, j); + } + } + return m; +} + +template +DEVICE +inline TMatrix4x4 operator-(const TMatrix4x4 &m0, const TMatrix4x4 &m1) { + TMatrix4x4 m; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m(i, j) = m0(i, j) - m1(i, j); + } + } + return m; +} + +template +DEVICE +inline TMatrix3x3& operator+=(TMatrix3x3 &m0, 
const TMatrix3x3 &m1) { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + m0(i, j) += m1(i, j); + } + } + return m0; +} + +template +DEVICE +inline TMatrix4x4& operator+=(TMatrix4x4 &m0, const TMatrix4x4 &m1) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m0(i, j) += m1(i, j); + } + } + return m0; +} + +template +DEVICE +inline TMatrix4x4& operator-=(TMatrix4x4 &m0, const TMatrix4x4 &m1) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m0(i, j) -= m1(i, j); + } + } + return m0; +} + +template +DEVICE +inline TMatrix4x4 operator*(const TMatrix4x4 &m0, const TMatrix4x4 &m1) { + TMatrix4x4 m; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 4; k++) { + m(i, j) += m0(i, k) * m1(k, j); + } + } + } + return m; +} + +template +DEVICE +TMatrix4x4 inverse(const TMatrix4x4 &m) { + // https://stackoverflow.com/questions/1148309/inverting-a-4x4-matrix + TMatrix4x4 inv; + + inv(0, 0) = m(1, 1) * m(2, 2) * m(3, 3) - + m(1, 1) * m(2, 3) * m(3, 2) - + m(2, 1) * m(1, 2) * m(3, 3) + + m(2, 1) * m(1, 3) * m(3, 2) + + m(3, 1) * m(1, 2) * m(2, 3) - + m(3, 1) * m(1, 3) * m(2, 2); + + inv(1, 0) = -m(1, 0) * m(2, 2) * m(3, 3) + + m(1, 0) * m(2, 3) * m(3, 2) + + m(2, 0) * m(1, 2) * m(3, 3) - + m(2, 0) * m(1, 3) * m(3, 2) - + m(3, 0) * m(1, 2) * m(2, 3) + + m(3, 0) * m(1, 3) * m(2, 2); + + inv(2, 0) = m(1, 0) * m(2, 1) * m(3, 3) - + m(1, 0) * m(2, 3) * m(3, 1) - + m(2, 0) * m(1, 1) * m(3, 3) + + m(2, 0) * m(1, 3) * m(3, 1) + + m(3, 0) * m(1, 1) * m(2, 3) - + m(3, 0) * m(1, 3) * m(2, 1); + + inv(3, 0) = -m(1, 0) * m(2, 1) * m(3, 2) + + m(1, 0) * m(2, 2) * m(3, 1) + + m(2, 0) * m(1, 1) * m(3, 2) - + m(2, 0) * m(1, 2) * m(3, 1) - + m(3, 0) * m(1, 1) * m(2, 2) + + m(3, 0) * m(1, 2) * m(2, 1); + + inv(0, 1) = -m(0, 1) * m(2, 2) * m(3, 3) + + m(0, 1) * m(2, 3) * m(3, 2) + + m(2, 1) * m(0, 2) * m(3, 3) - + m(2, 1) * m(0, 3) * m(3, 2) - + m(3, 1) * m(0, 2) * m(2, 3) + + m(3, 1) * m(0, 3) * m(2, 2); + + 
inv(1, 1) = m(0, 0) * m(2, 2) * m(3, 3) - + m(0, 0) * m(2, 3) * m(3, 2) - + m(2, 0) * m(0, 2) * m(3, 3) + + m(2, 0) * m(0, 3) * m(3, 2) + + m(3, 0) * m(0, 2) * m(2, 3) - + m(3, 0) * m(0, 3) * m(2, 2); + + inv(2, 1) = -m(0, 0) * m(2, 1) * m(3, 3) + + m(0, 0) * m(2, 3) * m(3, 1) + + m(2, 0) * m(0, 1) * m(3, 3) - + m(2, 0) * m(0, 3) * m(3, 1) - + m(3, 0) * m(0, 1) * m(2, 3) + + m(3, 0) * m(0, 3) * m(2, 1); + + inv(3, 1) = m(0, 0) * m(2, 1) * m(3, 2) - + m(0, 0) * m(2, 2) * m(3, 1) - + m(2, 0) * m(0, 1) * m(3, 2) + + m(2, 0) * m(0, 2) * m(3, 1) + + m(3, 0) * m(0, 1) * m(2, 2) - + m(3, 0) * m(0, 2) * m(2, 1); + + inv(0, 2) = m(0, 1) * m(1, 2) * m(3, 3) - + m(0, 1) * m(1, 3) * m(3, 2) - + m(1, 1) * m(0, 2) * m(3, 3) + + m(1, 1) * m(0, 3) * m(3, 2) + + m(3, 1) * m(0, 2) * m(1, 3) - + m(3, 1) * m(0, 3) * m(1, 2); + + inv(1, 2) = -m(0, 0) * m(1, 2) * m(3, 3) + + m(0, 0) * m(1, 3) * m(3, 2) + + m(1, 0) * m(0, 2) * m(3, 3) - + m(1, 0) * m(0, 3) * m(3, 2) - + m(3, 0) * m(0, 2) * m(1, 3) + + m(3, 0) * m(0, 3) * m(1, 2); + + inv(2, 2) = m(0, 0) * m(1, 1) * m(3, 3) - + m(0, 0) * m(1, 3) * m(3, 1) - + m(1, 0) * m(0, 1) * m(3, 3) + + m(1, 0) * m(0, 3) * m(3, 1) + + m(3, 0) * m(0, 1) * m(1, 3) - + m(3, 0) * m(0, 3) * m(1, 1); + + inv(3, 2) = -m(0, 0) * m(1, 1) * m(3, 2) + + m(0, 0) * m(1, 2) * m(3, 1) + + m(1, 0) * m(0, 1) * m(3, 2) - + m(1, 0) * m(0, 2) * m(3, 1) - + m(3, 0) * m(0, 1) * m(1, 2) + + m(3, 0) * m(0, 2) * m(1, 1); + + inv(0, 3) = -m(0, 1) * m(1, 2) * m(2, 3) + + m(0, 1) * m(1, 3) * m(2, 2) + + m(1, 1) * m(0, 2) * m(2, 3) - + m(1, 1) * m(0, 3) * m(2, 2) - + m(2, 1) * m(0, 2) * m(1, 3) + + m(2, 1) * m(0, 3) * m(1, 2); + + inv(1, 3) = m(0, 0) * m(1, 2) * m(2, 3) - + m(0, 0) * m(1, 3) * m(2, 2) - + m(1, 0) * m(0, 2) * m(2, 3) + + m(1, 0) * m(0, 3) * m(2, 2) + + m(2, 0) * m(0, 2) * m(1, 3) - + m(2, 0) * m(0, 3) * m(1, 2); + + inv(2, 3) = -m(0, 0) * m(1, 1) * m(2, 3) + + m(0, 0) * m(1, 3) * m(2, 1) + + m(1, 0) * m(0, 1) * m(2, 3) - + m(1, 0) * m(0, 3) * m(2, 1) - + m(2, 0) * 
m(0, 1) * m(1, 3) + + m(2, 0) * m(0, 3) * m(1, 1); + + inv(3, 3) = m(0, 0) * m(1, 1) * m(2, 2) - + m(0, 0) * m(1, 2) * m(2, 1) - + m(1, 0) * m(0, 1) * m(2, 2) + + m(1, 0) * m(0, 2) * m(2, 1) + + m(2, 0) * m(0, 1) * m(1, 2) - + m(2, 0) * m(0, 2) * m(1, 1); + + auto det = m(0, 0) * inv(0, 0) + + m(0, 1) * inv(1, 0) + + m(0, 2) * inv(2, 0) + + m(0, 3) * inv(3, 0); + + if (det == 0) { + return TMatrix4x4{}; + } + + auto inv_det = 1.0 / det; + + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + inv(i, j) *= inv_det; + } + } + + return inv; +} + +template +inline std::ostream& operator<<(std::ostream &os, const TMatrix3x3 &m) { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + os << m(i, j) << " "; + } + os << std::endl; + } + return os; +} + +template +inline std::ostream& operator<<(std::ostream &os, const TMatrix4x4 &m) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + os << m(i, j) << " "; + } + os << std::endl; + } + return os; +} + +template +DEVICE +TVector2 xform_pt(const TMatrix3x3 &m, const TVector2 &pt) { + TVector3 t{m(0, 0) * pt[0] + m(0, 1) * pt[1] + m(0, 2), + m(1, 0) * pt[0] + m(1, 1) * pt[1] + m(1, 2), + m(2, 0) * pt[0] + m(2, 1) * pt[1] + m(2, 2)}; + return TVector2{t[0] / t[2], t[1] / t[2]}; +} + +template +DEVICE +void d_xform_pt(const TMatrix3x3 &m, const TVector2 &pt, + const TVector2 &d_out, + TMatrix3x3 &d_m, + TVector2 &d_pt) { + TVector3 t{m(0, 0) * pt[0] + m(0, 1) * pt[1] + m(0, 2), + m(1, 0) * pt[0] + m(1, 1) * pt[1] + m(1, 2), + m(2, 0) * pt[0] + m(2, 1) * pt[1] + m(2, 2)}; + auto out = TVector2{t[0] / t[2], t[1] / t[2]}; + TVector3 d_t{d_out[0] / t[2], + d_out[1] / t[2], + -(d_out[0] * out[0] + d_out[1] * out[1]) / t[2]}; + d_m(0, 0) += d_t[0] * pt[0]; + d_m(0, 1) += d_t[0] * pt[1]; + d_m(0, 2) += d_t[0]; + d_m(1, 0) += d_t[1] * pt[0]; + d_m(1, 1) += d_t[1] * pt[1]; + d_m(1, 2) += d_t[1]; + d_m(2, 0) += d_t[2] * pt[0]; + d_m(2, 1) += d_t[2] * pt[1]; + d_m(2, 2) += d_t[2]; + d_pt[0] += d_t[0] * 
m(0, 0) + d_t[1] * m(1, 0) + d_t[2] * m(2, 0); + d_pt[1] += d_t[0] * m(0, 1) + d_t[1] * m(1, 1) + d_t[2] * m(2, 1); +} + +template +DEVICE +TVector2 xform_normal(const TMatrix3x3 &m_inv, const TVector2 &n) { + return normalize(TVector2{m_inv(0, 0) * n[0] + m_inv(1, 0) * n[1], + m_inv(0, 1) * n[0] + m_inv(1, 1) * n[1]}); +} diff --git a/diffvg/parallel.cpp b/diffvg/parallel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..365fc5bb305f9cacc780fb5276905e37d3b37e34 --- /dev/null +++ b/diffvg/parallel.cpp @@ -0,0 +1,273 @@ +#include "parallel.h" +#include +#include +#include +#include +#include + +// From https://github.com/mmp/pbrt-v3/blob/master/src/core/parallel.cpp + +static std::vector threads; +static bool shutdownThreads = false; +struct ParallelForLoop; +static ParallelForLoop *workList = nullptr; +static std::mutex workListMutex; + +struct ParallelForLoop { + ParallelForLoop(std::function func1D, int64_t maxIndex, int chunkSize) + : func1D(std::move(func1D)), maxIndex(maxIndex), chunkSize(chunkSize) { + } + ParallelForLoop(const std::function &f, const Vector2i count) + : func2D(f), maxIndex(count[0] * count[1]), chunkSize(1) { + nX = count[0]; + } + + std::function func1D; + std::function func2D; + const int64_t maxIndex; + const int chunkSize; + int64_t nextIndex = 0; + int activeWorkers = 0; + ParallelForLoop *next = nullptr; + int nX = -1; + + bool Finished() const { + return nextIndex >= maxIndex && activeWorkers == 0; + } +}; + +void Barrier::Wait() { + std::unique_lock lock(mutex); + assert(count > 0); + if (--count == 0) { + // This is the last thread to reach the barrier; wake up all of the + // other ones before exiting. + cv.notify_all(); + } else { + // Otherwise there are still threads that haven't reached it. Give + // up the lock and wait to be notified. 
+ cv.wait(lock, [this] { return count == 0; }); + } +} + +static std::condition_variable workListCondition; + +static void worker_thread_func(const int tIndex, std::shared_ptr barrier) { + ThreadIndex = tIndex; + + // The main thread sets up a barrier so that it can be sure that all + // workers have called ProfilerWorkerThreadInit() before it continues + // (and actually starts the profiling system). + barrier->Wait(); + + // Release our reference to the Barrier so that it's freed once all of + // the threads have cleared it. + barrier.reset(); + + std::unique_lock lock(workListMutex); + while (!shutdownThreads) { + if (!workList) { + // Sleep until there are more tasks to run + workListCondition.wait(lock); + } else { + // Get work from _workList_ and run loop iterations + ParallelForLoop &loop = *workList; + + // Run a chunk of loop iterations for _loop_ + + // Find the set of loop iterations to run next + int64_t indexStart = loop.nextIndex; + int64_t indexEnd = std::min(indexStart + loop.chunkSize, loop.maxIndex); + + // Update _loop_ to reflect iterations this thread will run + loop.nextIndex = indexEnd; + if (loop.nextIndex == loop.maxIndex) + workList = loop.next; + loop.activeWorkers++; + + // Run loop indices in _[indexStart, indexEnd)_ + lock.unlock(); + for (int64_t index = indexStart; index < indexEnd; ++index) { + if (loop.func1D) { + loop.func1D(index); + } + // Handle other types of loops + else { + assert(loop.func2D != nullptr); + loop.func2D(Vector2i{int(index % loop.nX), + int(index / loop.nX)}); + } + } + lock.lock(); + + // Update _loop_ to reflect completion of iterations + loop.activeWorkers--; + if (loop.Finished()) { + workListCondition.notify_all(); + } + } + } +} + +void parallel_for_host(const std::function &func, + int64_t count, + int chunkSize) { + // Run iterations immediately if not using threads or if _count_ is small + if (threads.empty() || count < chunkSize) { + for (int64_t i = 0; i < count; ++i) { + func(i); + } + return; + } 
+ + // Create and enqueue _ParallelForLoop_ for this loop + ParallelForLoop loop(func, count, chunkSize); + workListMutex.lock(); + loop.next = workList; + workList = &loop; + workListMutex.unlock(); + + // Notify worker threads of work to be done + std::unique_lock lock(workListMutex); + workListCondition.notify_all(); + + // Help out with parallel loop iterations in the current thread + while (!loop.Finished()) { + // Run a chunk of loop iterations for _loop_ + + // Find the set of loop iterations to run next + int64_t indexStart = loop.nextIndex; + int64_t indexEnd = std::min(indexStart + loop.chunkSize, loop.maxIndex); + + // Update _loop_ to reflect iterations this thread will run + loop.nextIndex = indexEnd; + if (loop.nextIndex == loop.maxIndex) { + workList = loop.next; + } + loop.activeWorkers++; + + // Run loop indices in _[indexStart, indexEnd)_ + lock.unlock(); + for (int64_t index = indexStart; index < indexEnd; ++index) { + if (loop.func1D) { + loop.func1D(index); + } + // Handle other types of loops + else { + assert(loop.func2D != nullptr); + loop.func2D(Vector2i{int(index % loop.nX), + int(index / loop.nX)}); + } + } + lock.lock(); + + // Update _loop_ to reflect completion of iterations + loop.activeWorkers--; + } +} + +thread_local int ThreadIndex; + +void parallel_for_host( + std::function func, const Vector2i count) { + // Launch worker threads if needed + if (threads.empty() || count.x * count.y <= 1) { + for (int y = 0; y < count.y; ++y) { + for (int x = 0; x < count.x; ++x) { + func(Vector2i{x, y}); + } + } + return; + } + + ParallelForLoop loop(std::move(func), count); + { + std::lock_guard lock(workListMutex); + loop.next = workList; + workList = &loop; + } + + std::unique_lock lock(workListMutex); + workListCondition.notify_all(); + + // Help out with parallel loop iterations in the current thread + while (!loop.Finished()) { + // Run a chunk of loop iterations for _loop_ + + // Find the set of loop iterations to run next + int64_t 
indexStart = loop.nextIndex; + int64_t indexEnd = std::min(indexStart + loop.chunkSize, loop.maxIndex); + + // Update _loop_ to reflect iterations this thread will run + loop.nextIndex = indexEnd; + if (loop.nextIndex == loop.maxIndex) { + workList = loop.next; + } + loop.activeWorkers++; + + // Run loop indices in _[indexStart, indexEnd)_ + lock.unlock(); + for (int64_t index = indexStart; index < indexEnd; ++index) { + if (loop.func1D) { + loop.func1D(index); + } + // Handle other types of loops + else { + assert(loop.func2D != nullptr); + loop.func2D(Vector2i{int(index % loop.nX), + int(index / loop.nX)}); + } + } + lock.lock(); + + // Update _loop_ to reflect completion of iterations + loop.activeWorkers--; + } +} + +int num_system_cores() { + // return 1; + int ret = std::thread::hardware_concurrency(); + if (ret == 0) { + return 16; + } + return ret; +} + +void parallel_init() { + assert(threads.size() == 0); + int nThreads = num_system_cores(); + ThreadIndex = 0; + + // Create a barrier so that we can be sure all worker threads get past + // their call to ProfilerWorkerThreadInit() before we return from this + // function. In turn, we can be sure that the profiling system isn't + // started until after all worker threads have done that. + std::shared_ptr barrier = std::make_shared(nThreads); + + // Launch one fewer worker thread than the total number we want doing + // work, since the main thread helps out, too. 
+ for (int i = 0; i < nThreads - 1; ++i) { + threads.push_back(std::thread(worker_thread_func, i + 1, barrier)); + } + + barrier->Wait(); +} + +void parallel_cleanup() { + if (threads.empty()) { + return; + } + + { + std::lock_guard lock(workListMutex); + shutdownThreads = true; + workListCondition.notify_all(); + } + + for (std::thread &thread : threads) { + thread.join(); + } + threads.erase(threads.begin(), threads.end()); + shutdownThreads = false; +} diff --git a/diffvg/parallel.h b/diffvg/parallel.h new file mode 100644 index 0000000000000000000000000000000000000000..b7f9c712e471616d01921157c290a50adac768d9 --- /dev/null +++ b/diffvg/parallel.h @@ -0,0 +1,91 @@ +#pragma once + +#include "vector.h" + +#include +#include +#include +#include +#include +#include +#include +// From https://github.com/mmp/pbrt-v3/blob/master/src/core/parallel.h + +class Barrier { + public: + Barrier(int count) : count(count) { assert(count > 0); } + ~Barrier() { assert(count == 0); } + void Wait(); + + private: + std::mutex mutex; + std::condition_variable cv; + int count; +}; + +void parallel_for_host(const std::function &func, + int64_t count, + int chunkSize = 1); +extern thread_local int ThreadIndex; +void parallel_for_host( + std::function func, const Vector2i count); +int num_system_cores(); + +void parallel_init(); +void parallel_cleanup(); + +#ifdef __CUDACC__ +template +__global__ void parallel_for_device_kernel(T functor, int count) { + auto idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx >= count) { + return; + } + functor(idx); +} +template +inline void parallel_for_device(T functor, + int count, + int work_per_thread = 256) { + if (count <= 0) { + return; + } + auto block_size = work_per_thread; + auto block_count = idiv_ceil(count, block_size); + parallel_for_device_kernel<<>>(functor, count); +} +#endif + +template +inline void parallel_for(T functor, + int count, + bool use_gpu, + int work_per_thread = -1) { + if (work_per_thread == -1) { + work_per_thread = 
use_gpu ? 64 : 256; + } + if (count <= 0) { + return; + } + if (use_gpu) { +#ifdef __CUDACC__ + auto block_size = work_per_thread; + auto block_count = idiv_ceil(count, block_size); + parallel_for_device_kernel<<>>(functor, count); +#else + throw std::runtime_error("diffvg not compiled with GPU"); + assert(false); +#endif + } else { + auto num_threads = idiv_ceil(count, work_per_thread); + parallel_for_host([&](int thread_index) { + auto id_offset = work_per_thread * thread_index; + auto work_end = std::min(id_offset + work_per_thread, count); + for (int work_id = id_offset; work_id < work_end; work_id++) { + auto idx = work_id; + assert(idx < count); + functor(idx); + } + }, num_threads); + } +} diff --git a/diffvg/pcg.h b/diffvg/pcg.h new file mode 100644 index 0000000000000000000000000000000000000000..55859a1e63d15d1d5d0b110c28561a064c5a446c --- /dev/null +++ b/diffvg/pcg.h @@ -0,0 +1,40 @@ +#pragma once + +#include "diffvg.h" + +// http://www.pcg-random.org/download.html +struct pcg32_state { + uint64_t state; + uint64_t inc; +}; + +DEVICE inline uint32_t next_pcg32(pcg32_state *rng) { + uint64_t oldstate = rng->state; + // Advance internal state + rng->state = oldstate * 6364136223846793005ULL + (rng->inc|1); + // Calculate output function (XSH RR), uses old state for max ILP + uint32_t xorshifted = ((oldstate >> 18u) ^ oldstate) >> 27u; + uint32_t rot = oldstate >> 59u; + return (xorshifted >> rot) | (xorshifted << ((-rot) & 31)); +} + +// https://github.com/wjakob/pcg32/blob/master/pcg32.h +DEVICE inline float next_pcg32_float(pcg32_state *rng) { + union { + uint32_t u; + float f; + } x; + x.u = (next_pcg32(rng) >> 9) | 0x3f800000u; + return x.f - 1.0f; +} + +// Initialize each pixel with a PCG rng with a different stream +DEVICE inline pcg32_state init_pcg32(int idx, uint64_t seed) { + pcg32_state state; + state.state = 0U; + state.inc = (((uint64_t)idx + 1) << 1u) | 1u; + next_pcg32(&state); + state.state += (0x853c49e6748fea9bULL + seed); + 
next_pcg32(&state); + return state; +} diff --git a/diffvg/poetry.lock b/diffvg/poetry.lock new file mode 100644 index 0000000000000000000000000000000000000000..1debce072482cf8a611f6bcc1ab6d626428f8650 --- /dev/null +++ b/diffvg/poetry.lock @@ -0,0 +1,733 @@ +[[package]] +name = "certifi" +version = "2020.12.5" +description = "Python package for providing Mozilla's CA Bundle." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "cffi" +version = "1.14.5" +description = "Foreign Function Interface for Python calling C code." +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "chardet" +version = "4.0.0" +description = "Universal encoding detector for Python 2 and 3" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "cmake" +version = "3.20.5" +description = "CMake is an open-source, cross-platform family of tools designed to build, test and package software" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "coloredlogs" +version = "15.0" +description = "Colored terminal output for Python's logging module" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +humanfriendly = ">=9.1" + +[package.extras] +cron = ["capturer (>=2.4)"] + +[[package]] +name = "cssutils" +version = "2.2.0" +description = "A CSS Cascading Style Sheets library for Python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] +testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "mock", "pytest-black (>=0.3.7)", "pytest-mypy"] + +[[package]] +name = "cycler" +version = "0.10.0" +description = "Composable style cycles" +category = "dev" 
+optional = false +python-versions = "*" + +[package.dependencies] +six = "*" + +[[package]] +name = "decorator" +version = "4.4.2" +description = "Decorators for Humans" +category = "dev" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*" + +[[package]] +name = "greenlet" +version = "1.0.0" +description = "Lightweight in-process concurrent programming" +category = "dev" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" + +[package.extras] +docs = ["sphinx"] + +[[package]] +name = "humanfriendly" +version = "9.1" +description = "Human friendly output for text interfaces using Python" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +pyreadline = {version = "*", markers = "sys_platform == \"win32\""} + +[[package]] +name = "idna" +version = "2.10" +description = "Internationalized Domain Names in Applications (IDNA)" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "imageio" +version = "2.9.0" +description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." +category = "dev" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +numpy = "*" +pillow = "*" + +[package.extras] +ffmpeg = ["imageio-ffmpeg"] +fits = ["astropy"] +full = ["astropy", "gdal", "imageio-ffmpeg", "itk"] +gdal = ["gdal"] +itk = ["itk"] + +[[package]] +name = "imageio-ffmpeg" +version = "0.4.3" +description = "FFMPEG wrapper for Python" +category = "dev" +optional = false +python-versions = ">=3.4" + +[[package]] +name = "jinja2" +version = "2.11.3" +description = "A very fast and expressive template engine." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +MarkupSafe = ">=0.23" + +[package.extras] +i18n = ["Babel (>=0.8)"] + +[[package]] +name = "jsonpatch" +version = "1.32" +description = "Apply JSON-Patches (RFC 6902)" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +jsonpointer = ">=1.9" + +[[package]] +name = "jsonpointer" +version = "2.1" +description = "Identify specific nodes in a JSON document (RFC 6901)" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "kiwisolver" +version = "1.3.1" +description = "A fast implementation of the Cassowary constraint solver" +category = "dev" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "llvmlite" +version = "0.38.1" +description = "lightweight wrapper around basic LLVM functionality" +category = "dev" +optional = false +python-versions = ">=3.7,<3.11" + +[[package]] +name = "markupsafe" +version = "1.1.1" +description = "Safely add untrusted strings to HTML/XML markup." 
+category = "dev" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" + +[[package]] +name = "matplotlib" +version = "3.4.1" +description = "Python plotting package" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +cycler = ">=0.10" +kiwisolver = ">=1.0.1" +numpy = ">=1.16" +pillow = ">=6.2.0" +pyparsing = ">=2.2.1" +python-dateutil = ">=2.7" + +[[package]] +name = "networkx" +version = "2.5.1" +description = "Python package for creating and manipulating graphs and networks" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +decorator = ">=4.3,<5" + +[package.extras] +all = ["numpy", "scipy", "pandas", "matplotlib", "pygraphviz", "pydot", "pyyaml", "lxml", "pytest"] +gdal = ["gdal"] +lxml = ["lxml"] +matplotlib = ["matplotlib"] +numpy = ["numpy"] +pandas = ["pandas"] +pydot = ["pydot"] +pygraphviz = ["pygraphviz"] +pytest = ["pytest"] +pyyaml = ["pyyaml"] +scipy = ["scipy"] + +[[package]] +name = "numba" +version = "0.55.2" +description = "compiling Python code using LLVM" +category = "dev" +optional = false +python-versions = ">=3.7,<3.11" + +[package.dependencies] +llvmlite = ">=0.38.0rc1,<0.39" +numpy = ">=1.18,<1.23" + +[[package]] +name = "numpy" +version = "1.22.4" +description = "NumPy is the fundamental package for array computing with Python." 
+category = "dev" +optional = false +python-versions = ">=3.8" + +[[package]] +name = "packaging" +version = "21.3" +description = "Core utilities for Python packages" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" + +[[package]] +name = "pandas" +version = "1.2.3" +description = "Powerful data structures for data analysis, time series, and statistics" +category = "dev" +optional = false +python-versions = ">=3.7.1" + +[package.dependencies] +numpy = ">=1.16.5" +python-dateutil = ">=2.7.3" +pytz = ">=2017.3" + +[package.extras] +test = ["pytest (>=5.0.1)", "pytest-xdist", "hypothesis (>=3.58)"] + +[[package]] +name = "pillow" +version = "8.2.0" +description = "Python Imaging Library (Fork)" +category = "dev" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "py" +version = "1.10.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pyaml" +version = "20.4.0" +description = "PyYAML-based module to produce pretty and readable YAML-serialized data" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +PyYAML = "*" + +[[package]] +name = "pycparser" +version = "2.20" +description = "C parser in Python" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pygame" +version = "2.0.1" +description = "Python Game Development" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "pyparsing" +version = "2.4.7" +description = "Python parsing module" +category = "dev" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "pyreadline" +version = "2.1" +description = "A python implmementation of GNU readline." 
+category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "python-dateutil" +version = "2.8.1" +description = "Extensions to the standard Python datetime module" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2021.1" +description = "World timezone definitions, modern and historical" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "pywavelets" +version = "1.1.1" +description = "PyWavelets, wavelet transform module" +category = "dev" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +numpy = ">=1.13.3" + +[[package]] +name = "pyyaml" +version = "5.4.1" +description = "YAML parser and emitter for Python" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" + +[[package]] +name = "pyzmq" +version = "22.0.3" +description = "Python bindings for 0MQ" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} +py = {version = "*", markers = "implementation_name == \"pypy\""} + +[[package]] +name = "requests" +version = "2.25.1" +description = "Python HTTP for Humans." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +certifi = ">=2017.4.17" +chardet = ">=3.0.2,<5" +idna = ">=2.5,<3" +urllib3 = ">=1.21.1,<1.27" + +[package.extras] +security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)"] +socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] + +[[package]] +name = "scikit-image" +version = "0.19.3" +description = "Image processing in Python" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +imageio = ">=2.4.1" +networkx = ">=2.2" +numpy = ">=1.17.0" +packaging = ">=20.0" +pillow = ">=6.1.0,<7.1.0 || >7.1.0,<7.1.1 || >7.1.1,<8.3.0 || >8.3.0" +PyWavelets = ">=1.1.1" +scipy = ">=1.4.1" +tifffile = ">=2019.7.26" + +[package.extras] +data = ["pooch (>=1.3.0)"] +docs = ["sphinx (>=1.8)", "sphinx-gallery (>=0.10.1)", "numpydoc (>=1.0)", "sphinx-copybutton", "pytest-runner", "scikit-learn", "matplotlib (>=3.3)", "dask[array] (>=0.15.0,!=2.17.0)", "cloudpickle (>=0.2.1)", "pandas (>=0.23.0)", "seaborn (>=0.7.1)", "pooch (>=1.3.0)", "tifffile (>=2020.5.30)", "myst-parser", "ipywidgets", "plotly (>=4.14.0)", "kaleido"] +optional = ["simpleitk", "astropy (>=3.1.2)", "cloudpickle (>=0.2.1)", "dask[array] (>=1.0.0,!=2.17.0)", "matplotlib (>=3.0.3)", "pooch (>=1.3.0)", "pyamg", "qtpy"] +test = ["asv", "codecov", "flake8", "matplotlib (>=3.0.3)", "pooch (>=1.3.0)", "pytest (>=5.2.0)", "pytest-cov (>=2.7.0)", "pytest-localserver", "pytest-faulthandler"] + +[[package]] +name = "scipy" +version = "1.7.3" +description = "SciPy: Scientific Library for Python" +category = "dev" +optional = false +python-versions = ">=3.7,<3.11" + +[package.dependencies] +numpy = ">=1.16.5,<1.23.0" + +[[package]] +name = "seaborn" +version = "0.11.1" +description = "seaborn: statistical data visualization" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +matplotlib = ">=2.2" +numpy = ">=1.15" +pandas = 
">=0.23" +scipy = ">=1.0" + +[[package]] +name = "six" +version = "1.15.0" +description = "Python 2 and 3 compatibility utilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "sqlalchemy" +version = "1.4.6" +description = "Database Abstraction Library" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\""} + +[package.extras] +aiomysql = ["greenlet (!=0.4.17)", "aiomysql"] +aiosqlite = ["greenlet (!=0.4.17)", "aiosqlite"] +asyncio = ["greenlet (!=0.4.17)"] +mariadb_connector = ["mariadb (>=1.0.1)"] +mssql = ["pyodbc"] +mssql_pymssql = ["pymssql"] +mssql_pyodbc = ["pyodbc"] +mypy = ["sqlalchemy2-stubs", "mypy (>=0.800)"] +mysql = ["mysqlclient (>=1.4.0,<2)", "mysqlclient (>=1.4.0)"] +mysql_connector = ["mysqlconnector"] +oracle = ["cx_oracle (>=7,<8)", "cx_oracle (>=7)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql_asyncpg = ["greenlet (!=0.4.17)", "asyncpg"] +postgresql_pg8000 = ["pg8000 (>=1.16.6)"] +postgresql_psycopg2binary = ["psycopg2-binary"] +postgresql_psycopg2cffi = ["psycopg2cffi"] +pymysql = ["pymysql (<1)", "pymysql"] +sqlcipher = ["sqlcipher3-binary"] + +[[package]] +name = "svgpathtools" +version = "1.4.1" +description = "A collection of tools for manipulating and analyzing SVG Path objects and Bezier curves." +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +numpy = "*" +svgwrite = "*" + +[[package]] +name = "svgwrite" +version = "1.4.1" +description = "A Python library to create SVG drawings." 
+category = "dev" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "tifffile" +version = "2021.3.31" +description = "Read and write TIFF files" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +numpy = ">=1.15.1" + +[package.extras] +all = ["imagecodecs (>=2021.3.31)", "matplotlib (>=3.2)", "lxml"] + +[[package]] +name = "torch" +version = "1.10.2" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +category = "dev" +optional = false +python-versions = ">=3.6.2" + +[package.dependencies] +typing-extensions = "*" + +[[package]] +name = "torch-tools" +version = "0.1.5" +description = "A library of helpers to train, evaluate and visualize deep nets with PyTorch." +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +coloredlogs = "*" +imageio = "*" +imageio-ffmpeg = "*" +jinja2 = "*" +numpy = "*" +pyaml = "*" +seaborn = "*" +sqlalchemy = "*" +torch = "*" +torchvision = "*" +tqdm = "*" +visdom = "*" + +[package.extras] +dev = ["sphinx", "pytest"] +docs = ["sphinx"] +tests = ["pytest"] + +[[package]] +name = "torchfile" +version = "0.1.0" +description = "Torch7 binary serialized file parser" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "torchvision" +version = "0.11.3" +description = "image and video datasets and models for torch deep learning" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +numpy = "*" +pillow = ">=5.3.0,<8.3.0 || >8.3.0" +torch = "1.10.2" + +[package.extras] +scipy = ["scipy"] + +[[package]] +name = "tornado" +version = "6.1" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+category = "dev" +optional = false +python-versions = ">= 3.5" + +[[package]] +name = "tqdm" +version = "4.60.0" +description = "Fast, Extensible Progress Meter" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" + +[package.extras] +dev = ["py-make (>=0.1.0)", "twine", "wheel"] +notebook = ["ipywidgets (>=6)"] +telegram = ["requests"] + +[[package]] +name = "typing-extensions" +version = "3.7.4.3" +description = "Backported and Experimental Type Hints for Python 3.5+" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "urllib3" +version = "1.26.4" +description = "HTTP library with thread-safe connection pooling, file post, and more." +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" + +[package.extras] +secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] +brotli = ["brotlipy (>=0.6.0)"] + +[[package]] +name = "visdom" +version = "0.1.8.9" +description = "A tool for visualizing live, rich data for Torch and Numpy" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +jsonpatch = "*" +numpy = ">=1.8" +pillow = "*" +pyzmq = "*" +requests = "*" +scipy = "*" +six = "*" +torchfile = "*" +tornado = "*" +websocket-client = "*" + +[[package]] +name = "websocket-client" +version = "0.58.0" +description = "WebSocket client for Python with low level API options" +category = "dev" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +six = "*" + +[metadata] +lock-version = "1.1" +python-versions = ">=3.8" +content-hash = "bdb9e3e47aadb0192d745b2e3502513fe77939c15984403a82f10c2e1282ca51" + +[metadata.files] +certifi = [] +cffi = [] +chardet = [] +cmake = [] +coloredlogs = [] +cssutils = [] +cycler = [] +decorator = [] +greenlet = [] +humanfriendly = [] +idna = [] 
+imageio = [] +imageio-ffmpeg = [] +jinja2 = [] +jsonpatch = [] +jsonpointer = [] +kiwisolver = [] +llvmlite = [] +markupsafe = [] +matplotlib = [] +networkx = [] +numba = [] +numpy = [] +packaging = [ + {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, + {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, +] +pandas = [] +pillow = [] +py = [] +pyaml = [] +pycparser = [] +pygame = [] +pyparsing = [] +pyreadline = [] +python-dateutil = [] +pytz = [] +pywavelets = [] +pyyaml = [] +pyzmq = [] +requests = [] +scikit-image = [] +scipy = [] +seaborn = [] +six = [] +sqlalchemy = [] +svgpathtools = [] +svgwrite = [ + {file = "svgwrite-1.4.1-py3-none-any.whl", hash = "sha256:4b21652a1d9c543a6bf4f9f2a54146b214519b7540ca60cb99968ad09ef631d0"}, + {file = "svgwrite-1.4.1.zip", hash = "sha256:e220a4bf189e7e214a55e8a11421d152b5b6fb1dd660c86a8b6b61fe8cc2ac48"}, +] +tifffile = [] +torch = [] +torch-tools = [] +torchfile = [] +torchvision = [] +tornado = [] +tqdm = [] +typing-extensions = [] +urllib3 = [] +visdom = [] +websocket-client = [] diff --git a/diffvg/ptr.h b/diffvg/ptr.h new file mode 100644 index 0000000000000000000000000000000000000000..f3f8e43e148d6b0b2abec6a1d4b830a81982f50b --- /dev/null +++ b/diffvg/ptr.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +/** + * Python doesn't have a pointer type, therefore we create a pointer wrapper + * see https://stackoverflow.com/questions/48982143/returning-and-passing-around-raw-pod-pointers-arrays-with-python-c-and-pyb?rq=1 + */ +template +class ptr { +public: + ptr() : p(nullptr) {} + ptr(T* p) : p(p) {} + ptr(std::size_t p) : p((T*)p) {} + ptr(const ptr& other) : ptr(other.p) {} + T* operator->() const { return p; } + T* get() const { return p; } + void destroy() { delete p; } + bool is_null() const { return p == nullptr; } + size_t as_size_t() const {return (size_t)p;} +private: + T* p; 
+}; diff --git a/diffvg/pybind11/.appveyor.yml b/diffvg/pybind11/.appveyor.yml new file mode 100644 index 0000000000000000000000000000000000000000..149a8a3dc9d0076811810036fdd781722fc83203 --- /dev/null +++ b/diffvg/pybind11/.appveyor.yml @@ -0,0 +1,37 @@ +version: 1.0.{build} +image: +- Visual Studio 2015 +test: off +skip_branch_with_pr: true +build: + parallel: true +platform: +- x86 +environment: + matrix: + - PYTHON: 36 + CONFIG: Debug + - PYTHON: 27 + CONFIG: Debug +install: +- ps: | + $env:CMAKE_GENERATOR = "Visual Studio 14 2015" + if ($env:PLATFORM -eq "x64") { $env:PYTHON = "$env:PYTHON-x64" } + $env:PATH = "C:\Python$env:PYTHON\;C:\Python$env:PYTHON\Scripts\;$env:PATH" + python -W ignore -m pip install --upgrade pip wheel + python -W ignore -m pip install pytest numpy --no-warn-script-location +- ps: | + Start-FileDownload 'https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.zip' + 7z x eigen-3.3.7.zip -y > $null + $env:CMAKE_INCLUDE_PATH = "eigen-3.3.7;$env:CMAKE_INCLUDE_PATH" +build_script: +- cmake -G "%CMAKE_GENERATOR%" -A "%CMAKE_ARCH%" + -DCMAKE_CXX_STANDARD=14 + -DPYBIND11_WERROR=ON + -DDOWNLOAD_CATCH=ON + -DCMAKE_SUPPRESS_REGENERATION=1 + . +- set MSBuildLogger="C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" +- cmake --build . --config %CONFIG% --target pytest -- /m /v:m /logger:%MSBuildLogger% +- cmake --build . 
--config %CONFIG% --target cpptest -- /m /v:m /logger:%MSBuildLogger% +on_failure: if exist "tests\test_cmake_build" type tests\test_cmake_build\*.log* diff --git a/diffvg/pybind11/.cmake-format.yaml b/diffvg/pybind11/.cmake-format.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a2a69f3f897da990a4255b593dc3fcdf4b89a764 --- /dev/null +++ b/diffvg/pybind11/.cmake-format.yaml @@ -0,0 +1,73 @@ +parse: + additional_commands: + pybind11_add_module: + flags: + - THIN_LTO + - MODULE + - SHARED + - NO_EXTRAS + - EXCLUDE_FROM_ALL + - SYSTEM + +format: + line_width: 99 + tab_size: 2 + + # If an argument group contains more than this many sub-groups + # (parg or kwarg groups) then force it to a vertical layout. + max_subgroups_hwrap: 2 + + # If a positional argument group contains more than this many + # arguments, then force it to a vertical layout. + max_pargs_hwrap: 6 + + # If a cmdline positional group consumes more than this many + # lines without nesting, then invalidate the layout (and nest) + max_rows_cmdline: 2 + separate_ctrl_name_with_space: false + separate_fn_name_with_space: false + dangle_parens: false + + # If the trailing parenthesis must be 'dangled' on its on + # 'line, then align it to this reference: `prefix`: the start' + # 'of the statement, `prefix-indent`: the start of the' + # 'statement, plus one indentation level, `child`: align to' + # the column of the arguments + dangle_align: prefix + # If the statement spelling length (including space and + # parenthesis) is smaller than this amount, then force reject + # nested layouts. + min_prefix_chars: 4 + + # If the statement spelling length (including space and + # parenthesis) is larger than the tab width by more than this + # amount, then force reject un-nested layouts. + max_prefix_chars: 10 + + # If a candidate layout is wrapped horizontally but it exceeds + # this many lines, then reject the layout. 
+ max_lines_hwrap: 2 + + line_ending: unix + + # Format command names consistently as 'lower' or 'upper' case + command_case: canonical + + # Format keywords consistently as 'lower' or 'upper' case + # unchanged is valid too + keyword_case: 'upper' + + # A list of command names which should always be wrapped + always_wrap: [] + + # If true, the argument lists which are known to be sortable + # will be sorted lexicographically + enable_sort: true + + # If true, the parsers may infer whether or not an argument + # list is sortable (without annotation). + autosort: false + +# Causes a few issues - can be solved later, possibly. +markup: + enable_markup: false diff --git a/diffvg/pybind11/.github/CONTRIBUTING.md b/diffvg/pybind11/.github/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..f61011d54059501e60f41d8da343aab10f259f6a --- /dev/null +++ b/diffvg/pybind11/.github/CONTRIBUTING.md @@ -0,0 +1,171 @@ +Thank you for your interest in this project! Please refer to the following +sections on how to contribute code and bug reports. + +### Reporting bugs + +Before submitting a question or bug report, please take a moment of your time +and ensure that your issue isn't already discussed in the project documentation +provided at [pybind11.readthedocs.org][] or in the [issue tracker][]. You can +also check [gitter][] to see if it came up before. + +Assuming that you have identified a previously unknown problem or an important +question, it's essential that you submit a self-contained and minimal piece of +code that reproduces the problem. In other words: no external dependencies, +isolate the function(s) that cause breakage, submit matched and complete C++ +and Python snippets that can be easily compiled and run in isolation; or +ideally make a small PR with a failing test case that can be used as a starting +point. + +## Pull requests + +Contributions are submitted, reviewed, and accepted using GitHub pull requests. 
+Please refer to [this article][using pull requests] for details and adhere to +the following rules to make the process as smooth as possible: + +* Make a new branch for every feature you're working on. +* Make small and clean pull requests that are easy to review but make sure they + do add value by themselves. +* Add tests for any new functionality and run the test suite (`cmake --build + build --target pytest`) to ensure that no existing features break. +* Please run [`pre-commit`][pre-commit] to check your code matches the + project style. (Note that `gawk` is required.) Use `pre-commit run + --all-files` before committing (or use installed-mode, check pre-commit docs) + to verify your code passes before pushing to save time. +* This project has a strong focus on providing general solutions using a + minimal amount of code, thus small pull requests are greatly preferred. + +### Licensing of contributions + +pybind11 is provided under a BSD-style license that can be found in the +``LICENSE`` file. By using, distributing, or contributing to this project, you +agree to the terms and conditions of this license. + +You are under no obligation whatsoever to provide any bug fixes, patches, or +upgrades to the features, functionality or performance of the source code +("Enhancements") to anyone; however, if you choose to make your Enhancements +available either publicly, or directly to the author of this software, without +imposing a separate written license agreement for such Enhancements, then you +hereby grant the following license: a non-exclusive, royalty-free perpetual +license to install, use, modify, prepare derivative works, incorporate into +other computer software, distribute, and sublicense such enhancements or +derivative works thereof, in binary and source code form. 
+ + +## Development of pybind11 + +To setup an ideal development environment, run the following commands on a +system with CMake 3.14+: + +```bash +python3 -m venv venv +source venv/bin/activate +pip install -r tests/requirements.txt +cmake -S . -B build -DDOWNLOAD_CATCH=ON -DDOWNLOAD_EIGEN=ON +cmake --build build -j4 +``` + +Tips: + +* You can use `virtualenv` (from PyPI) instead of `venv` (which is Python 3 + only). +* You can select any name for your environment folder; if it contains "env" it + will be ignored by git. +* If you don’t have CMake 3.14+, just add β€œcmake” to the pip install command. +* You can use `-DPYBIND11_FINDPYTHON=ON` to use FindPython on CMake 3.12+ +* In classic mode, you may need to set `-DPYTHON_EXECUTABLE=/path/to/python`. + FindPython uses `-DPython_ROOT_DIR=/path/to` or + `-DPython_EXECUTABLE=/path/to/python`. + +### Configuration options + +In CMake, configuration options are given with β€œ-D”. Options are stored in the +build directory, in the `CMakeCache.txt` file, so they are remembered for each +build directory. Two selections are special - the generator, given with `-G`, +and the compiler, which is selected based on environment variables `CXX` and +similar, or `-DCMAKE_CXX_COMPILER=`. Unlike the others, these cannot be changed +after the initial run. + +The valid options are: + +* `-DCMAKE_BUILD_TYPE`: Release, Debug, MinSizeRel, RelWithDebInfo +* `-DPYBIND11_FINDPYTHON=ON`: Use CMake 3.12+’s FindPython instead of the + classic, deprecated, custom FindPythonLibs +* `-DPYBIND11_NOPYTHON=ON`: Disable all Python searching (disables tests) +* `-DBUILD_TESTING=ON`: Enable the tests +* `-DDOWNLOAD_CATCH=ON`: Download catch to build the C++ tests +* `-DOWNLOAD_EIGEN=ON`: Download Eigen for the NumPy tests +* `-DPYBIND11_INSTALL=ON/OFF`: Enable the install target (on by default for the + master project) +* `-DUSE_PYTHON_INSTALL_DIR=ON`: Try to install into the python dir + + +
A few standard CMake tricks: (click to expand)

+ +* Use `cmake --build build -v` to see the commands used to build the files. +* Use `cmake build -LH` to list the CMake options with help. +* Use `ccmake` if available to see a curses (terminal) gui, or `cmake-gui` for + a completely graphical interface (not present in the PyPI package). +* Use `cmake --build build -j12` to build with 12 cores (for example). +* Use `-G` and the name of a generator to use something different. `cmake + --help` lists the generators available. + - On Unix, setting `CMAKE_GENERATER=Ninja` in your environment will give + you automatic mulithreading on all your CMake projects! +* Open the `CMakeLists.txt` with QtCreator to generate for that IDE. +* You can use `-DCMAKE_EXPORT_COMPILE_COMMANDS=ON` to generate the `.json` file + that some tools expect. + +

+ + +To run the tests, you can "build" the check target: + +```bash +cmake --build build --target check +``` + +`--target` can be spelled `-t` in CMake 3.15+. You can also run individual +tests with these targets: + +* `pytest`: Python tests only +* `cpptest`: C++ tests only +* `test_cmake_build`: Install / subdirectory tests + +If you want to build just a subset of tests, use +`-DPYBIND11_TEST_OVERRIDE="test_callbacks.cpp;test_pickling.cpp"`. If this is +empty, all tests will be built. + +### Formatting + +All formatting is handled by pre-commit. + +Install with brew (macOS) or pip (any OS): + +```bash +# Any OS +python3 -m pip install pre-commit + +# OR macOS with homebrew: +brew install pre-commit +``` + +Then, you can run it on the items you've added to your staging area, or all +files: + +```bash +pre-commit run +# OR +pre-commit run --all-files +``` + +And, if you want to always use it, you can install it as a git hook (hence the +name, pre-commit): + +```bash +pre-commit install +``` + +[pre-commit]: https://pre-commit.com +[pybind11.readthedocs.org]: http://pybind11.readthedocs.org/en/latest +[issue tracker]: https://github.com/pybind/pybind11/issues +[gitter]: https://gitter.im/pybind/Lobby +[using pull requests]: https://help.github.com/articles/using-pull-requests diff --git a/diffvg/pybind11/.github/ISSUE_TEMPLATE/bug-report.md b/diffvg/pybind11/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000000000000000000000000000000000000..ae36ea65083643dfc6f252249141b94c7ecb65e7 --- /dev/null +++ b/diffvg/pybind11/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,28 @@ +--- +name: Bug Report +about: File an issue about a bug +title: "[BUG] " +--- + + +Make sure you've completed the following steps before submitting your issue -- thank you! + +1. Make sure you've read the [documentation][]. Your issue may be addressed there. +2. Search the [issue tracker][] to verify that this hasn't already been reported. +1 or comment there if it has. +3. 
Consider asking first in the [Gitter chat room][]. +4. Include a self-contained and minimal piece of code that reproduces the problem. If that's not possible, try to make the description as clear as possible. + a. If possible, make a PR with a new, failing test to give us a starting point to work on! + +[documentation]: https://pybind11.readthedocs.io +[issue tracker]: https://github.com/pybind/pybind11/issues +[Gitter chat room]: https://gitter.im/pybind/Lobby + +*After reading, remove this checklist and the template text in parentheses below.* + +## Issue description + +(Provide a short description, state the expected behavior and what actually happens.) + +## Reproducible example code + +(The code should be minimal, have no external dependencies, isolate the function(s) that cause breakage. Submit matched and complete C++ and Python snippets that can be easily compiled and run to diagnose the issue.) diff --git a/diffvg/pybind11/.github/ISSUE_TEMPLATE/config.yml b/diffvg/pybind11/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..20e743136f3f2efea7deadf3e08c5b104c22a7f3 --- /dev/null +++ b/diffvg/pybind11/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: Gitter room + url: https://gitter.im/pybind/Lobby + about: A room for discussing pybind11 with an active community diff --git a/diffvg/pybind11/.github/ISSUE_TEMPLATE/feature-request.md b/diffvg/pybind11/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000000000000000000000000000000000000..5f6ec81ec972b13e38520ef2e37d85022c2642c9 --- /dev/null +++ b/diffvg/pybind11/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,16 @@ +--- +name: Feature Request +about: File an issue about adding a feature +title: "[FEAT] " +--- + + +Make sure you've completed the following steps before submitting your issue -- thank you! + +1. 
Check if your feature has already been mentioned / rejected / planned in other issues. +2. If those resources didn't help, consider asking in the [Gitter chat room][] to see if this is interesting / useful to a larger audience and possible to implement reasonably, +4. If you have a useful feature that passes the previous items (or not suitable for chat), please fill in the details below. + +[Gitter chat room]: https://gitter.im/pybind/Lobby + +*After reading, remove this checklist.* diff --git a/diffvg/pybind11/.github/ISSUE_TEMPLATE/question.md b/diffvg/pybind11/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 0000000000000000000000000000000000000000..b199b6ee8ad446994aed54f67b0d1c22049d53c1 --- /dev/null +++ b/diffvg/pybind11/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,21 @@ +--- +name: Question +about: File an issue about unexplained behavior +title: "[QUESTION] " +--- + +If you have a question, please check the following first: + +1. Check if your question has already been answered in the [FAQ][] section. +2. Make sure you've read the [documentation][]. Your issue may be addressed there. +3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room][] +4. Search the [issue tracker][], including the closed issues, to see if your question has already been asked/answered. +1 or comment if it has been asked but has no answer. +5. If you have a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below. +6. Include a self-contained and minimal piece of code that illustrates your question. If that's not possible, try to make the description as clear as possible. 
+ +[FAQ]: http://pybind11.readthedocs.io/en/latest/faq.html +[documentation]: https://pybind11.readthedocs.io +[issue tracker]: https://github.com/pybind/pybind11/issues +[Gitter chat room]: https://gitter.im/pybind/Lobby + +*After reading, remove this checklist.* diff --git a/diffvg/pybind11/.github/workflows/ci.yml b/diffvg/pybind11/.github/workflows/ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..825631beae41c182a250ffc10072e60e740736ef --- /dev/null +++ b/diffvg/pybind11/.github/workflows/ci.yml @@ -0,0 +1,359 @@ +name: CI + +on: + workflow_dispatch: + pull_request: + push: + branches: + - master + - stable + - v* + +jobs: + standard: + strategy: + matrix: + runs-on: [ubuntu-latest, windows-latest, macos-latest] + arch: [x64] + python: + - 2.7 + - 3.5 + - 3.8 + - pypy2 + - pypy3 + + include: + - runs-on: ubuntu-latest + python: 3.6 + arch: x64 + args: > + -DPYBIND11_FINDPYTHON=ON + - runs-on: windows-2016 + python: 3.7 + arch: x86 + args2: > + -DCMAKE_CXX_FLAGS="/permissive- /EHsc /GR" + - runs-on: windows-latest + python: 3.6 + arch: x64 + args: > + -DPYBIND11_FINDPYTHON=ON + - runs-on: windows-latest + python: 3.7 + arch: x64 + + - runs-on: ubuntu-latest + python: 3.9-dev + arch: x64 + - runs-on: macos-latest + python: 3.9-dev + arch: x64 + args: > + -DPYBIND11_FINDPYTHON=ON + + exclude: + # Currently 32bit only, and we build 64bit + - runs-on: windows-latest + python: pypy2 + arch: x64 + - runs-on: windows-latest + python: pypy3 + arch: x64 + + # Currently broken on embed_test + - runs-on: windows-latest + python: 3.8 + arch: x64 + - runs-on: windows-latest + python: 3.9-dev + arch: x64 + + + name: "🐍 ${{ matrix.python }} β€’ ${{ matrix.runs-on }} β€’ ${{ matrix.arch }} ${{ matrix.args }}" + runs-on: ${{ matrix.runs-on }} + continue-on-error: ${{ endsWith(matrix.python, 'dev') }} + + steps: + - uses: actions/checkout@v2 + + - name: Setup Python ${{ matrix.python }} + uses: actions/setup-python@v2 + with: + python-version: ${{ 
matrix.python }} + architecture: ${{ matrix.arch }} + + - name: Setup Boost (Windows / Linux latest) + run: echo "::set-env name=BOOST_ROOT::$BOOST_ROOT_1_72_0" + + - name: Update CMake + uses: jwlawson/actions-setup-cmake@v1.3 + + - name: Cache wheels + if: runner.os == 'macOS' + uses: actions/cache@v2 + with: + # This path is specific to macOS - we really only need it for PyPy NumPy wheels + # See https://github.com/actions/cache/blob/master/examples.md#python---pip + # for ways to do this more generally + path: ~/Library/Caches/pip + # Look to see if there is a cache hit for the corresponding requirements file + key: ${{ runner.os }}-pip-${{ matrix.python }}-${{ matrix.arch }}-${{ hashFiles('tests/requirements.txt') }} + + - name: Prepare env + run: python -m pip install -r tests/requirements.txt --prefer-binary + + - name: Setup annotations + run: python -m pip install pytest-github-actions-annotate-failures + + - name: Configure C++11 ${{ matrix.args }} + run: > + cmake -S . -B . + -DPYBIND11_WERROR=ON + -DDOWNLOAD_CATCH=ON + -DDOWNLOAD_EIGEN=ON + -DCMAKE_CXX_STANDARD=11 + ${{ matrix.args }} + + - name: Build C++11 + run: cmake --build . -j 2 + + - name: Python tests C++11 + run: cmake --build . --target pytest -j 2 + + - name: C++11 tests + run: cmake --build . --target cpptest -j 2 + + - name: Interface test C++11 + run: cmake --build . --target test_cmake_build + + - name: Clean directory + run: git clean -fdx + + - name: Configure ${{ matrix.args2 }} + run: > + cmake -S . 
-B build2 + -DPYBIND11_WERROR=ON + -DDOWNLOAD_CATCH=ON + -DDOWNLOAD_EIGEN=ON + -DCMAKE_CXX_STANDARD=17 + ${{ matrix.args }} + ${{ matrix.args2 }} + + - name: Build + run: cmake --build build2 -j 2 + + - name: Python tests + run: cmake --build build2 --target pytest + + - name: C++ tests + run: cmake --build build2 --target cpptest + + - name: Interface test + run: cmake --build build2 --target test_cmake_build + + clang: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + clang: + - 3.6 + - 3.7 + - 3.9 + - 5 + - 7 + - 9 + - dev + + name: "🐍 3 β€’ Clang ${{ matrix.clang }} β€’ x64" + container: "silkeh/clang:${{ matrix.clang }}" + + steps: + - uses: actions/checkout@v2 + + - name: Add wget and python3 + run: apt-get update && apt-get install -y python3-dev python3-numpy python3-pytest libeigen3-dev + + - name: Configure + shell: bash + run: > + cmake -S . -B build + -DPYBIND11_WERROR=ON + -DDOWNLOAD_CATCH=ON + -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") + + - name: Build + run: cmake --build build -j 2 + + - name: Python tests + run: cmake --build build --target pytest + + - name: C++ tests + run: cmake --build build --target cpptest + + - name: Interface test + run: cmake --build build --target test_cmake_build + + gcc: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + gcc: + - 7 + - latest + + name: "🐍 3 β€’ GCC ${{ matrix.gcc }} β€’ x64" + container: "gcc:${{ matrix.gcc }}" + + steps: + - uses: actions/checkout@v1 + + - name: Add Python 3 + run: apt-get update; apt-get install -y python3-dev python3-numpy python3-pytest python3-pip libeigen3-dev + + - name: Update pip + run: python3 -m pip install --upgrade pip + + - name: Setup CMake 3.18 + uses: jwlawson/actions-setup-cmake@v1.3 + with: + cmake-version: 3.18 + + - name: Configure + shell: bash + run: > + cmake -S . 
-B build + -DPYBIND11_WERROR=ON + -DDOWNLOAD_CATCH=ON + -DCMAKE_CXX_STANDARD=11 + -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") + + - name: Build + run: cmake --build build -j 2 + + - name: Python tests + run: cmake --build build --target pytest + + - name: C++ tests + run: cmake --build build --target cpptest + + - name: Interface test + run: cmake --build build --target test_cmake_build + + centos: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + centos: + - 7 # GCC 4.8 + - 8 + + name: "🐍 3 β€’ CentOS ${{ matrix.centos }} β€’ x64" + container: "centos:${{ matrix.centos }}" + + steps: + - uses: actions/checkout@v2 + + - name: Add Python 3 + run: yum update -y && yum install -y python3-devel gcc-c++ make git + + - name: Update pip + run: python3 -m pip install --upgrade pip + + - name: Install dependencies + run: python3 -m pip install cmake -r tests/requirements.txt --prefer-binary + + - name: Configure + shell: bash + run: > + cmake -S . -B build + -DPYBIND11_WERROR=ON + -DDOWNLOAD_CATCH=ON + -DDOWNLOAD_EIGEN=ON + -DCMAKE_CXX_STANDARD=11 + -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") + + - name: Build + run: cmake --build build -j 2 + + - name: Python tests + run: cmake --build build --target pytest + + - name: C++ tests + run: cmake --build build --target cpptest + + - name: Interface test + run: cmake --build build --target test_cmake_build + + install-classic: + name: "🐍 3.5 β€’ Debian β€’ x86 β€’ Install" + runs-on: ubuntu-latest + container: i386/debian:stretch + + steps: + - uses: actions/checkout@v1 + + - name: Install requirements + run: | + apt-get update + apt-get install -y git make cmake g++ libeigen3-dev python3-dev python3-pip + pip3 install "pytest==3.1.*" + + - name: Configure for install + run: > + cmake . 
+ -DPYBIND11_INSTALL=1 -DPYBIND11_TEST=0 + -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") + + - name: Make and install + run: make install + + - name: Copy tests to new directory + run: cp -a tests /pybind11-tests + + - name: Make a new test directory + run: mkdir /build-tests + + - name: Configure tests + run: > + cmake ../pybind11-tests + -DDOWNLOAD_CATCH=ON + -DPYBIND11_WERROR=ON + -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") + working-directory: /build-tests + + - name: Run tests + run: make pytest -j 2 + working-directory: /build-tests + + + doxygen: + name: "Documentation build test" + runs-on: ubuntu-latest + container: alpine:3.12 + + steps: + - uses: actions/checkout@v2 + + - name: Install system requirements + run: apk add doxygen python3-dev + + - name: Ensure pip + run: python3 -m ensurepip + + - name: Install docs & setup requirements + run: python3 -m pip install -r docs/requirements.txt pytest setuptools + + - name: Build docs + run: python3 -m sphinx -W -b html docs docs/.build + + - name: Make SDist + run: python3 setup.py sdist + + - name: Compare Dists (headers only) + run: | + python3 -m pip install --user -U ./dist/* + installed=$(python3 -c "import pybind11; print(pybind11.get_include(True) + '/pybind11')") + diff -rq $installed ./include/pybind11 diff --git a/diffvg/pybind11/.github/workflows/configure.yml b/diffvg/pybind11/.github/workflows/configure.yml new file mode 100644 index 0000000000000000000000000000000000000000..d472f4b1917060053e464baaebebd9e90a5172f0 --- /dev/null +++ b/diffvg/pybind11/.github/workflows/configure.yml @@ -0,0 +1,78 @@ +name: Config + +on: + workflow_dispatch: + pull_request: + push: + branches: + - master + - stable + - v* + +jobs: + cmake: + strategy: + fail-fast: false + matrix: + runs-on: [ubuntu-latest, macos-latest, windows-latest] + arch: [x64] + cmake: [3.18] + + include: + - runs-on: ubuntu-latest + arch: x64 + cmake: 3.4 + + - runs-on: macos-latest + arch: 
x64 + cmake: 3.7 + + - runs-on: windows-2016 + arch: x86 + cmake: 3.8 + + - runs-on: windows-2016 + arch: x86 + cmake: 3.18 + + name: 🐍 3.7 β€’ CMake ${{ matrix.cmake }} β€’ ${{ matrix.runs-on }} + runs-on: ${{ matrix.runs-on }} + + steps: + - uses: actions/checkout@v2 + + - name: Setup Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + architecture: ${{ matrix.arch }} + + - name: Prepare env + run: python -m pip install -r tests/requirements.txt + + - name: Setup CMake ${{ matrix.cmake }} + uses: jwlawson/actions-setup-cmake@v1.3 + with: + cmake-version: ${{ matrix.cmake }} + + - name: Make build directories + run: mkdir "build dir" + + - name: Configure + working-directory: build dir + shell: bash + run: > + cmake .. + -DPYBIND11_WERROR=ON + -DDOWNLOAD_CATCH=ON + -DPYTHON_EXECUTABLE=$(python -c "import sys; print(sys.executable)") + + - name: Build + working-directory: build dir + if: github.event_name == 'workflow_dispatch' + run: cmake --build . --config Release + + - name: Test + working-directory: build dir + if: github.event_name == 'workflow_dispatch' + run: cmake --build . 
--config Release --target check diff --git a/diffvg/pybind11/.github/workflows/format.yml b/diffvg/pybind11/.github/workflows/format.yml new file mode 100644 index 0000000000000000000000000000000000000000..e92f96e6ef06662528c8acc2000710db91d3fe0a --- /dev/null +++ b/diffvg/pybind11/.github/workflows/format.yml @@ -0,0 +1,19 @@ +name: Format + +on: + workflow_dispatch: + pull_request: + push: + branches: + - master + - stable + - "v*" + +jobs: + pre-commit: + name: Format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - uses: pre-commit/action@v2.0.0 diff --git a/diffvg/pybind11/.gitignore b/diffvg/pybind11/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5613b367d257a8c155ff2c9bdc3a9cae99fe7376 --- /dev/null +++ b/diffvg/pybind11/.gitignore @@ -0,0 +1,41 @@ +CMakeCache.txt +CMakeFiles +Makefile +cmake_install.cmake +cmake_uninstall.cmake +.DS_Store +*.so +*.pyd +*.dll +*.sln +*.sdf +*.opensdf +*.vcxproj +*.vcxproj.user +*.filters +example.dir +Win32 +x64 +Release +Debug +.vs +CTestTestfile.cmake +Testing +autogen +MANIFEST +/.ninja_* +/*.ninja +/docs/.build +*.py[co] +*.egg-info +*~ +.*.swp +.DS_Store +/dist +/*build* +.cache/ +sosize-*.txt +pybind11Config*.cmake +pybind11Targets.cmake +/*env* +/.vscode diff --git a/diffvg/pybind11/.gitmodules b/diffvg/pybind11/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..4d698f93f828a5c7538ba9af2059a881ec99ac55 --- /dev/null +++ b/diffvg/pybind11/.gitmodules @@ -0,0 +1,3 @@ +[submodule "tools/clang"] + path = tools/clang + url = ../../wjakob/clang-cindex-python3.git diff --git a/diffvg/pybind11/.pre-commit-config.yaml b/diffvg/pybind11/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a046c6fcfe6b60666c7b4353372c06ab4a8d13c1 --- /dev/null +++ b/diffvg/pybind11/.pre-commit-config.yaml @@ -0,0 +1,44 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: 
v3.1.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-merge-conflict + - id: check-symlinks + - id: check-yaml + - id: debug-statements + - id: end-of-file-fixer + - id: mixed-line-ending + - id: requirements-txt-fixer + - id: trailing-whitespace + - id: fix-encoding-pragma + +- repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.1.9 + hooks: + - id: remove-tabs + +- repo: https://gitlab.com/pycqa/flake8 + rev: 3.8.3 + hooks: + - id: flake8 + additional_dependencies: [flake8-bugbear, pep8-naming] + exclude: ^(docs/.*|tools/.*)$ + +- repo: https://github.com/cheshirekow/cmake-format-precommit + rev: v0.6.11 + hooks: + - id: cmake-format + additional_dependencies: [pyyaml] + types: [file] + files: (\.cmake|CMakeLists.txt)(.in)?$ + +- repo: local + hooks: + - id: check-style + name: Classic check-style + language: system + types: + - c++ + entry: ./tools/check-style.sh diff --git a/diffvg/pybind11/.readthedocs.yml b/diffvg/pybind11/.readthedocs.yml new file mode 100644 index 0000000000000000000000000000000000000000..c9c61617ca9b13a3e31d33226c52ba9529872a0d --- /dev/null +++ b/diffvg/pybind11/.readthedocs.yml @@ -0,0 +1,3 @@ +python: + version: 3 +requirements_file: docs/requirements.txt diff --git a/diffvg/pybind11/CMakeLists.txt b/diffvg/pybind11/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..3b460494a86a7b7d7487bfd10d4b2560441d5a76 --- /dev/null +++ b/diffvg/pybind11/CMakeLists.txt @@ -0,0 +1,271 @@ +# CMakeLists.txt -- Build system for the pybind11 modules +# +# Copyright (c) 2015 Wenzel Jakob +# +# All rights reserved. Use of this source code is governed by a +# BSD-style license that can be found in the LICENSE file. + +cmake_minimum_required(VERSION 3.4) + +# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with +# some versions of VS that have a patched CMake 3.11. 
This forces us to emulate +# the behavior using the following workaround: +if(${CMAKE_VERSION} VERSION_LESS 3.18) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +else() + cmake_policy(VERSION 3.18) +endif() + +# Extract project version from source +file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/pybind11/detail/common.h" + pybind11_version_defines REGEX "#define PYBIND11_VERSION_(MAJOR|MINOR|PATCH) ") + +foreach(ver ${pybind11_version_defines}) + if(ver MATCHES [[#define PYBIND11_VERSION_(MAJOR|MINOR|PATCH) +([^ ]+)$]]) + set(PYBIND11_VERSION_${CMAKE_MATCH_1} "${CMAKE_MATCH_2}") + endif() +endforeach() + +if(PYBIND11_VERSION_PATCH MATCHES [[([a-zA-Z]+)]]) + set(pybind11_VERSION_TYPE "${CMAKE_MATCH_1}") +endif() +string(REGEX MATCH "[0-9]+" PYBIND11_VERSION_PATCH "${PYBIND11_VERSION_PATCH}") + +project( + pybind11 + LANGUAGES CXX + VERSION "${PYBIND11_VERSION_MAJOR}.${PYBIND11_VERSION_MINOR}.${PYBIND11_VERSION_PATCH}") + +# Standard includes +include(GNUInstallDirs) +include(CMakePackageConfigHelpers) +include(CMakeDependentOption) + +if(NOT pybind11_FIND_QUIETLY) + message(STATUS "pybind11 v${pybind11_VERSION} ${pybind11_VERSION_TYPE}") +endif() + +# Check if pybind11 is being used directly or via add_subdirectory +if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) + ### Warn if not an out-of-source builds + if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR) + set(lines + "You are building in-place. 
If that is not what you intended to " + "do, you can clean the source directory with:\n" + "rm -r CMakeCache.txt CMakeFiles/ cmake_uninstall.cmake pybind11Config.cmake " + "pybind11ConfigVersion.cmake tests/CMakeFiles/\n") + message(AUTHOR_WARNING ${lines}) + endif() + + set(PYBIND11_MASTER_PROJECT ON) + + if(OSX AND CMAKE_VERSION VERSION_LESS 3.7) + # Bug in macOS CMake < 3.7 is unable to download catch + message(WARNING "CMAKE 3.7+ needed on macOS to download catch, and newer HIGHLY recommended") + elseif(WINDOWS AND CMAKE_VERSION VERSION_LESS 3.8) + # Only tested with 3.8+ in CI. + message(WARNING "CMAKE 3.8+ tested on Windows, previous versions untested") + endif() + + message(STATUS "CMake ${CMAKE_VERSION}") + + if(CMAKE_CXX_STANDARD) + set(CMAKE_CXX_EXTENSIONS OFF) + set(CMAKE_CXX_STANDARD_REQUIRED ON) + endif() +else() + set(PYBIND11_MASTER_PROJECT OFF) + set(pybind11_system SYSTEM) +endif() + +# Options +option(PYBIND11_INSTALL "Install pybind11 header files?" ${PYBIND11_MASTER_PROJECT}) +option(PYBIND11_TEST "Build pybind11 test suite?" 
${PYBIND11_MASTER_PROJECT}) +option(PYBIND11_NOPYTHON "Disable search for Python" OFF) + +cmake_dependent_option( + USE_PYTHON_INCLUDE_DIR + "Install pybind11 headers in Python include directory instead of default installation prefix" + OFF "PYBIND11_INSTALL" OFF) + +cmake_dependent_option(PYBIND11_FINDPYTHON "Force new FindPython" OFF + "NOT CMAKE_VERSION VERSION_LESS 3.12" OFF) + +# NB: when adding a header don't forget to also add it to setup.py +set(PYBIND11_HEADERS + include/pybind11/detail/class.h + include/pybind11/detail/common.h + include/pybind11/detail/descr.h + include/pybind11/detail/init.h + include/pybind11/detail/internals.h + include/pybind11/detail/typeid.h + include/pybind11/attr.h + include/pybind11/buffer_info.h + include/pybind11/cast.h + include/pybind11/chrono.h + include/pybind11/common.h + include/pybind11/complex.h + include/pybind11/options.h + include/pybind11/eigen.h + include/pybind11/embed.h + include/pybind11/eval.h + include/pybind11/iostream.h + include/pybind11/functional.h + include/pybind11/numpy.h + include/pybind11/operators.h + include/pybind11/pybind11.h + include/pybind11/pytypes.h + include/pybind11/stl.h + include/pybind11/stl_bind.h) + +# Compare with grep and warn if mismatched +if(PYBIND11_MASTER_PROJECT AND NOT CMAKE_VERSION VERSION_LESS 3.12) + file( + GLOB_RECURSE _pybind11_header_check + LIST_DIRECTORIES false + RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" + CONFIGURE_DEPENDS "include/pybind11/*.h") + set(_pybind11_here_only ${PYBIND11_HEADERS}) + set(_pybind11_disk_only ${_pybind11_header_check}) + list(REMOVE_ITEM _pybind11_here_only ${_pybind11_header_check}) + list(REMOVE_ITEM _pybind11_disk_only ${PYBIND11_HEADERS}) + if(_pybind11_here_only) + message(AUTHOR_WARNING "PYBIND11_HEADERS has extra files:" ${_pybind11_here_only}) + endif() + if(_pybind11_disk_only) + message(AUTHOR_WARNING "PYBIND11_HEADERS is missing files:" ${_pybind11_disk_only}) + endif() +endif() + +# CMake 3.12 added list(TRANSFORM PREPEND +# But 
we can't use it yet +string(REPLACE "include/" "${CMAKE_CURRENT_SOURCE_DIR}/include/" PYBIND11_HEADERS + "${PYBIND11_HEADERS}") + +# Cache variables so pybind11_add_module can be used in parent projects +set(PYBIND11_INCLUDE_DIR + "${CMAKE_CURRENT_LIST_DIR}/include" + CACHE INTERNAL "") + +# Note: when creating targets, you cannot use if statements at configure time - +# you need generator expressions, because those will be placed in the target file. +# You can also place ifs *in* the Config.in, but not here. + +# This section builds targets, but does *not* touch Python + +# Build the headers-only target (no Python included): +# (long name used here to keep this from clashing in subdirectory mode) +add_library(pybind11_headers INTERFACE) +add_library(pybind11::pybind11_headers ALIAS pybind11_headers) # to match exported target +add_library(pybind11::headers ALIAS pybind11_headers) # easier to use/remember + +include("${CMAKE_CURRENT_SOURCE_DIR}/tools/pybind11Common.cmake") + +if(NOT PYBIND11_MASTER_PROJECT AND NOT pybind11_FIND_QUIETLY) + message(STATUS "Using pybind11: (version \"${pybind11_VERSION}\" ${pybind11_VERSION_TYPE})") +endif() + +# Relative directory setting +if(USE_PYTHON_INCLUDE_DIR AND DEFINED Python_INCLUDE_DIRS) + file(RELATIVE_PATH CMAKE_INSTALL_INCLUDEDIR ${CMAKE_INSTALL_PREFIX} ${Python_INCLUDE_DIRS}) +elseif(USE_PYTHON_INCLUDE_DIR AND DEFINED PYTHON_INCLUDE_DIR) + file(RELATIVE_PATH CMAKE_INSTALL_INCLUDEDIR ${CMAKE_INSTALL_PREFIX} ${PYTHON_INCLUDE_DIRS}) +endif() + +# Fill in headers target +target_include_directories( + pybind11_headers ${pybind11_system} INTERFACE $ + $) + +target_compile_features(pybind11_headers INTERFACE cxx_inheriting_constructors cxx_user_literals + cxx_right_angle_brackets) + +if(PYBIND11_INSTALL) + install(DIRECTORY ${PYBIND11_INCLUDE_DIR}/pybind11 DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) + # GNUInstallDirs "DATADIR" wrong here; CMake search path wants "share". 
+ set(PYBIND11_CMAKECONFIG_INSTALL_DIR + "share/cmake/${PROJECT_NAME}" + CACHE STRING "install path for pybind11Config.cmake") + + configure_package_config_file( + tools/${PROJECT_NAME}Config.cmake.in "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" + INSTALL_DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR}) + + if(CMAKE_VERSION VERSION_LESS 3.14) + # Remove CMAKE_SIZEOF_VOID_P from ConfigVersion.cmake since the library does + # not depend on architecture specific settings or libraries. + set(_PYBIND11_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P}) + unset(CMAKE_SIZEOF_VOID_P) + + write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake + VERSION ${PROJECT_VERSION} + COMPATIBILITY AnyNewerVersion) + + set(CMAKE_SIZEOF_VOID_P ${_PYBIND11_CMAKE_SIZEOF_VOID_P}) + else() + # CMake 3.14+ natively supports header-only libraries + write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake + VERSION ${PROJECT_VERSION} + COMPATIBILITY AnyNewerVersion ARCH_INDEPENDENT) + endif() + + install( + FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake + tools/FindPythonLibsNew.cmake + tools/pybind11Common.cmake + tools/pybind11Tools.cmake + tools/pybind11NewTools.cmake + DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR}) + + if(NOT PYBIND11_EXPORT_NAME) + set(PYBIND11_EXPORT_NAME "${PROJECT_NAME}Targets") + endif() + + install(TARGETS pybind11_headers EXPORT "${PYBIND11_EXPORT_NAME}") + + install( + EXPORT "${PYBIND11_EXPORT_NAME}" + NAMESPACE "pybind11::" + DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR}) + + # Uninstall target + if(PYBIND11_MASTER_PROJECT) + configure_file("${CMAKE_CURRENT_SOURCE_DIR}/tools/cmake_uninstall.cmake.in" + "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" IMMEDIATE @ONLY) + + add_custom_target(uninstall COMMAND ${CMAKE_COMMAND} -P + ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake) + endif() 
+endif() + +# BUILD_TESTING takes priority, but only if this is the master project +if(PYBIND11_MASTER_PROJECT AND DEFINED BUILD_TESTING) + if(BUILD_TESTING) + if(_pybind11_nopython) + message(FATAL_ERROR "Cannot activate tests in NOPYTHON mode") + else() + add_subdirectory(tests) + endif() + endif() +else() + if(PYBIND11_TEST) + if(_pybind11_nopython) + message(FATAL_ERROR "Cannot activate tests in NOPYTHON mode") + else() + add_subdirectory(tests) + endif() + endif() +endif() + +# Better symmetry with find_package(pybind11 CONFIG) mode. +if(NOT PYBIND11_MASTER_PROJECT) + set(pybind11_FOUND + TRUE + CACHE INTERNAL "true if pybind11 and all required components found on the system") + set(pybind11_INCLUDE_DIR + "${PYBIND11_INCLUDE_DIR}" + CACHE INTERNAL "Directory where pybind11 headers are located") +endif() diff --git a/diffvg/pybind11/LICENSE b/diffvg/pybind11/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e466b0dfda14f3a7c8ece512937eb99c8b7b6d68 --- /dev/null +++ b/diffvg/pybind11/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2016 Wenzel Jakob , All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Please also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of +external contributions to this project including patches, pull requests, etc. diff --git a/diffvg/pybind11/MANIFEST.in b/diffvg/pybind11/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..6fe84ced8d4334e90d2539c6cde208e838a9ba7e --- /dev/null +++ b/diffvg/pybind11/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include include/pybind11 *.h +include LICENSE README.md .github/CONTRIBUTING.md diff --git a/diffvg/pybind11/README.md b/diffvg/pybind11/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bae6cf2b5c46f7181e28bef6a1c2d4101086433d --- /dev/null +++ b/diffvg/pybind11/README.md @@ -0,0 +1,143 @@ +![pybind11 logo](https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png) + +# pybind11 β€” Seamless operability between C++11 and Python + +[![Documentation Status](https://readthedocs.org/projects/pybind11/badge/?version=master)](http://pybind11.readthedocs.org/en/master/?badge=master) +[![Documentation Status](https://readthedocs.org/projects/pybind11/badge/?version=stable)](http://pybind11.readthedocs.org/en/stable/?badge=stable) +[![Gitter 
chat](https://img.shields.io/gitter/room/gitterHQ/gitter.svg)](https://gitter.im/pybind/Lobby) +[![CI](https://github.com/pybind/pybind11/workflows/CI/badge.svg)](https://github.com/pybind/pybind11/actions) +[![Build status](https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true)](https://ci.appveyor.com/project/wjakob/pybind11) + +**pybind11** is a lightweight header-only library that exposes C++ types in +Python and vice versa, mainly to create Python bindings of existing C++ code. +Its goals and syntax are similar to the excellent [Boost.Python][] library by +David Abrahams: to minimize boilerplate code in traditional extension modules +by inferring type information using compile-time introspection. + +The main issue with Boost.Pythonβ€”and the reason for creating such a similar +projectβ€”is Boost. Boost is an enormously large and complex suite of utility +libraries that works with almost every C++ compiler in existence. This +compatibility has its cost: arcane template tricks and workarounds are +necessary to support the oldest and buggiest of compiler specimens. Now that +C++11-compatible compilers are widely available, this heavy machinery has +become an excessively large and unnecessary dependency. + +Think of this library as a tiny self-contained version of Boost.Python with +everything stripped away that isn't relevant for binding generation. Without +comments, the core header files only require ~4K lines of code and depend on +Python (2.7 or 3.5+, or PyPy) and the C++ standard library. This compact +implementation was possible thanks to some of the new C++11 language features +(specifically: tuples, lambda functions and variadic templates). Since its +creation, this library has grown beyond Boost.Python in many ways, leading to +dramatically simpler binding code in many common situations. + +Tutorial and reference documentation is provided at +[pybind11.readthedocs.org][]. A PDF version of the manual is available +[here][docs-pdf]. 
+ +## Core features +pybind11 can map the following core C++ features to Python: + +- Functions accepting and returning custom data structures per value, reference, or pointer +- Instance methods and static methods +- Overloaded functions +- Instance attributes and static attributes +- Arbitrary exception types +- Enumerations +- Callbacks +- Iterators and ranges +- Custom operators +- Single and multiple inheritance +- STL data structures +- Smart pointers with reference counting like `std::shared_ptr` +- Internal references with correct reference counting +- C++ classes with virtual (and pure virtual) methods can be extended in Python + +## Goodies +In addition to the core functionality, pybind11 provides some extra goodies: + +- Python 2.7, 3.5+, and PyPy (tested on 7.3) are supported with an implementation-agnostic + interface. + +- It is possible to bind C++11 lambda functions with captured variables. The + lambda capture data is stored inside the resulting Python function object. + +- pybind11 uses C++11 move constructors and move assignment operators whenever + possible to efficiently transfer custom data types. + +- It's easy to expose the internal storage of custom data types through + Pythons' buffer protocols. This is handy e.g. for fast conversion between + C++ matrix classes like Eigen and NumPy without expensive copy operations. + +- pybind11 can automatically vectorize functions so that they are transparently + applied to all entries of one or more NumPy array arguments. + +- Python's slice-based access and assignment operations can be supported with + just a few lines of code. + +- Everything is contained in just a few header files; there is no need to link + against any additional libraries. + +- Binaries are generally smaller by a factor of at least 2 compared to + equivalent bindings generated by Boost.Python. 
A recent pybind11 conversion + of PyRosetta, an enormous Boost.Python binding project, + [reported][pyrosetta-report] a binary size reduction of **5.4x** and compile + time reduction by **5.8x**. + +- Function signatures are precomputed at compile time (using `constexpr`), + leading to smaller binaries. + +- With little extra effort, C++ types can be pickled and unpickled similar to + regular Python objects. + +## Supported compilers + +1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or newer) +2. GCC 4.8 or newer +3. Microsoft Visual Studio 2015 Update 3 or newer +4. Intel C++ compiler 17 or newer (16 with pybind11 v2.0 and 15 with pybind11 + v2.0 and a [workaround][intel-15-workaround]) +5. Cygwin/GCC (tested on 2.5.1) + +## About + +This project was created by [Wenzel Jakob](http://rgl.epfl.ch/people/wjakob). +Significant features and/or improvements to the code were contributed by +Jonas Adler, +Lori A. Burns, +Sylvain Corlay, +Trent Houliston, +Axel Huebl, +@hulucc, +Sergey Lyskov +Johan Mabille, +Tomasz MiΔ…sko, +Dean Moldovan, +Ben Pritchard, +Jason Rhinelander, +Boris SchΓ€ling, +Pim Schellart, +Henry Schreiner, +Ivan Smirnov, and +Patrick Stewart. + +### Contributing + +See the [contributing guide][] for information on building and contributing to +pybind11. + + +### License + +pybind11 is provided under a BSD-style license that can be found in the +[`LICENSE`][] file. By using, distributing, or contributing to this project, +you agree to the terms and conditions of this license. 
+ + +[pybind11.readthedocs.org]: http://pybind11.readthedocs.org/en/master +[docs-pdf]: https://media.readthedocs.org/pdf/pybind11/master/pybind11.pdf +[Boost.Python]: http://www.boost.org/doc/libs/1_58_0/libs/python/doc/ +[pyrosetta-report]: http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf +[contributing guide]: https://github.com/pybind/pybind11/blob/master/.github/CONTRIBUTING.md +[`LICENSE`]: https://github.com/pybind/pybind11/blob/master/LICENSE +[intel-15-workaround]: https://github.com/pybind/pybind11/issues/276 diff --git a/diffvg/pybind11/docs/Doxyfile b/diffvg/pybind11/docs/Doxyfile new file mode 100644 index 0000000000000000000000000000000000000000..24ece0d8dbac25c6c2ab076610f867beb2fe0c9a --- /dev/null +++ b/diffvg/pybind11/docs/Doxyfile @@ -0,0 +1,22 @@ +PROJECT_NAME = pybind11 +INPUT = ../include/pybind11/ +RECURSIVE = YES + +GENERATE_HTML = NO +GENERATE_LATEX = NO +GENERATE_XML = YES +XML_OUTPUT = .build/doxygenxml +XML_PROGRAMLISTING = YES + +MACRO_EXPANSION = YES +EXPAND_ONLY_PREDEF = YES +EXPAND_AS_DEFINED = PYBIND11_RUNTIME_EXCEPTION + +ALIASES = "rst=\verbatim embed:rst" +ALIASES += "endrst=\endverbatim" + +QUIET = YES +WARNINGS = YES +WARN_IF_UNDOCUMENTED = NO +PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS \ + PY_MAJOR_VERSION=3 diff --git a/diffvg/pybind11/docs/Makefile b/diffvg/pybind11/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..511b47c2d5434ced54ed49ac35d33df80afc684a --- /dev/null +++ b/diffvg/pybind11/docs/Makefile @@ -0,0 +1,192 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = .build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. 
Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " applehelp to make an Apple Help Book" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " coverage 
to run coverage check of the documentation (if enabled)" + +clean: + rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pybind11.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pybind11.qhc" + +applehelp: + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/pybind11" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pybind11" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. 
The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
diff --git a/diffvg/pybind11/docs/_static/theme_overrides.css b/diffvg/pybind11/docs/_static/theme_overrides.css new file mode 100644 index 0000000000000000000000000000000000000000..1071809fa0fecf7c28d3356f37363266e9128b81 --- /dev/null +++ b/diffvg/pybind11/docs/_static/theme_overrides.css @@ -0,0 +1,11 @@ +.wy-table-responsive table td, +.wy-table-responsive table th { + white-space: initial !important; +} +.rst-content table.docutils td { + vertical-align: top !important; +} +div[class^='highlight'] pre { + white-space: pre; + white-space: pre-wrap; +} diff --git a/diffvg/pybind11/docs/advanced/cast/chrono.rst b/diffvg/pybind11/docs/advanced/cast/chrono.rst new file mode 100644 index 0000000000000000000000000000000000000000..fbd46057aa392c86ae3747c2b21768367205ea49 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/cast/chrono.rst @@ -0,0 +1,81 @@ +Chrono +====== + +When including the additional header file :file:`pybind11/chrono.h` conversions +from C++11 chrono datatypes to python datetime objects are automatically enabled. +This header also enables conversions of python floats (often from sources such +as ``time.monotonic()``, ``time.perf_counter()`` and ``time.process_time()``) +into durations. + +An overview of clocks in C++11 +------------------------------ + +A point of confusion when using these conversions is the differences between +clocks provided in C++11. There are three clock types defined by the C++11 +standard and users can define their own if needed. Each of these clocks have +different properties and when converting to and from python will give different +results. + +The first clock defined by the standard is ``std::chrono::system_clock``. This +clock measures the current date and time. However, this clock changes with to +updates to the operating system time. For example, if your time is synchronised +with a time server this clock will change. This makes this clock a poor choice +for timing purposes but good for measuring the wall time. 
+ +The second clock defined in the standard is ``std::chrono::steady_clock``. +This clock ticks at a steady rate and is never adjusted. This makes it excellent +for timing purposes, however the value in this clock does not correspond to the +current date and time. Often this clock will be the amount of time your system +has been on, although it does not have to be. This clock will never be the same +clock as the system clock as the system clock can change but steady clocks +cannot. + +The third clock defined in the standard is ``std::chrono::high_resolution_clock``. +This clock is the clock that has the highest resolution out of the clocks in the +system. It is normally a typedef to either the system clock or the steady clock +but can be its own independent clock. This is important as when using these +conversions as the types you get in python for this clock might be different +depending on the system. +If it is a typedef of the system clock, python will get datetime objects, but if +it is a different clock they will be timedelta objects. + +Provided conversions +-------------------- + +.. rubric:: C++ to Python + +- ``std::chrono::system_clock::time_point`` β†’ ``datetime.datetime`` + System clock times are converted to python datetime instances. They are + in the local timezone, but do not have any timezone information attached + to them (they are naive datetime objects). + +- ``std::chrono::duration`` β†’ ``datetime.timedelta`` + Durations are converted to timedeltas, any precision in the duration + greater than microseconds is lost by rounding towards zero. + +- ``std::chrono::[other_clocks]::time_point`` β†’ ``datetime.timedelta`` + Any clock time that is not the system clock is converted to a time delta. + This timedelta measures the time from the clocks epoch to now. + +.. 
rubric:: Python to C++ + +- ``datetime.datetime`` or ``datetime.date`` or ``datetime.time`` β†’ ``std::chrono::system_clock::time_point`` + Date/time objects are converted into system clock timepoints. Any + timezone information is ignored and the type is treated as a naive + object. + +- ``datetime.timedelta`` β†’ ``std::chrono::duration`` + Time delta are converted into durations with microsecond precision. + +- ``datetime.timedelta`` β†’ ``std::chrono::[other_clocks]::time_point`` + Time deltas that are converted into clock timepoints are treated as + the amount of time from the start of the clocks epoch. + +- ``float`` β†’ ``std::chrono::duration`` + Floats that are passed to C++ as durations be interpreted as a number of + seconds. These will be converted to the duration using ``duration_cast`` + from the float. + +- ``float`` β†’ ``std::chrono::[other_clocks]::time_point`` + Floats that are passed to C++ as time points will be interpreted as the + number of seconds from the start of the clocks epoch. diff --git a/diffvg/pybind11/docs/advanced/cast/custom.rst b/diffvg/pybind11/docs/advanced/cast/custom.rst new file mode 100644 index 0000000000000000000000000000000000000000..e4f99ac5b086355ff8e691a22ad5e16ea84dfed7 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/cast/custom.rst @@ -0,0 +1,91 @@ +Custom type casters +=================== + +In very rare cases, applications may require custom type casters that cannot be +expressed using the abstractions provided by pybind11, thus requiring raw +Python C API calls. This is fairly advanced usage and should only be pursued by +experts who are familiar with the intricacies of Python reference counting. + +The following snippets demonstrate how this works for a very simple ``inty`` +type that that should be convertible from Python types that provide a +``__int__(self)`` method. + +.. 
code-block:: cpp + + struct inty { long long_value; }; + + void print(inty s) { + std::cout << s.long_value << std::endl; + } + +The following Python snippet demonstrates the intended usage from the Python side: + +.. code-block:: python + + class A: + def __int__(self): + return 123 + + from example import print + print(A()) + +To register the necessary conversion routines, it is necessary to add +a partial overload to the ``pybind11::detail::type_caster`` template. +Although this is an implementation detail, adding partial overloads to this +type is explicitly allowed. + +.. code-block:: cpp + + namespace pybind11 { namespace detail { + template <> struct type_caster { + public: + /** + * This macro establishes the name 'inty' in + * function signatures and declares a local variable + * 'value' of type inty + */ + PYBIND11_TYPE_CASTER(inty, _("inty")); + + /** + * Conversion part 1 (Python->C++): convert a PyObject into a inty + * instance or return false upon failure. The second argument + * indicates whether implicit conversions should be applied. + */ + bool load(handle src, bool) { + /* Extract PyObject from handle */ + PyObject *source = src.ptr(); + /* Try converting into a Python integer value */ + PyObject *tmp = PyNumber_Long(source); + if (!tmp) + return false; + /* Now try to convert into a C++ int */ + value.long_value = PyLong_AsLong(tmp); + Py_DECREF(tmp); + /* Ensure return code was OK (to avoid out-of-range errors etc) */ + return !(value.long_value == -1 && !PyErr_Occurred()); + } + + /** + * Conversion part 2 (C++ -> Python): convert an inty instance into + * a Python object. The second and third arguments are used to + * indicate the return value policy and parent object (for + * ``return_value_policy::reference_internal``) and are generally + * ignored by implicit casters. 
+ */ + static handle cast(inty src, return_value_policy /* policy */, handle /* parent */) { + return PyLong_FromLong(src.long_value); + } + }; + }} // namespace pybind11::detail + +.. note:: + + A ``type_caster`` defined with ``PYBIND11_TYPE_CASTER(T, ...)`` requires + that ``T`` is default-constructible (``value`` is first default constructed + and then ``load()`` assigns to it). + +.. warning:: + + When using custom type casters, it's important to declare them consistently + in every compilation unit of the Python extension module. Otherwise, + undefined behavior can ensue. diff --git a/diffvg/pybind11/docs/advanced/cast/eigen.rst b/diffvg/pybind11/docs/advanced/cast/eigen.rst new file mode 100644 index 0000000000000000000000000000000000000000..59ba08c3c4297556f6aaa2e0c8db328eb1490e40 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/cast/eigen.rst @@ -0,0 +1,310 @@ +Eigen +##### + +`Eigen `_ is C++ header-based library for dense and +sparse linear algebra. Due to its popularity and widespread adoption, pybind11 +provides transparent conversion and limited mapping support between Eigen and +Scientific Python linear algebra data types. + +To enable the built-in Eigen support you must include the optional header file +:file:`pybind11/eigen.h`. + +Pass-by-value +============= + +When binding a function with ordinary Eigen dense object arguments (for +example, ``Eigen::MatrixXd``), pybind11 will accept any input value that is +already (or convertible to) a ``numpy.ndarray`` with dimensions compatible with +the Eigen type, copy its values into a temporary Eigen variable of the +appropriate type, then call the function with this temporary variable. + +Sparse matrices are similarly copied to or from +``scipy.sparse.csr_matrix``/``scipy.sparse.csc_matrix`` objects. 
+ +Pass-by-reference +================= + +One major limitation of the above is that every data conversion implicitly +involves a copy, which can be both expensive (for large matrices) and disallows +binding functions that change their (Matrix) arguments. Pybind11 allows you to +work around this by using Eigen's ``Eigen::Ref`` class much as you +would when writing a function taking a generic type in Eigen itself (subject to +some limitations discussed below). + +When calling a bound function accepting a ``Eigen::Ref`` +type, pybind11 will attempt to avoid copying by using an ``Eigen::Map`` object +that maps into the source ``numpy.ndarray`` data: this requires both that the +data types are the same (e.g. ``dtype='float64'`` and ``MatrixType::Scalar`` is +``double``); and that the storage is layout compatible. The latter limitation +is discussed in detail in the section below, and requires careful +consideration: by default, numpy matrices and Eigen matrices are *not* storage +compatible. + +If the numpy matrix cannot be used as is (either because its types differ, e.g. +passing an array of integers to an Eigen parameter requiring doubles, or +because the storage is incompatible), pybind11 makes a temporary copy and +passes the copy instead. + +When a bound function parameter is instead ``Eigen::Ref`` (note the +lack of ``const``), pybind11 will only allow the function to be called if it +can be mapped *and* if the numpy array is writeable (that is +``a.flags.writeable`` is true). Any access (including modification) made to +the passed variable will be transparently carried out directly on the +``numpy.ndarray``. + +This means you can can write code such as the following and have it work as +expected: + +.. 
code-block:: cpp + + void scale_by_2(Eigen::Ref v) { + v *= 2; + } + +Note, however, that you will likely run into limitations due to numpy and +Eigen's difference default storage order for data; see the below section on +:ref:`storage_orders` for details on how to bind code that won't run into such +limitations. + +.. note:: + + Passing by reference is not supported for sparse types. + +Returning values to Python +========================== + +When returning an ordinary dense Eigen matrix type to numpy (e.g. +``Eigen::MatrixXd`` or ``Eigen::RowVectorXf``) pybind11 keeps the matrix and +returns a numpy array that directly references the Eigen matrix: no copy of the +data is performed. The numpy array will have ``array.flags.owndata`` set to +``False`` to indicate that it does not own the data, and the lifetime of the +stored Eigen matrix will be tied to the returned ``array``. + +If you bind a function with a non-reference, ``const`` return type (e.g. +``const Eigen::MatrixXd``), the same thing happens except that pybind11 also +sets the numpy array's ``writeable`` flag to false. + +If you return an lvalue reference or pointer, the usual pybind11 rules apply, +as dictated by the binding function's return value policy (see the +documentation on :ref:`return_value_policies` for full details). That means, +without an explicit return value policy, lvalue references will be copied and +pointers will be managed by pybind11. In order to avoid copying, you should +explicitly specify an appropriate return value policy, as in the following +example: + +.. code-block:: cpp + + class MyClass { + Eigen::MatrixXd big_mat = Eigen::MatrixXd::Zero(10000, 10000); + public: + Eigen::MatrixXd &getMatrix() { return big_mat; } + const Eigen::MatrixXd &viewMatrix() { return big_mat; } + }; + + // Later, in binding code: + py::class_(m, "MyClass") + .def(py::init<>()) + .def("copy_matrix", &MyClass::getMatrix) // Makes a copy! 
+ .def("get_matrix", &MyClass::getMatrix, py::return_value_policy::reference_internal) + .def("view_matrix", &MyClass::viewMatrix, py::return_value_policy::reference_internal) + ; + +.. code-block:: python + + a = MyClass() + m = a.get_matrix() # flags.writeable = True, flags.owndata = False + v = a.view_matrix() # flags.writeable = False, flags.owndata = False + c = a.copy_matrix() # flags.writeable = True, flags.owndata = True + # m[5,6] and v[5,6] refer to the same element, c[5,6] does not. + +Note in this example that ``py::return_value_policy::reference_internal`` is +used to tie the life of the MyClass object to the life of the returned arrays. + +You may also return an ``Eigen::Ref``, ``Eigen::Map`` or other map-like Eigen +object (for example, the return value of ``matrix.block()`` and related +methods) that map into a dense Eigen type. When doing so, the default +behaviour of pybind11 is to simply reference the returned data: you must take +care to ensure that this data remains valid! You may ask pybind11 to +explicitly *copy* such a return value by using the +``py::return_value_policy::copy`` policy when binding the function. You may +also use ``py::return_value_policy::reference_internal`` or a +``py::keep_alive`` to ensure the data stays valid as long as the returned numpy +array does. + +When returning such a reference of map, pybind11 additionally respects the +readonly-status of the returned value, marking the numpy array as non-writeable +if the reference or map was itself read-only. + +.. note:: + + Sparse types are always copied when returned. + +.. _storage_orders: + +Storage orders +============== + +Passing arguments via ``Eigen::Ref`` has some limitations that you must be +aware of in order to effectively pass matrices by reference. 
First and +foremost is that the default ``Eigen::Ref`` class requires +contiguous storage along columns (for column-major types, the default in Eigen) +or rows if ``MatrixType`` is specifically an ``Eigen::RowMajor`` storage type. +The former, Eigen's default, is incompatible with ``numpy``'s default row-major +storage, and so you will not be able to pass numpy arrays to Eigen by reference +without making one of two changes. + +(Note that this does not apply to vectors (or column or row matrices): for such +types the "row-major" and "column-major" distinction is meaningless). + +The first approach is to change the use of ``Eigen::Ref`` to the +more general ``Eigen::Ref>`` (or similar type with a fully dynamic stride type in the +third template argument). Since this is a rather cumbersome type, pybind11 +provides a ``py::EigenDRef`` type alias for your convenience (along +with EigenDMap for the equivalent Map, and EigenDStride for just the stride +type). + +This type allows Eigen to map into any arbitrary storage order. This is not +the default in Eigen for performance reasons: contiguous storage allows +vectorization that cannot be done when storage is not known to be contiguous at +compile time. The default ``Eigen::Ref`` stride type allows non-contiguous +storage along the outer dimension (that is, the rows of a column-major matrix +or columns of a row-major matrix), but not along the inner dimension. + +This type, however, has the added benefit of also being able to map numpy array +slices. For example, the following (contrived) example uses Eigen with a numpy +slice to multiply by 2 all coefficients that are both on even rows (0, 2, 4, +...) and in columns 2, 5, or 8: + +.. code-block:: cpp + + m.def("scale", [](py::EigenDRef m, double c) { m *= c; }); + +.. code-block:: python + + # a = np.array(...) 
scale(myarray[0::2, 2:9:3], 2)
If you want to explicitly avoid copying in such a case, you
+should bind arguments using the ``py::arg().noconvert()`` annotation (as
+described in the :ref:`nonconverting_arguments` documentation).
+
+The following example shows arguments that don't allow data
+copying to take place:
+
+.. code-block:: cpp
+
+    // The method and function to be bound:
+    class MyClass {
+        // ...
+        double some_method(const Eigen::Ref<const Eigen::MatrixXd> &matrix) { /* ... */ }
+    };
+    float some_function(const Eigen::Ref<const Eigen::MatrixXf> &big,
+                        const Eigen::Ref<const Eigen::MatrixXf> &small) {
+        // ...
+    }
+
+    // The associated binding code:
+    using namespace pybind11::literals; // for "arg"_a
+    py::class_<MyClass>(m, "MyClass")
+        // ... other class definitions
+        .def("some_method", &MyClass::some_method, py::arg().noconvert());
+
+    m.def("some_function", &some_function,
+        "big"_a.noconvert(), // <- Don't allow copying for this arg
+        "small"_a            // <- This one can be copied if needed
+    );
+
+With the above binding code, attempting to call the ``some_method(m)``
+method on a ``MyClass`` object, or attempting to call ``some_function(m, m2)``
+will raise a ``RuntimeError`` rather than making a temporary copy of the array.
+It will, however, allow the ``m2`` argument to be copied into a temporary if
+necessary.
+
+Note that explicitly specifying ``.noconvert()`` is not required for *mutable*
+Eigen references (e.g. ``Eigen::Ref<MatrixXd>`` without ``const`` on the
+``MatrixXd``): mutable references will never be called with a temporary copy.
+
+Vectors versus column/row matrices
+==================================
+
+Eigen and numpy have fundamentally different notions of a vector. In Eigen, a
+vector is simply a matrix with the number of columns or rows set to 1 at
+compile time (for a column vector or row vector, respectively). Numpy, in
+contrast, has comparable 2-dimensional 1xN and Nx1 arrays, but *also* has
+1-dimensional arrays of size N. 
+
+When passing a 2-dimensional 1xN or Nx1 array to Eigen, the Eigen type must
+have matching dimensions: That is, you cannot pass a 2-dimensional Nx1 numpy
+array to an Eigen value expecting a row vector, or a 1xN numpy array as a
+column vector argument.
+
+On the other hand, pybind11 allows you to pass 1-dimensional arrays of length N
+as Eigen parameters. If the Eigen type can hold a column vector of length N it
+will be passed as such a column vector. If not, but the Eigen type constraints
+will accept a row vector, it will be passed as a row vector. (The column
+vector takes precedence when both are supported, for example, when passing a
+1D numpy array to a MatrixXd argument). Note that the type need not be
+explicitly a vector: it is permitted to pass a 1D numpy array of size 5 to an
+Eigen ``Matrix<double, 1, 5>``: you would end up with a 1x5 Eigen matrix.
+Passing the same to an ``Eigen::MatrixXd`` would result in a 5x1 Eigen matrix.
+
+When returning an Eigen vector to numpy, the conversion is ambiguous: a row
+vector of length 4 could be returned as either a 1D array of length 4, or as a
+2D array of size 1x4. When encountering such a situation, pybind11 compromises
+by considering the returned Eigen type: if it is a compile-time vector--that
+is, the type has either the number of rows or columns set to 1 at compile
+time--pybind11 converts to a 1D numpy array when returning the value. For
+instances that are a vector only at run-time (e.g. ``MatrixXd``,
+``Matrix<float, Dynamic, 4>``), pybind11 returns the vector as a 2D array to
+numpy. If this isn't what you want, you can use ``array.reshape(...)`` to get
+a view of the same data in the desired dimensions.
+
+.. seealso::
+
+    The file :file:`tests/test_eigen.cpp` contains a complete example that
+    shows how to pass Eigen sparse and dense data types in more detail. 
diff --git a/diffvg/pybind11/docs/advanced/cast/functional.rst b/diffvg/pybind11/docs/advanced/cast/functional.rst new file mode 100644 index 0000000000000000000000000000000000000000..d9b46057598f0d182422accd088fbff9785b0b53 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/cast/functional.rst @@ -0,0 +1,109 @@ +Functional +########## + +The following features must be enabled by including :file:`pybind11/functional.h`. + + +Callbacks and passing anonymous functions +========================================= + +The C++11 standard brought lambda functions and the generic polymorphic +function wrapper ``std::function<>`` to the C++ programming language, which +enable powerful new ways of working with functions. Lambda functions come in +two flavors: stateless lambda function resemble classic function pointers that +link to an anonymous piece of code, while stateful lambda functions +additionally depend on captured variables that are stored in an anonymous +*lambda closure object*. + +Here is a simple example of a C++ function that takes an arbitrary function +(stateful or stateless) with signature ``int -> int`` as an argument and runs +it with the value 10. + +.. code-block:: cpp + + int func_arg(const std::function &f) { + return f(10); + } + +The example below is more involved: it takes a function of signature ``int -> int`` +and returns another function of the same kind. The return value is a stateful +lambda function, which stores the value ``f`` in the capture object and adds 1 to +its return value upon execution. + +.. code-block:: cpp + + std::function func_ret(const std::function &f) { + return [f](int i) { + return f(i) + 1; + }; + } + +This example demonstrates using python named parameters in C++ callbacks which +requires using ``py::cpp_function`` as a wrapper. Usage is similar to defining +methods of classes: + +.. 
code-block:: cpp
+
+    py::cpp_function func_cpp() {
+        return py::cpp_function([](int i) { return i+1; },
+                                py::arg("number"));
+    }
+
+After including the extra header file :file:`pybind11/functional.h`, it is almost
+trivial to generate binding code for all of these functions.
+
+.. code-block:: cpp
+
+    #include <pybind11/functional.h>
+
+    PYBIND11_MODULE(example, m) {
+        m.def("func_arg", &func_arg);
+        m.def("func_ret", &func_ret);
+        m.def("func_cpp", &func_cpp);
+    }
+
+The following interactive session shows how to call them from Python.
+
+.. code-block:: pycon
+
+    $ python
+    >>> import example
+    >>> def square(i):
+    ...     return i * i
+    ...
+    >>> example.func_arg(square)
+    100L
+    >>> square_plus_1 = example.func_ret(square)
+    >>> square_plus_1(4)
+    17L
+    >>> plus_1 = example.func_cpp()
+    >>> plus_1(number=43)
+    44L
+
+.. warning::
+
+    Keep in mind that passing a function from C++ to Python (or vice versa)
+    will instantiate a piece of wrapper code that translates function
+    invocations between the two languages. Naturally, this translation
+    increases the computational cost of each function call somewhat. A
+    problematic situation can arise when a function is copied back and forth
+    between Python and C++ many times in a row, in which case the underlying
+    wrappers will accumulate correspondingly. The resulting long sequence of
+    C++ -> Python -> C++ -> ... roundtrips can significantly decrease
+    performance.
+
+    There is one exception: pybind11 detects cases where a stateless function
+    (i.e. a function pointer or a lambda function without captured variables)
+    is passed as an argument to another C++ function exposed in Python. In this
+    case, there is no overhead. Pybind11 will extract the underlying C++
+    function pointer from the wrapped function to sidestep a potential C++ ->
+    Python -> C++ roundtrip. This is demonstrated in :file:`tests/test_callbacks.cpp`.
+
+.. note::
+
+    This functionality is very useful when generating bindings for callbacks in
+    C++ libraries (e.g. 
GUI libraries, asynchronous networking libraries, etc.).
+
+    The file :file:`tests/test_callbacks.cpp` contains a complete example
+    that demonstrates how to work with callbacks and anonymous functions in
+    more detail.
diff --git a/diffvg/pybind11/docs/advanced/cast/index.rst b/diffvg/pybind11/docs/advanced/cast/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..724585c9202f6fe7cd03daa5cf4df4e1daf0ffe7
--- /dev/null
+++ b/diffvg/pybind11/docs/advanced/cast/index.rst
@@ -0,0 +1,41 @@
+Type conversions
+################
+
+Apart from enabling cross-language function calls, a fundamental problem
+that a binding tool like pybind11 must address is to provide access to
+native Python types in C++ and vice versa. There are three fundamentally
+different ways to do this—which approach is preferable for a particular type
+depends on the situation at hand.
+
+1. Use a native C++ type everywhere. In this case, the type must be wrapped
+   using pybind11-generated bindings so that Python can interact with it.
+
+2. Use a native Python type everywhere. It will need to be wrapped so that
+   C++ functions can interact with it.
+
+3. Use a native C++ type on the C++ side and a native Python type on the
+   Python side. pybind11 refers to this as a *type conversion*.
+
+   Type conversions are the most "natural" option in the sense that native
+   (non-wrapped) types are used everywhere. The main downside is that a copy
+   of the data must be made on every Python ↔ C++ transition: this is
+   needed since the C++ and Python versions of the same type generally won't
+   have the same memory layout.
+
+   pybind11 can perform many kinds of conversions automatically. An overview
+   is provided in the table ":ref:`conversion_table`".
+
+The following subsections discuss the differences between these options in more
+detail. The main focus in this section is on type conversions, which represent
+the last case of the above list.
+
+.. 
toctree:: + :maxdepth: 1 + + overview + strings + stl + functional + chrono + eigen + custom diff --git a/diffvg/pybind11/docs/advanced/cast/overview.rst b/diffvg/pybind11/docs/advanced/cast/overview.rst new file mode 100644 index 0000000000000000000000000000000000000000..b0e32a52f9c8bb2945e230416216c38e3c3a4a9b --- /dev/null +++ b/diffvg/pybind11/docs/advanced/cast/overview.rst @@ -0,0 +1,165 @@ +Overview +######## + +.. rubric:: 1. Native type in C++, wrapper in Python + +Exposing a custom C++ type using :class:`py::class_` was covered in detail +in the :doc:`/classes` section. There, the underlying data structure is +always the original C++ class while the :class:`py::class_` wrapper provides +a Python interface. Internally, when an object like this is sent from C++ to +Python, pybind11 will just add the outer wrapper layer over the native C++ +object. Getting it back from Python is just a matter of peeling off the +wrapper. + +.. rubric:: 2. Wrapper in C++, native type in Python + +This is the exact opposite situation. Now, we have a type which is native to +Python, like a ``tuple`` or a ``list``. One way to get this data into C++ is +with the :class:`py::object` family of wrappers. These are explained in more +detail in the :doc:`/advanced/pycpp/object` section. We'll just give a quick +example here: + +.. code-block:: cpp + + void print_list(py::list my_list) { + for (auto item : my_list) + std::cout << item << " "; + } + +.. code-block:: pycon + + >>> print_list([1, 2, 3]) + 1 2 3 + +The Python ``list`` is not converted in any way -- it's just wrapped in a C++ +:class:`py::list` class. At its core it's still a Python object. Copying a +:class:`py::list` will do the usual reference-counting like in Python. +Returning the object to Python will just remove the thin wrapper. + +.. rubric:: 3. Converting between native C++ and Python types + +In the previous two cases we had a native type in one language and a wrapper in +the other. 
Now, we have native types on both sides and we convert between them. + +.. code-block:: cpp + + void print_vector(const std::vector &v) { + for (auto item : v) + std::cout << item << "\n"; + } + +.. code-block:: pycon + + >>> print_vector([1, 2, 3]) + 1 2 3 + +In this case, pybind11 will construct a new ``std::vector`` and copy each +element from the Python ``list``. The newly constructed object will be passed +to ``print_vector``. The same thing happens in the other direction: a new +``list`` is made to match the value returned from C++. + +Lots of these conversions are supported out of the box, as shown in the table +below. They are very convenient, but keep in mind that these conversions are +fundamentally based on copying data. This is perfectly fine for small immutable +types but it may become quite expensive for large data structures. This can be +avoided by overriding the automatic conversion with a custom wrapper (i.e. the +above-mentioned approach 1). This requires some manual effort and more details +are available in the :ref:`opaque` section. + +.. _conversion_table: + +List of all builtin conversions +------------------------------- + +The following basic data types are supported out of the box (some may require +an additional extension header to be included). To pass other data structures +as arguments and return values, refer to the section on binding :ref:`classes`. 
+ ++------------------------------------+---------------------------+-------------------------------+ +| Data type | Description | Header file | ++====================================+===========================+===============================+ +| ``int8_t``, ``uint8_t`` | 8-bit integers | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``int16_t``, ``uint16_t`` | 16-bit integers | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``int32_t``, ``uint32_t`` | 32-bit integers | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``int64_t``, ``uint64_t`` | 64-bit integers | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``ssize_t``, ``size_t`` | Platform-dependent size | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``float``, ``double`` | Floating point types | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``bool`` | Two-state Boolean type | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``char`` | Character literal | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``char16_t`` | UTF-16 character literal | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``char32_t`` | UTF-32 character literal | :file:`pybind11/pybind11.h` | 
++------------------------------------+---------------------------+-------------------------------+ +| ``wchar_t`` | Wide character literal | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``const char *`` | UTF-8 string literal | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``const char16_t *`` | UTF-16 string literal | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``const char32_t *`` | UTF-32 string literal | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``const wchar_t *`` | Wide string literal | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::string`` | STL dynamic UTF-8 string | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::u16string`` | STL dynamic UTF-16 string | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::u32string`` | STL dynamic UTF-32 string | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::wstring`` | STL dynamic wide string | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::string_view``, | STL C++17 string views | :file:`pybind11/pybind11.h` | +| ``std::u16string_view``, etc. 
| | | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::pair`` | Pair of two custom types | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::tuple<...>`` | Arbitrary tuple of types | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::reference_wrapper<...>`` | Reference type wrapper | :file:`pybind11/pybind11.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::complex`` | Complex numbers | :file:`pybind11/complex.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::array`` | STL static array | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::vector`` | STL dynamic array | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::deque`` | STL double-ended queue | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::valarray`` | STL value array | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::list`` | STL linked list | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::map`` | STL ordered map | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::unordered_map`` | STL unordered map | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| 
``std::set`` | STL ordered set | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::unordered_set`` | STL unordered set | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::optional`` | STL optional type (C++17) | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::experimental::optional`` | STL optional type (exp.) | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::variant<...>`` | Type-safe union (C++17) | :file:`pybind11/stl.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::function<...>`` | STL polymorphic function | :file:`pybind11/functional.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::chrono::duration<...>`` | STL time duration | :file:`pybind11/chrono.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``std::chrono::time_point<...>`` | STL date/time | :file:`pybind11/chrono.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``Eigen::Matrix<...>`` | Eigen: dense matrix | :file:`pybind11/eigen.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``Eigen::Map<...>`` | Eigen: mapped memory | :file:`pybind11/eigen.h` | ++------------------------------------+---------------------------+-------------------------------+ +| ``Eigen::SparseMatrix<...>`` | Eigen: sparse matrix | :file:`pybind11/eigen.h` | ++------------------------------------+---------------------------+-------------------------------+ diff --git 
a/diffvg/pybind11/docs/advanced/cast/stl.rst b/diffvg/pybind11/docs/advanced/cast/stl.rst new file mode 100644 index 0000000000000000000000000000000000000000..e48409f025d021b35e4e26f4fee754b2d858daa4 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/cast/stl.rst @@ -0,0 +1,240 @@ +STL containers +############## + +Automatic conversion +==================== + +When including the additional header file :file:`pybind11/stl.h`, conversions +between ``std::vector<>``/``std::deque<>``/``std::list<>``/``std::array<>``, +``std::set<>``/``std::unordered_set<>``, and +``std::map<>``/``std::unordered_map<>`` and the Python ``list``, ``set`` and +``dict`` data structures are automatically enabled. The types ``std::pair<>`` +and ``std::tuple<>`` are already supported out of the box with just the core +:file:`pybind11/pybind11.h` header. + +The major downside of these implicit conversions is that containers must be +converted (i.e. copied) on every Python->C++ and C++->Python transition, which +can have implications on the program semantics and performance. Please read the +next sections for more details and alternative approaches that avoid this. + +.. note:: + + Arbitrary nesting of any of these types is possible. + +.. seealso:: + + The file :file:`tests/test_stl.cpp` contains a complete + example that demonstrates how to pass STL data types in more detail. + +.. _cpp17_container_casters: + +C++17 library containers +======================== + +The :file:`pybind11/stl.h` header also includes support for ``std::optional<>`` +and ``std::variant<>``. These require a C++17 compiler and standard library. +In C++14 mode, ``std::experimental::optional<>`` is supported if available. + +Various versions of these containers also exist for C++11 (e.g. in Boost). +pybind11 provides an easy way to specialize the ``type_caster`` for such +types: + +.. 
code-block:: cpp + + // `boost::optional` as an example -- can be any `std::optional`-like container + namespace pybind11 { namespace detail { + template + struct type_caster> : optional_caster> {}; + }} + +The above should be placed in a header file and included in all translation units +where automatic conversion is needed. Similarly, a specialization can be provided +for custom variant types: + +.. code-block:: cpp + + // `boost::variant` as an example -- can be any `std::variant`-like container + namespace pybind11 { namespace detail { + template + struct type_caster> : variant_caster> {}; + + // Specifies the function used to visit the variant -- `apply_visitor` instead of `visit` + template <> + struct visit_helper { + template + static auto call(Args &&...args) -> decltype(boost::apply_visitor(args...)) { + return boost::apply_visitor(args...); + } + }; + }} // namespace pybind11::detail + +The ``visit_helper`` specialization is not required if your ``name::variant`` provides +a ``name::visit()`` function. For any other function name, the specialization must be +included to tell pybind11 how to visit the variant. + +.. note:: + + pybind11 only supports the modern implementation of ``boost::variant`` + which makes use of variadic templates. This requires Boost 1.56 or newer. + Additionally, on Windows, MSVC 2017 is required because ``boost::variant`` + falls back to the old non-variadic implementation on MSVC 2015. + +.. _opaque: + +Making opaque types +=================== + +pybind11 heavily relies on a template matching mechanism to convert parameters +and return values that are constructed from STL data types such as vectors, +linked lists, hash tables, etc. This even works in a recursive manner, for +instance to deal with lists of hash maps of pairs of elementary and custom +types, etc. 
+ +However, a fundamental limitation of this approach is that internal conversions +between Python and C++ types involve a copy operation that prevents +pass-by-reference semantics. What does this mean? + +Suppose we bind the following function + +.. code-block:: cpp + + void append_1(std::vector &v) { + v.push_back(1); + } + +and call it from Python, the following happens: + +.. code-block:: pycon + + >>> v = [5, 6] + >>> append_1(v) + >>> print(v) + [5, 6] + +As you can see, when passing STL data structures by reference, modifications +are not propagated back the Python side. A similar situation arises when +exposing STL data structures using the ``def_readwrite`` or ``def_readonly`` +functions: + +.. code-block:: cpp + + /* ... definition ... */ + + class MyClass { + std::vector contents; + }; + + /* ... binding code ... */ + + py::class_(m, "MyClass") + .def(py::init<>()) + .def_readwrite("contents", &MyClass::contents); + +In this case, properties can be read and written in their entirety. However, an +``append`` operation involving such a list type has no effect: + +.. code-block:: pycon + + >>> m = MyClass() + >>> m.contents = [5, 6] + >>> print(m.contents) + [5, 6] + >>> m.contents.append(7) + >>> print(m.contents) + [5, 6] + +Finally, the involved copy operations can be costly when dealing with very +large lists. To deal with all of the above situations, pybind11 provides a +macro named ``PYBIND11_MAKE_OPAQUE(T)`` that disables the template-based +conversion machinery of types, thus rendering them *opaque*. The contents of +opaque objects are never inspected or extracted, hence they *can* be passed by +reference. For instance, to turn ``std::vector`` into an opaque type, add +the declaration + +.. code-block:: cpp + + PYBIND11_MAKE_OPAQUE(std::vector); + +before any binding code (e.g. invocations to ``class_::def()``, etc.). This +macro must be specified at the top level (and outside of any namespaces), since +it instantiates a partial template overload. 
If your binding code consists of +multiple compilation units, it must be present in every file (typically via a +common header) preceding any usage of ``std::vector``. Opaque types must +also have a corresponding ``class_`` declaration to associate them with a name +in Python, and to define a set of available operations, e.g.: + +.. code-block:: cpp + + py::class_>(m, "IntVector") + .def(py::init<>()) + .def("clear", &std::vector::clear) + .def("pop_back", &std::vector::pop_back) + .def("__len__", [](const std::vector &v) { return v.size(); }) + .def("__iter__", [](std::vector &v) { + return py::make_iterator(v.begin(), v.end()); + }, py::keep_alive<0, 1>()) /* Keep vector alive while iterator is used */ + // .... + +.. seealso:: + + The file :file:`tests/test_opaque_types.cpp` contains a complete + example that demonstrates how to create and expose opaque types using + pybind11 in more detail. + +.. _stl_bind: + +Binding STL containers +====================== + +The ability to expose STL containers as native Python objects is a fairly +common request, hence pybind11 also provides an optional header file named +:file:`pybind11/stl_bind.h` that does exactly this. The mapped containers try +to match the behavior of their native Python counterparts as much as possible. + +The following example showcases usage of :file:`pybind11/stl_bind.h`: + +.. code-block:: cpp + + // Don't forget this + #include + + PYBIND11_MAKE_OPAQUE(std::vector); + PYBIND11_MAKE_OPAQUE(std::map); + + // ... + + // later in binding code: + py::bind_vector>(m, "VectorInt"); + py::bind_map>(m, "MapStringDouble"); + +When binding STL containers pybind11 considers the types of the container's +elements to decide whether the container should be confined to the local module +(via the :ref:`module_local` feature). If the container element types are +anything other than already-bound custom types bound without +``py::module_local()`` the container binding will have ``py::module_local()`` +applied. 
This includes converting types such as numeric types, strings, Eigen +types; and types that have not yet been bound at the time of the stl container +binding. This module-local binding is designed to avoid potential conflicts +between module bindings (for example, from two separate modules each attempting +to bind ``std::vector`` as a python type). + +It is possible to override this behavior to force a definition to be either +module-local or global. To do so, you can pass the attributes +``py::module_local()`` (to make the binding module-local) or +``py::module_local(false)`` (to make the binding global) into the +``py::bind_vector`` or ``py::bind_map`` arguments: + +.. code-block:: cpp + + py::bind_vector>(m, "VectorInt", py::module_local(false)); + +Note, however, that such a global binding would make it impossible to load this +module at the same time as any other pybind module that also attempts to bind +the same container type (``std::vector`` in the above example). + +See :ref:`module_local` for more details on module-local bindings. + +.. seealso:: + + The file :file:`tests/test_stl_binders.cpp` shows how to use the + convenience STL container wrappers. diff --git a/diffvg/pybind11/docs/advanced/cast/strings.rst b/diffvg/pybind11/docs/advanced/cast/strings.rst new file mode 100644 index 0000000000000000000000000000000000000000..e25701ecabd80142f4fd705f5419ef7c10cc6c56 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/cast/strings.rst @@ -0,0 +1,305 @@ +Strings, bytes and Unicode conversions +###################################### + +.. note:: + + This section discusses string handling in terms of Python 3 strings. For + Python 2.7, replace all occurrences of ``str`` with ``unicode`` and + ``bytes`` with ``str``. Python 2.7 users may find it best to use ``from + __future__ import unicode_literals`` to avoid unintentionally using ``str`` + instead of ``unicode``. 
+ +Passing Python strings to C++ +============================= + +When a Python ``str`` is passed from Python to a C++ function that accepts +``std::string`` or ``char *`` as arguments, pybind11 will encode the Python +string to UTF-8. All Python ``str`` can be encoded in UTF-8, so this operation +does not fail. + +The C++ language is encoding agnostic. It is the responsibility of the +programmer to track encodings. It's often easiest to simply `use UTF-8 +everywhere `_. + +.. code-block:: c++ + + m.def("utf8_test", + [](const std::string &s) { + cout << "utf-8 is icing on the cake.\n"; + cout << s; + } + ); + m.def("utf8_charptr", + [](const char *s) { + cout << "My favorite food is\n"; + cout << s; + } + ); + +.. code-block:: python + + >>> utf8_test('πŸŽ‚') + utf-8 is icing on the cake. + πŸŽ‚ + + >>> utf8_charptr('πŸ•') + My favorite food is + πŸ• + +.. note:: + + Some terminal emulators do not support UTF-8 or emoji fonts and may not + display the example above correctly. + +The results are the same whether the C++ function accepts arguments by value or +reference, and whether or not ``const`` is used. + +Passing bytes to C++ +-------------------- + +A Python ``bytes`` object will be passed to C++ functions that accept +``std::string`` or ``char*`` *without* conversion. On Python 3, in order to +make a function *only* accept ``bytes`` (and not ``str``), declare it as taking +a ``py::bytes`` argument. + + +Returning C++ strings to Python +=============================== + +When a C++ function returns a ``std::string`` or ``char*`` to a Python caller, +**pybind11 will assume that the string is valid UTF-8** and will decode it to a +native Python ``str``, using the same API as Python uses to perform +``bytes.decode('utf-8')``. If this implicit conversion fails, pybind11 will +raise a ``UnicodeDecodeError``. + +.. code-block:: c++ + + m.def("std_string_return", + []() { + return std::string("This string needs to be UTF-8 encoded"); + } + ); + +.. 
code-block:: python + + >>> isinstance(example.std_string_return(), str) + True + + +Because UTF-8 is inclusive of pure ASCII, there is never any issue with +returning a pure ASCII string to Python. If there is any possibility that the +string is not pure ASCII, it is necessary to ensure the encoding is valid +UTF-8. + +.. warning:: + + Implicit conversion assumes that a returned ``char *`` is null-terminated. + If there is no null terminator a buffer overrun will occur. + +Explicit conversions +-------------------- + +If some C++ code constructs a ``std::string`` that is not a UTF-8 string, one +can perform a explicit conversion and return a ``py::str`` object. Explicit +conversion has the same overhead as implicit conversion. + +.. code-block:: c++ + + // This uses the Python C API to convert Latin-1 to Unicode + m.def("str_output", + []() { + std::string s = "Send your r\xe9sum\xe9 to Alice in HR"; // Latin-1 + py::str py_s = PyUnicode_DecodeLatin1(s.data(), s.length()); + return py_s; + } + ); + +.. code-block:: python + + >>> str_output() + 'Send your rΓ©sumΓ© to Alice in HR' + +The `Python C API +`_ provides +several built-in codecs. + + +One could also use a third party encoding library such as libiconv to transcode +to UTF-8. + +Return C++ strings without conversion +------------------------------------- + +If the data in a C++ ``std::string`` does not represent text and should be +returned to Python as ``bytes``, then one can return the data as a +``py::bytes`` object. + +.. code-block:: c++ + + m.def("return_bytes", + []() { + std::string s("\xba\xd0\xba\xd0"); // Not valid UTF-8 + return py::bytes(s); // Return the data without transcoding + } + ); + +.. code-block:: python + + >>> example.return_bytes() + b'\xba\xd0\xba\xd0' + + +Note the asymmetry: pybind11 will convert ``bytes`` to ``std::string`` without +encoding, but cannot convert ``std::string`` back to ``bytes`` implicitly. + +.. 
code-block:: c++ + + m.def("asymmetry", + [](std::string s) { // Accepts str or bytes from Python + return s; // Looks harmless, but implicitly converts to str + } + ); + +.. code-block:: python + + >>> isinstance(example.asymmetry(b"have some bytes"), str) + True + + >>> example.asymmetry(b"\xba\xd0\xba\xd0") # invalid utf-8 as bytes + UnicodeDecodeError: 'utf-8' codec can't decode byte 0xba in position 0: invalid start byte + + +Wide character strings +====================== + +When a Python ``str`` is passed to a C++ function expecting ``std::wstring``, +``wchar_t*``, ``std::u16string`` or ``std::u32string``, the ``str`` will be +encoded to UTF-16 or UTF-32 depending on how the C++ compiler implements each +type, in the platform's native endianness. When strings of these types are +returned, they are assumed to contain valid UTF-16 or UTF-32, and will be +decoded to Python ``str``. + +.. code-block:: c++ + + #define UNICODE + #include + + m.def("set_window_text", + [](HWND hwnd, std::wstring s) { + // Call SetWindowText with null-terminated UTF-16 string + ::SetWindowText(hwnd, s.c_str()); + } + ); + m.def("get_window_text", + [](HWND hwnd) { + const int buffer_size = ::GetWindowTextLength(hwnd) + 1; + auto buffer = std::make_unique< wchar_t[] >(buffer_size); + + ::GetWindowText(hwnd, buffer.data(), buffer_size); + + std::wstring text(buffer.get()); + + // wstring will be converted to Python str + return text; + } + ); + +.. warning:: + + Wide character strings may not work as described on Python 2.7 or Python + 3.3 compiled with ``--enable-unicode=ucs2``. + +Strings in multibyte encodings such as Shift-JIS must transcoded to a +UTF-8/16/32 before being returned to Python. + + +Character literals +================== + +C++ functions that accept character literals as input will receive the first +character of a Python ``str`` as their input. If the string is longer than one +Unicode character, trailing characters will be ignored. 
+ +When a character literal is returned from C++ (such as a ``char`` or a +``wchar_t``), it will be converted to a ``str`` that represents the single +character. + +.. code-block:: c++ + + m.def("pass_char", [](char c) { return c; }); + m.def("pass_wchar", [](wchar_t w) { return w; }); + +.. code-block:: python + + >>> example.pass_char('A') + 'A' + +While C++ will cast integers to character types (``char c = 0x65;``), pybind11 +does not convert Python integers to characters implicitly. The Python function +``chr()`` can be used to convert integers to characters. + +.. code-block:: python + + >>> example.pass_char(0x65) + TypeError + + >>> example.pass_char(chr(0x65)) + 'A' + +If the desire is to work with an 8-bit integer, use ``int8_t`` or ``uint8_t`` +as the argument type. + +Grapheme clusters +----------------- + +A single grapheme may be represented by two or more Unicode characters. For +example 'Γ©' is usually represented as U+00E9 but can also be expressed as the +combining character sequence U+0065 U+0301 (that is, the letter 'e' followed by +a combining acute accent). The combining character will be lost if the +two-character sequence is passed as an argument, even though it renders as a +single grapheme. + +.. code-block:: python + + >>> example.pass_wchar('Γ©') + 'Γ©' + + >>> combining_e_acute = 'e' + '\u0301' + + >>> combining_e_acute + 'é' + + >>> combining_e_acute == 'Γ©' + False + + >>> example.pass_wchar(combining_e_acute) + 'e' + +Normalizing combining characters before passing the character literal to C++ +may resolve *some* of these issues: + +.. code-block:: python + + >>> example.pass_wchar(unicodedata.normalize('NFC', combining_e_acute)) + 'Γ©' + +In some languages (Thai for example), there are `graphemes that cannot be +expressed as a single Unicode code point +`_, so there is +no way to capture them in a C++ character type. 
+ + +C++17 string views +================== + +C++17 string views are automatically supported when compiling in C++17 mode. +They follow the same rules for encoding and decoding as the corresponding STL +string type (for example, a ``std::u16string_view`` argument will be passed +UTF-16-encoded data, and a returned ``std::string_view`` will be decoded as +UTF-8). + +References +========== + +* `The Absolute Minimum Every Software Developer Absolutely, Positively Must Know About Unicode and Character Sets (No Excuses!) `_ +* `C++ - Using STL Strings at Win32 API Boundaries `_ diff --git a/diffvg/pybind11/docs/advanced/classes.rst b/diffvg/pybind11/docs/advanced/classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..f4efc68f8b4b5d8ed70527de064560395fe4ed00 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/classes.rst @@ -0,0 +1,1234 @@ +Classes +####### + +This section presents advanced binding code for classes and it is assumed +that you are already familiar with the basics from :doc:`/classes`. + +.. _overriding_virtuals: + +Overriding virtual functions in Python +====================================== + +Suppose that a C++ class or interface has a virtual function that we'd like to +to override from within Python (we'll focus on the class ``Animal``; ``Dog`` is +given as a specific example of how one would do this with traditional C++ +code). + +.. code-block:: cpp + + class Animal { + public: + virtual ~Animal() { } + virtual std::string go(int n_times) = 0; + }; + + class Dog : public Animal { + public: + std::string go(int n_times) override { + std::string result; + for (int i=0; igo(3); + } + +Normally, the binding code for these classes would look as follows: + +.. 
code-block:: cpp + + PYBIND11_MODULE(example, m) { + py::class_(m, "Animal") + .def("go", &Animal::go); + + py::class_(m, "Dog") + .def(py::init<>()); + + m.def("call_go", &call_go); + } + +However, these bindings are impossible to extend: ``Animal`` is not +constructible, and we clearly require some kind of "trampoline" that +redirects virtual calls back to Python. + +Defining a new type of ``Animal`` from within Python is possible but requires a +helper class that is defined as follows: + +.. code-block:: cpp + + class PyAnimal : public Animal { + public: + /* Inherit the constructors */ + using Animal::Animal; + + /* Trampoline (need one for each virtual function) */ + std::string go(int n_times) override { + PYBIND11_OVERLOAD_PURE( + std::string, /* Return type */ + Animal, /* Parent class */ + go, /* Name of function in C++ (must match Python name) */ + n_times /* Argument(s) */ + ); + } + }; + +The macro :c:macro:`PYBIND11_OVERLOAD_PURE` should be used for pure virtual +functions, and :c:macro:`PYBIND11_OVERLOAD` should be used for functions which have +a default implementation. There are also two alternate macros +:c:macro:`PYBIND11_OVERLOAD_PURE_NAME` and :c:macro:`PYBIND11_OVERLOAD_NAME` which +take a string-valued name argument between the *Parent class* and *Name of the +function* slots, which defines the name of function in Python. This is required +when the C++ and Python versions of the +function have different names, e.g. ``operator()`` vs ``__call__``. + +The binding code also needs a few minor adaptations (highlighted): + +.. code-block:: cpp + :emphasize-lines: 2,3 + + PYBIND11_MODULE(example, m) { + py::class_(m, "Animal") + .def(py::init<>()) + .def("go", &Animal::go); + + py::class_(m, "Dog") + .def(py::init<>()); + + m.def("call_go", &call_go); + } + +Importantly, pybind11 is made aware of the trampoline helper class by +specifying it as an extra template argument to :class:`class_`. 
(This can also +be combined with other template arguments such as a custom holder type; the +order of template types does not matter). Following this, we are able to +define a constructor as usual. + +Bindings should be made against the actual class, not the trampoline helper class. + +.. code-block:: cpp + :emphasize-lines: 3 + + py::class_(m, "Animal"); + .def(py::init<>()) + .def("go", &PyAnimal::go); /* <--- THIS IS WRONG, use &Animal::go */ + +Note, however, that the above is sufficient for allowing python classes to +extend ``Animal``, but not ``Dog``: see :ref:`virtual_and_inheritance` for the +necessary steps required to providing proper overload support for inherited +classes. + +The Python session below shows how to override ``Animal::go`` and invoke it via +a virtual method call. + +.. code-block:: pycon + + >>> from example import * + >>> d = Dog() + >>> call_go(d) + u'woof! woof! woof! ' + >>> class Cat(Animal): + ... def go(self, n_times): + ... return "meow! " * n_times + ... + >>> c = Cat() + >>> call_go(c) + u'meow! meow! meow! ' + +If you are defining a custom constructor in a derived Python class, you *must* +ensure that you explicitly call the bound C++ constructor using ``__init__``, +*regardless* of whether it is a default constructor or not. Otherwise, the +memory for the C++ portion of the instance will be left uninitialized, which +will generally leave the C++ instance in an invalid state and cause undefined +behavior if the C++ instance is subsequently used. + +.. versionchanged:: 2.6 + The default pybind11 metaclass will throw a ``TypeError`` when it detects + that ``__init__`` was not called by a derived class. + +Here is an example: + +.. code-block:: python + + class Dachshund(Dog): + def __init__(self, name): + Dog.__init__(self) # Without this, a TypeError is raised. + self.name = name + def bark(self): + return "yap!" + +Note that a direct ``__init__`` constructor *should be called*, and ``super()`` +should not be used. 
For simple cases of linear inheritance, ``super()`` +may work, but once you begin mixing Python and C++ multiple inheritance, +things will fall apart due to differences between Python's MRO and C++'s +mechanisms. + +Please take a look at the :ref:`macro_notes` before using this feature. + +.. note:: + + When the overridden type returns a reference or pointer to a type that + pybind11 converts from Python (for example, numeric values, std::string, + and other built-in value-converting types), there are some limitations to + be aware of: + + - because in these cases there is no C++ variable to reference (the value + is stored in the referenced Python variable), pybind11 provides one in + the PYBIND11_OVERLOAD macros (when needed) with static storage duration. + Note that this means that invoking the overloaded method on *any* + instance will change the referenced value stored in *all* instances of + that type. + + - Attempts to modify a non-const reference will not have the desired + effect: it will change only the static cache variable, but this change + will not propagate to underlying Python instance, and the change will be + replaced the next time the overload is invoked. + +.. seealso:: + + The file :file:`tests/test_virtual_functions.cpp` contains a complete + example that demonstrates how to override virtual functions using pybind11 + in more detail. + +.. _virtual_and_inheritance: + +Combining virtual functions and inheritance +=========================================== + +When combining virtual methods with inheritance, you need to be sure to provide +an override for each method for which you want to allow overrides from derived +python classes. For example, suppose we extend the above ``Animal``/``Dog`` +example as follows: + +.. 
code-block:: cpp + + class Animal { + public: + virtual std::string go(int n_times) = 0; + virtual std::string name() { return "unknown"; } + }; + class Dog : public Animal { + public: + std::string go(int n_times) override { + std::string result; + for (int i=0; i class PyAnimal : public AnimalBase { + public: + using AnimalBase::AnimalBase; // Inherit constructors + std::string go(int n_times) override { PYBIND11_OVERLOAD_PURE(std::string, AnimalBase, go, n_times); } + std::string name() override { PYBIND11_OVERLOAD(std::string, AnimalBase, name, ); } + }; + template class PyDog : public PyAnimal { + public: + using PyAnimal::PyAnimal; // Inherit constructors + // Override PyAnimal's pure virtual go() with a non-pure one: + std::string go(int n_times) override { PYBIND11_OVERLOAD(std::string, DogBase, go, n_times); } + std::string bark() override { PYBIND11_OVERLOAD(std::string, DogBase, bark, ); } + }; + +This technique has the advantage of requiring just one trampoline method to be +declared per virtual method and pure virtual method override. It does, +however, require the compiler to generate at least as many methods (and +possibly more, if both pure virtual and overridden pure virtual methods are +exposed, as above). + +The classes are then registered with pybind11 using: + +.. code-block:: cpp + + py::class_> animal(m, "Animal"); + py::class_> dog(m, "Dog"); + py::class_> husky(m, "Husky"); + // ... add animal, dog, husky definitions + +Note that ``Husky`` did not require a dedicated trampoline template class at +all, since it neither declares any new virtual methods nor provides any pure +virtual method implementations. + +With either the repeated-virtuals or templated trampoline methods in place, you +can now create a python class that inherits from ``Dog``: + +.. code-block:: python + + class ShihTzu(Dog): + def bark(self): + return "yip!" + +.. 
seealso:: + + See the file :file:`tests/test_virtual_functions.cpp` for complete examples + using both the duplication and templated trampoline approaches. + +.. _extended_aliases: + +Extended trampoline class functionality +======================================= + +.. _extended_class_functionality_forced_trampoline: + +Forced trampoline class initialisation +-------------------------------------- +The trampoline classes described in the previous sections are, by default, only +initialized when needed. More specifically, they are initialized when a python +class actually inherits from a registered type (instead of merely creating an +instance of the registered type), or when a registered constructor is only +valid for the trampoline class but not the registered class. This is primarily +for performance reasons: when the trampoline class is not needed for anything +except virtual method dispatching, not initializing the trampoline class +improves performance by avoiding needing to do a run-time check to see if the +inheriting python instance has an overloaded method. + +Sometimes, however, it is useful to always initialize a trampoline class as an +intermediate class that does more than just handle virtual method dispatching. +For example, such a class might perform extra class initialization, extra +destruction operations, and might define new members and methods to enable a +more python-like interface to a class. + +In order to tell pybind11 that it should *always* initialize the trampoline +class when creating new instances of a type, the class constructors should be +declared using ``py::init_alias()`` instead of the usual +``py::init()``. This forces construction via the trampoline class, +ensuring member initialization and (eventual) destruction. + +.. seealso:: + + See the file :file:`tests/test_virtual_functions.cpp` for complete examples + showing both normal and forced trampoline instantiation. 
+ +Different method signatures +--------------------------- +The macro's introduced in :ref:`overriding_virtuals` cover most of the standard +use cases when exposing C++ classes to Python. Sometimes it is hard or unwieldy +to create a direct one-on-one mapping between the arguments and method return +type. + +An example would be when the C++ signature contains output arguments using +references (See also :ref:`faq_reference_arguments`). Another way of solving +this is to use the method body of the trampoline class to do conversions to the +input and return of the Python method. + +The main building block to do so is the :func:`get_overload`, this function +allows retrieving a method implemented in Python from within the trampoline's +methods. Consider for example a C++ method which has the signature +``bool myMethod(int32_t& value)``, where the return indicates whether +something should be done with the ``value``. This can be made convenient on the +Python side by allowing the Python function to return ``None`` or an ``int``: + +.. code-block:: cpp + + bool MyClass::myMethod(int32_t& value) + { + pybind11::gil_scoped_acquire gil; // Acquire the GIL while in this scope. + // Try to look up the overloaded method on the Python side. + pybind11::function overload = pybind11::get_overload(this, "myMethod"); + if (overload) { // method is found + auto obj = overload(value); // Call the Python function. + if (py::isinstance(obj)) { // check if it returned a Python integer type + value = obj.cast(); // Cast it and assign it to the value. + return true; // Return true; value should be used. + } else { + return false; // Python returned none, return false. + } + } + return false; // Alternatively return MyClass::myMethod(value); + } + + +.. _custom_constructors: + +Custom constructors +=================== + +The syntax for binding constructors was previously introduced, but it only +works when a constructor of the appropriate arguments actually exists on the +C++ side. 
To extend this to more general cases, pybind11 makes it possible +to bind factory functions as constructors. For example, suppose you have a +class like this: + +.. code-block:: cpp + + class Example { + private: + Example(int); // private constructor + public: + // Factory function: + static Example create(int a) { return Example(a); } + }; + + py::class_(m, "Example") + .def(py::init(&Example::create)); + +While it is possible to create a straightforward binding of the static +``create`` method, it may sometimes be preferable to expose it as a constructor +on the Python side. This can be accomplished by calling ``.def(py::init(...))`` +with the function reference returning the new instance passed as an argument. +It is also possible to use this approach to bind a function returning a new +instance by raw pointer or by the holder (e.g. ``std::unique_ptr``). + +The following example shows the different approaches: + +.. code-block:: cpp + + class Example { + private: + Example(int); // private constructor + public: + // Factory function - returned by value: + static Example create(int a) { return Example(a); } + + // These constructors are publicly callable: + Example(double); + Example(int, int); + Example(std::string); + }; + + py::class_(m, "Example") + // Bind the factory function as a constructor: + .def(py::init(&Example::create)) + // Bind a lambda function returning a pointer wrapped in a holder: + .def(py::init([](std::string arg) { + return std::unique_ptr(new Example(arg)); + })) + // Return a raw pointer: + .def(py::init([](int a, int b) { return new Example(a, b); })) + // You can mix the above with regular C++ constructor bindings as well: + .def(py::init()) + ; + +When the constructor is invoked from Python, pybind11 will call the factory +function and store the resulting C++ instance in the Python instance. + +When combining factory functions constructors with :ref:`virtual function +trampolines ` there are two approaches. 
The first is to +add a constructor to the alias class that takes a base value by +rvalue-reference. If such a constructor is available, it will be used to +construct an alias instance from the value returned by the factory function. +The second option is to provide two factory functions to ``py::init()``: the +first will be invoked when no alias class is required (i.e. when the class is +being used but not inherited from in Python), and the second will be invoked +when an alias is required. + +You can also specify a single factory function that always returns an alias +instance: this will result in behaviour similar to ``py::init_alias<...>()``, +as described in the :ref:`extended trampoline class documentation +`. + +The following example shows the different factory approaches for a class with +an alias: + +.. code-block:: cpp + + #include + class Example { + public: + // ... + virtual ~Example() = default; + }; + class PyExample : public Example { + public: + using Example::Example; + PyExample(Example &&base) : Example(std::move(base)) {} + }; + py::class_(m, "Example") + // Returns an Example pointer. If a PyExample is needed, the Example + // instance will be moved via the extra constructor in PyExample, above. + .def(py::init([]() { return new Example(); })) + // Two callbacks: + .def(py::init([]() { return new Example(); } /* no alias needed */, + []() { return new PyExample(); } /* alias needed */)) + // *Always* returns an alias instance (like py::init_alias<>()) + .def(py::init([]() { return new PyExample(); })) + ; + +Brace initialization +-------------------- + +``pybind11::init<>`` internally uses C++11 brace initialization to call the +constructor of the target class. This means that it can be used to bind +*implicit* constructors as well: + +.. code-block:: cpp + + struct Aggregate { + int a; + std::string b; + }; + + py::class_(m, "Aggregate") + .def(py::init()); + +.. 
note:: + + Note that brace initialization preferentially invokes constructor overloads + taking a ``std::initializer_list``. In the rare event that this causes an + issue, you can work around it by using ``py::init(...)`` with a lambda + function that constructs the new object as desired. + +.. _classes_with_non_public_destructors: + +Non-public destructors +====================== + +If a class has a private or protected destructor (as might e.g. be the case in +a singleton pattern), a compile error will occur when creating bindings via +pybind11. The underlying issue is that the ``std::unique_ptr`` holder type that +is responsible for managing the lifetime of instances will reference the +destructor even if no deallocations ever take place. In order to expose classes +with private or protected destructors, it is possible to override the holder +type via a holder type argument to ``class_``. Pybind11 provides a helper class +``py::nodelete`` that disables any destructor invocations. In this case, it is +crucial that instances are deallocated on the C++ side to avoid memory leaks. + +.. code-block:: cpp + + /* ... definition ... */ + + class MyClass { + private: + ~MyClass() { } + }; + + /* ... binding code ... */ + + py::class_>(m, "MyClass") + .def(py::init<>()) + +.. _destructors_that_call_python: + +Destructors that call Python +============================ + +If a Python function is invoked from a C++ destructor, an exception may be thrown +of type :class:`error_already_set`. If this error is thrown out of a class destructor, +``std::terminate()`` will be called, terminating the process. Class destructors +must catch all exceptions of type :class:`error_already_set` to discard the Python +exception using :func:`error_already_set::discard_as_unraisable`. + +Every Python function should be treated as *possibly throwing*. 
When a Python generator +stops yielding items, Python will throw a ``StopIteration`` exception, which can pass +though C++ destructors if the generator's stack frame holds the last reference to C++ +objects. + +For more information, see :ref:`the documentation on exceptions `. + +.. code-block:: cpp + + class MyClass { + public: + ~MyClass() { + try { + py::print("Even printing is dangerous in a destructor"); + py::exec("raise ValueError('This is an unraisable exception')"); + } catch (py::error_already_set &e) { + // error_context should be information about where/why the occurred, + // e.g. use __func__ to get the name of the current function + e.discard_as_unraisable(__func__); + } + } + }; + +.. note:: + + pybind11 does not support C++ destructors marked ``noexcept(false)``. + +.. versionadded:: 2.6 + +.. _implicit_conversions: + +Implicit conversions +==================== + +Suppose that instances of two types ``A`` and ``B`` are used in a project, and +that an ``A`` can easily be converted into an instance of type ``B`` (examples of this +could be a fixed and an arbitrary precision number type). + +.. code-block:: cpp + + py::class_(m, "A") + /// ... members ... + + py::class_(m, "B") + .def(py::init()) + /// ... members ... + + m.def("func", + [](const B &) { /* .... */ } + ); + +To invoke the function ``func`` using a variable ``a`` containing an ``A`` +instance, we'd have to write ``func(B(a))`` in Python. On the other hand, C++ +will automatically apply an implicit type conversion, which makes it possible +to directly write ``func(a)``. + +In this situation (i.e. where ``B`` has a constructor that converts from +``A``), the following statement enables similar implicit conversions on the +Python side: + +.. code-block:: cpp + + py::implicitly_convertible(); + +.. note:: + + Implicit conversions from ``A`` to ``B`` only work when ``B`` is a custom + data type that is exposed to Python via pybind11. 
+ + To prevent runaway recursion, implicit conversions are non-reentrant: an + implicit conversion invoked as part of another implicit conversion of the + same type (i.e. from ``A`` to ``B``) will fail. + +.. _static_properties: + +Static properties +================= + +The section on :ref:`properties` discussed the creation of instance properties +that are implemented in terms of C++ getters and setters. + +Static properties can also be created in a similar way to expose getters and +setters of static class attributes. Note that the implicit ``self`` argument +also exists in this case and is used to pass the Python ``type`` subclass +instance. This parameter will often not be needed by the C++ side, and the +following example illustrates how to instantiate a lambda getter function +that ignores it: + +.. code-block:: cpp + + py::class_(m, "Foo") + .def_property_readonly_static("foo", [](py::object /* self */) { return Foo(); }); + +Operator overloading +==================== + +Suppose that we're given the following ``Vector2`` class with a vector addition +and scalar multiplication operation, all implemented using overloaded operators +in C++. + +.. code-block:: cpp + + class Vector2 { + public: + Vector2(float x, float y) : x(x), y(y) { } + + Vector2 operator+(const Vector2 &v) const { return Vector2(x + v.x, y + v.y); } + Vector2 operator*(float value) const { return Vector2(x * value, y * value); } + Vector2& operator+=(const Vector2 &v) { x += v.x; y += v.y; return *this; } + Vector2& operator*=(float v) { x *= v; y *= v; return *this; } + + friend Vector2 operator*(float f, const Vector2 &v) { + return Vector2(f * v.x, f * v.y); + } + + std::string toString() const { + return "[" + std::to_string(x) + ", " + std::to_string(y) + "]"; + } + private: + float x, y; + }; + +The following snippet shows how the above operators can be conveniently exposed +to Python. + +.. 
code-block:: cpp + + #include + + PYBIND11_MODULE(example, m) { + py::class_(m, "Vector2") + .def(py::init()) + .def(py::self + py::self) + .def(py::self += py::self) + .def(py::self *= float()) + .def(float() * py::self) + .def(py::self * float()) + .def(-py::self) + .def("__repr__", &Vector2::toString); + } + +Note that a line like + +.. code-block:: cpp + + .def(py::self * float()) + +is really just short hand notation for + +.. code-block:: cpp + + .def("__mul__", [](const Vector2 &a, float b) { + return a * b; + }, py::is_operator()) + +This can be useful for exposing additional operators that don't exist on the +C++ side, or to perform other types of customization. The ``py::is_operator`` +flag marker is needed to inform pybind11 that this is an operator, which +returns ``NotImplemented`` when invoked with incompatible arguments rather than +throwing a type error. + +.. note:: + + To use the more convenient ``py::self`` notation, the additional + header file :file:`pybind11/operators.h` must be included. + +.. seealso:: + + The file :file:`tests/test_operator_overloading.cpp` contains a + complete example that demonstrates how to work with overloaded operators in + more detail. + +.. _pickling: + +Pickling support +================ + +Python's ``pickle`` module provides a powerful facility to serialize and +de-serialize a Python object graph into a binary data stream. To pickle and +unpickle C++ classes using pybind11, a ``py::pickle()`` definition must be +provided. Suppose the class in question has the following signature: + +.. code-block:: cpp + + class Pickleable { + public: + Pickleable(const std::string &value) : m_value(value) { } + const std::string &value() const { return m_value; } + + void setExtra(int extra) { m_extra = extra; } + int extra() const { return m_extra; } + private: + std::string m_value; + int m_extra = 0; + }; + +Pickling support in Python is enabled by defining the ``__setstate__`` and +``__getstate__`` methods [#f3]_. 
For pybind11 classes, use ``py::pickle()`` +to bind these two functions: + +.. code-block:: cpp + + py::class_(m, "Pickleable") + .def(py::init()) + .def("value", &Pickleable::value) + .def("extra", &Pickleable::extra) + .def("setExtra", &Pickleable::setExtra) + .def(py::pickle( + [](const Pickleable &p) { // __getstate__ + /* Return a tuple that fully encodes the state of the object */ + return py::make_tuple(p.value(), p.extra()); + }, + [](py::tuple t) { // __setstate__ + if (t.size() != 2) + throw std::runtime_error("Invalid state!"); + + /* Create a new C++ instance */ + Pickleable p(t[0].cast()); + + /* Assign any additional state */ + p.setExtra(t[1].cast()); + + return p; + } + )); + +The ``__setstate__`` part of the ``py::picke()`` definition follows the same +rules as the single-argument version of ``py::init()``. The return type can be +a value, pointer or holder type. See :ref:`custom_constructors` for details. + +An instance can now be pickled as follows: + +.. code-block:: python + + try: + import cPickle as pickle # Use cPickle on Python 2.7 + except ImportError: + import pickle + + p = Pickleable("test_value") + p.setExtra(15) + data = pickle.dumps(p, 2) + + +.. note:: + Note that only the cPickle module is supported on Python 2.7. + + The second argument to ``dumps`` is also crucial: it selects the pickle + protocol version 2, since the older version 1 is not supported. Newer + versions are also fineβ€”for instance, specify ``-1`` to always use the + latest available version. Beware: failure to follow these instructions + will cause important pybind11 memory allocation routines to be skipped + during unpickling, which will likely lead to memory corruption and/or + segmentation faults. + +.. seealso:: + + The file :file:`tests/test_pickling.cpp` contains a complete example + that demonstrates how to pickle and unpickle types using pybind11 in more + detail. + +.. 
[#f3] http://docs.python.org/3/library/pickle.html#pickling-class-instances + +Deepcopy support +================ + +Python normally uses references in assignments. Sometimes a real copy is needed +to prevent changing all copies. The ``copy`` module [#f5]_ provides these +capabilities. + +On Python 3, a class with pickle support is automatically also (deep)copy +compatible. However, performance can be improved by adding custom +``__copy__`` and ``__deepcopy__`` methods. With Python 2.7, these custom methods +are mandatory for (deep)copy compatibility, because pybind11 only supports +cPickle. + +For simple classes (deep)copy can be enabled by using the copy constructor, +which should look as follows: + +.. code-block:: cpp + + py::class_(m, "Copyable") + .def("__copy__", [](const Copyable &self) { + return Copyable(self); + }) + .def("__deepcopy__", [](const Copyable &self, py::dict) { + return Copyable(self); + }, "memo"_a); + +.. note:: + + Dynamic attributes will not be copied in this example. + +.. [#f5] https://docs.python.org/3/library/copy.html + +Multiple Inheritance +==================== + +pybind11 can create bindings for types that derive from multiple base types +(aka. *multiple inheritance*). To do so, specify all bases in the template +arguments of the ``class_`` declaration: + +.. code-block:: cpp + + py::class_(m, "MyType") + ... + +The base types can be specified in arbitrary order, and they can even be +interspersed with alias types and holder types (discussed earlier in this +document)---pybind11 will automatically find out which is which. The only +requirement is that the first template argument is the type to be declared. + +It is also permitted to inherit multiply from exported C++ classes in Python, +as well as inheriting from multiple Python and/or pybind11-exported classes. 
+ +There is one caveat regarding the implementation of this feature: + +When only one base type is specified for a C++ type that actually has multiple +bases, pybind11 will assume that it does not participate in multiple +inheritance, which can lead to undefined behavior. In such cases, add the tag +``multiple_inheritance`` to the class constructor: + +.. code-block:: cpp + + py::class_(m, "MyType", py::multiple_inheritance()); + +The tag is redundant and does not need to be specified when multiple base types +are listed. + +.. _module_local: + +Module-local class bindings +=========================== + +When creating a binding for a class, pybind11 by default makes that binding +"global" across modules. What this means is that a type defined in one module +can be returned from any module resulting in the same Python type. For +example, this allows the following: + +.. code-block:: cpp + + // In the module1.cpp binding code for module1: + py::class_(m, "Pet") + .def(py::init()) + .def_readonly("name", &Pet::name); + +.. code-block:: cpp + + // In the module2.cpp binding code for module2: + m.def("create_pet", [](std::string name) { return new Pet(name); }); + +.. code-block:: pycon + + >>> from module1 import Pet + >>> from module2 import create_pet + >>> pet1 = Pet("Kitty") + >>> pet2 = create_pet("Doggy") + >>> pet2.name() + 'Doggy' + +When writing binding code for a library, this is usually desirable: this +allows, for example, splitting up a complex library into multiple Python +modules. + +In some cases, however, this can cause conflicts. For example, suppose two +unrelated modules make use of an external C++ library and each provide custom +bindings for one of that library's classes. This will result in an error when +a Python program attempts to import both modules (directly or indirectly) +because of conflicting definitions on the external type: + +.. 
code-block:: cpp + + // dogs.cpp + + // Binding for external library class: + py::class(m, "Pet") + .def("name", &pets::Pet::name); + + // Binding for local extension class: + py::class(m, "Dog") + .def(py::init()); + +.. code-block:: cpp + + // cats.cpp, in a completely separate project from the above dogs.cpp. + + // Binding for external library class: + py::class(m, "Pet") + .def("get_name", &pets::Pet::name); + + // Binding for local extending class: + py::class(m, "Cat") + .def(py::init()); + +.. code-block:: pycon + + >>> import cats + >>> import dogs + Traceback (most recent call last): + File "", line 1, in + ImportError: generic_type: type "Pet" is already registered! + +To get around this, you can tell pybind11 to keep the external class binding +localized to the module by passing the ``py::module_local()`` attribute into +the ``py::class_`` constructor: + +.. code-block:: cpp + + // Pet binding in dogs.cpp: + py::class(m, "Pet", py::module_local()) + .def("name", &pets::Pet::name); + +.. code-block:: cpp + + // Pet binding in cats.cpp: + py::class(m, "Pet", py::module_local()) + .def("get_name", &pets::Pet::name); + +This makes the Python-side ``dogs.Pet`` and ``cats.Pet`` into distinct classes, +avoiding the conflict and allowing both modules to be loaded. C++ code in the +``dogs`` module that casts or returns a ``Pet`` instance will result in a +``dogs.Pet`` Python instance, while C++ code in the ``cats`` module will result +in a ``cats.Pet`` Python instance. + +This does come with two caveats, however: First, external modules cannot return +or cast a ``Pet`` instance to Python (unless they also provide their own local +bindings). Second, from the Python point of view they are two distinct classes. + +Note that the locality only applies in the C++ -> Python direction. When +passing such a ``py::module_local`` type into a C++ function, the module-local +classes are still considered. 
This means that if the following function is +added to any module (including but not limited to the ``cats`` and ``dogs`` +modules above) it will be callable with either a ``dogs.Pet`` or ``cats.Pet`` +argument: + +.. code-block:: cpp + + m.def("pet_name", [](const pets::Pet &pet) { return pet.name(); }); + +For example, suppose the above function is added to each of ``cats.cpp``, +``dogs.cpp`` and ``frogs.cpp`` (where ``frogs.cpp`` is some other module that +does *not* bind ``Pets`` at all). + +.. code-block:: pycon + + >>> import cats, dogs, frogs # No error because of the added py::module_local() + >>> mycat, mydog = cats.Cat("Fluffy"), dogs.Dog("Rover") + >>> (cats.pet_name(mycat), dogs.pet_name(mydog)) + ('Fluffy', 'Rover') + >>> (cats.pet_name(mydog), dogs.pet_name(mycat), frogs.pet_name(mycat)) + ('Rover', 'Fluffy', 'Fluffy') + +It is possible to use ``py::module_local()`` registrations in one module even +if another module registers the same type globally: within the module with the +module-local definition, all C++ instances will be cast to the associated bound +Python type. In other modules any such values are converted to the global +Python type created elsewhere. + +.. note:: + + STL bindings (as provided via the optional :file:`pybind11/stl_bind.h` + header) apply ``py::module_local`` by default when the bound type might + conflict with other modules; see :ref:`stl_bind` for details. + +.. note:: + + The localization of the bound types is actually tied to the shared object + or binary generated by the compiler/linker. For typical modules created + with ``PYBIND11_MODULE()``, this distinction is not significant. It is + possible, however, when :ref:`embedding` to embed multiple modules in the + same binary (see :ref:`embedding_modules`). In such a case, the + localization will apply across all embedded modules within the same binary. + +.. 
seealso:: + + The file :file:`tests/test_local_bindings.cpp` contains additional examples + that demonstrate how ``py::module_local()`` works. + +Binding protected member functions +================================== + +It's normally not possible to expose ``protected`` member functions to Python: + +.. code-block:: cpp + + class A { + protected: + int foo() const { return 42; } + }; + + py::class_(m, "A") + .def("foo", &A::foo); // error: 'foo' is a protected member of 'A' + +On one hand, this is good because non-``public`` members aren't meant to be +accessed from the outside. But we may want to make use of ``protected`` +functions in derived Python classes. + +The following pattern makes this possible: + +.. code-block:: cpp + + class A { + protected: + int foo() const { return 42; } + }; + + class Publicist : public A { // helper type for exposing protected functions + public: + using A::foo; // inherited with different access modifier + }; + + py::class_(m, "A") // bind the primary class + .def("foo", &Publicist::foo); // expose protected methods via the publicist + +This works because ``&Publicist::foo`` is exactly the same function as +``&A::foo`` (same signature and address), just with a different access +modifier. The only purpose of the ``Publicist`` helper class is to make +the function name ``public``. + +If the intent is to expose ``protected`` ``virtual`` functions which can be +overridden in Python, the publicist pattern can be combined with the previously +described trampoline: + +.. code-block:: cpp + + class A { + public: + virtual ~A() = default; + + protected: + virtual int foo() const { return 42; } + }; + + class Trampoline : public A { + public: + int foo() const override { PYBIND11_OVERLOAD(int, A, foo, ); } + }; + + class Publicist : public A { + public: + using A::foo; + }; + + py::class_(m, "A") // <-- `Trampoline` here + .def("foo", &Publicist::foo); // <-- `Publicist` here, not `Trampoline`! + +.. 
note:: + + MSVC 2015 has a compiler bug (fixed in version 2017) which + requires a more explicit function binding in the form of + ``.def("foo", static_cast(&Publicist::foo));`` + where ``int (A::*)() const`` is the type of ``A::foo``. + +Binding final classes +===================== + +Some classes may not be appropriate to inherit from. In C++11, classes can +use the ``final`` specifier to ensure that a class cannot be inherited from. +The ``py::is_final`` attribute can be used to ensure that Python classes +cannot inherit from a specified type. The underlying C++ type does not need +to be declared final. + +.. code-block:: cpp + + class IsFinal final {}; + + py::class_(m, "IsFinal", py::is_final()); + +When you try to inherit from such a class in Python, you will now get this +error: + +.. code-block:: pycon + + >>> class PyFinalChild(IsFinal): + ... pass + TypeError: type 'IsFinal' is not an acceptable base type + +.. note:: This attribute is currently ignored on PyPy + +.. versionadded:: 2.6 + +Custom automatic downcasters +============================ + +As explained in :ref:`inheritance`, pybind11 comes with built-in +understanding of the dynamic type of polymorphic objects in C++; that +is, returning a Pet to Python produces a Python object that knows it's +wrapping a Dog, if Pet has virtual methods and pybind11 knows about +Dog and this Pet is in fact a Dog. Sometimes, you might want to +provide this automatic downcasting behavior when creating bindings for +a class hierarchy that does not use standard C++ polymorphism, such as +LLVM [#f4]_. As long as there's some way to determine at runtime +whether a downcast is safe, you can proceed by specializing the +``pybind11::polymorphic_type_hook`` template: + +.. 
code-block:: cpp + + enum class PetKind { Cat, Dog, Zebra }; + struct Pet { // Not polymorphic: has no virtual methods + const PetKind kind; + int age = 0; + protected: + Pet(PetKind _kind) : kind(_kind) {} + }; + struct Dog : Pet { + Dog() : Pet(PetKind::Dog) {} + std::string sound = "woof!"; + std::string bark() const { return sound; } + }; + + namespace pybind11 { + template<> struct polymorphic_type_hook { + static const void *get(const Pet *src, const std::type_info*& type) { + // note that src may be nullptr + if (src && src->kind == PetKind::Dog) { + type = &typeid(Dog); + return static_cast(src); + } + return src; + } + }; + } // namespace pybind11 + +When pybind11 wants to convert a C++ pointer of type ``Base*`` to a +Python object, it calls ``polymorphic_type_hook::get()`` to +determine if a downcast is possible. The ``get()`` function should use +whatever runtime information is available to determine if its ``src`` +parameter is in fact an instance of some class ``Derived`` that +inherits from ``Base``. If it finds such a ``Derived``, it sets ``type += &typeid(Derived)`` and returns a pointer to the ``Derived`` object +that contains ``src``. Otherwise, it just returns ``src``, leaving +``type`` at its default value of nullptr. If you set ``type`` to a +type that pybind11 doesn't know about, no downcasting will occur, and +the original ``src`` pointer will be used with its static type +``Base*``. + +It is critical that the returned pointer and ``type`` argument of +``get()`` agree with each other: if ``type`` is set to something +non-null, the returned pointer must point to the start of an object +whose type is ``type``. If the hierarchy being exposed uses only +single inheritance, a simple ``return src;`` will achieve this just +fine, but in the general case, you must cast ``src`` to the +appropriate derived-class pointer (e.g. using +``static_cast(src)``) before allowing it to be returned as a +``void*``. + +.. 
[#f4] https://llvm.org/docs/HowToSetUpLLVMStyleRTTI.html + +.. note:: + + pybind11's standard support for downcasting objects whose types + have virtual methods is implemented using + ``polymorphic_type_hook`` too, using the standard C++ ability to + determine the most-derived type of a polymorphic object using + ``typeid()`` and to cast a base pointer to that most-derived type + (even if you don't know what it is) using ``dynamic_cast``. + +.. seealso:: + + The file :file:`tests/test_tagbased_polymorphic.cpp` contains a + more complete example, including a demonstration of how to provide + automatic downcasting for an entire class hierarchy without + writing one get() function for each class. diff --git a/diffvg/pybind11/docs/advanced/embedding.rst b/diffvg/pybind11/docs/advanced/embedding.rst new file mode 100644 index 0000000000000000000000000000000000000000..98a5c5219076872fbbd158bc2d99de294ee00789 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/embedding.rst @@ -0,0 +1,261 @@ +.. _embedding: + +Embedding the interpreter +######################### + +While pybind11 is mainly focused on extending Python using C++, it's also +possible to do the reverse: embed the Python interpreter into a C++ program. +All of the other documentation pages still apply here, so refer to them for +general pybind11 usage. This section will cover a few extra things required +for embedding. + +Getting started +=============== + +A basic executable with an embedded interpreter can be created with just a few +lines of CMake and the ``pybind11::embed`` target, as shown below. For more +information, see :doc:`/compiling`. + +.. code-block:: cmake + + cmake_minimum_required(VERSION 3.4) + project(example) + + find_package(pybind11 REQUIRED) # or `add_subdirectory(pybind11)` + + add_executable(example main.cpp) + target_link_libraries(example PRIVATE pybind11::embed) + +The essential structure of the ``main.cpp`` file looks like this: + +.. 
code-block:: cpp + + #include // everything needed for embedding + namespace py = pybind11; + + int main() { + py::scoped_interpreter guard{}; // start the interpreter and keep it alive + + py::print("Hello, World!"); // use the Python API + } + +The interpreter must be initialized before using any Python API, which includes +all the functions and classes in pybind11. The RAII guard class `scoped_interpreter` +takes care of the interpreter lifetime. After the guard is destroyed, the interpreter +shuts down and clears its memory. No Python functions can be called after this. + +Executing Python code +===================== + +There are a few different ways to run Python code. One option is to use `eval`, +`exec` or `eval_file`, as explained in :ref:`eval`. Here is a quick example in +the context of an executable with an embedded interpreter: + +.. code-block:: cpp + + #include + namespace py = pybind11; + + int main() { + py::scoped_interpreter guard{}; + + py::exec(R"( + kwargs = dict(name="World", number=42) + message = "Hello, {name}! The answer is {number}".format(**kwargs) + print(message) + )"); + } + +Alternatively, similar results can be achieved using pybind11's API (see +:doc:`/advanced/pycpp/index` for more details). + +.. code-block:: cpp + + #include + namespace py = pybind11; + using namespace py::literals; + + int main() { + py::scoped_interpreter guard{}; + + auto kwargs = py::dict("name"_a="World", "number"_a=42); + auto message = "Hello, {name}! The answer is {number}"_s.format(**kwargs); + py::print(message); + } + +The two approaches can also be combined: + +.. code-block:: cpp + + #include + #include + + namespace py = pybind11; + using namespace py::literals; + + int main() { + py::scoped_interpreter guard{}; + + auto locals = py::dict("name"_a="World", "number"_a=42); + py::exec(R"( + message = "Hello, {name}! 
The answer is {number}".format(**locals()) + )", py::globals(), locals); + + auto message = locals["message"].cast(); + std::cout << message; + } + +Importing modules +================= + +Python modules can be imported using `module::import()`: + +.. code-block:: cpp + + py::module sys = py::module::import("sys"); + py::print(sys.attr("path")); + +For convenience, the current working directory is included in ``sys.path`` when +embedding the interpreter. This makes it easy to import local Python files: + +.. code-block:: python + + """calc.py located in the working directory""" + + def add(i, j): + return i + j + + +.. code-block:: cpp + + py::module calc = py::module::import("calc"); + py::object result = calc.attr("add")(1, 2); + int n = result.cast(); + assert(n == 3); + +Modules can be reloaded using `module::reload()` if the source is modified e.g. +by an external process. This can be useful in scenarios where the application +imports a user defined data processing script which needs to be updated after +changes by the user. Note that this function does not reload modules recursively. + +.. _embedding_modules: + +Adding embedded modules +======================= + +Embedded binary modules can be added using the `PYBIND11_EMBEDDED_MODULE` macro. +Note that the definition must be placed at global scope. They can be imported +like any other module. + +.. 
code-block:: cpp + + #include + namespace py = pybind11; + + PYBIND11_EMBEDDED_MODULE(fast_calc, m) { + // `m` is a `py::module` which is used to bind functions and classes + m.def("add", [](int i, int j) { + return i + j; + }); + } + + int main() { + py::scoped_interpreter guard{}; + + auto fast_calc = py::module::import("fast_calc"); + auto result = fast_calc.attr("add")(1, 2).cast(); + assert(result == 3); + } + +Unlike extension modules where only a single binary module can be created, on +the embedded side an unlimited number of modules can be added using multiple +`PYBIND11_EMBEDDED_MODULE` definitions (as long as they have unique names). + +These modules are added to Python's list of builtins, so they can also be +imported in pure Python files loaded by the interpreter. Everything interacts +naturally: + +.. code-block:: python + + """py_module.py located in the working directory""" + import cpp_module + + a = cpp_module.a + b = a + 1 + + +.. code-block:: cpp + + #include + namespace py = pybind11; + + PYBIND11_EMBEDDED_MODULE(cpp_module, m) { + m.attr("a") = 1; + } + + int main() { + py::scoped_interpreter guard{}; + + auto py_module = py::module::import("py_module"); + + auto locals = py::dict("fmt"_a="{} + {} = {}", **py_module.attr("__dict__")); + assert(locals["a"].cast() == 1); + assert(locals["b"].cast() == 2); + + py::exec(R"( + c = a + b + message = fmt.format(a, b, c) + )", py::globals(), locals); + + assert(locals["c"].cast() == 3); + assert(locals["message"].cast() == "1 + 2 = 3"); + } + + +Interpreter lifetime +==================== + +The Python interpreter shuts down when `scoped_interpreter` is destroyed. After +this, creating a new instance will restart the interpreter. Alternatively, the +`initialize_interpreter` / `finalize_interpreter` pair of functions can be used +to directly set the state at any time. + +Modules created with pybind11 can be safely re-initialized after the interpreter +has been restarted. 
However, this may not apply to third-party extension modules. +The issue is that Python itself cannot completely unload extension modules and +there are several caveats with regard to interpreter restarting. In short, not +all memory may be freed, either due to Python reference cycles or user-created +global data. All the details can be found in the CPython documentation. + +.. warning:: + + Creating two concurrent `scoped_interpreter` guards is a fatal error. So is + calling `initialize_interpreter` for a second time after the interpreter + has already been initialized. + + Do not use the raw CPython API functions ``Py_Initialize`` and + ``Py_Finalize`` as these do not properly handle the lifetime of + pybind11's internal data. + + +Sub-interpreter support +======================= + +Creating multiple copies of `scoped_interpreter` is not possible because it +represents the main Python interpreter. Sub-interpreters are something different +and they do permit the existence of multiple interpreters. This is an advanced +feature of the CPython API and should be handled with care. pybind11 does not +currently offer a C++ interface for sub-interpreters, so refer to the CPython +documentation for all the details regarding this feature. + +We'll just mention a couple of caveats the sub-interpreters support in pybind11: + + 1. Sub-interpreters will not receive independent copies of embedded modules. + Instead, these are shared and modifications in one interpreter may be + reflected in another. + + 2. Managing multiple threads, multiple interpreters and the GIL can be + challenging and there are several caveats here, even within the pure + CPython API (please refer to the Python docs for details). As for + pybind11, keep in mind that `gil_scoped_release` and `gil_scoped_acquire` + do not take sub-interpreters into account. 
diff --git a/diffvg/pybind11/docs/advanced/exceptions.rst b/diffvg/pybind11/docs/advanced/exceptions.rst new file mode 100644 index 0000000000000000000000000000000000000000..b7d36014a68239c7518d00364522fbdb03f183bc --- /dev/null +++ b/diffvg/pybind11/docs/advanced/exceptions.rst @@ -0,0 +1,285 @@ +Exceptions +########## + +Built-in C++ to Python exception translation +============================================ + +When Python calls C++ code through pybind11, pybind11 provides a C++ exception handler +that will trap C++ exceptions, translate them to the corresponding Python exception, +and raise them so that Python code can handle them. + +pybind11 defines translations for ``std::exception`` and its standard +subclasses, and several special exception classes that translate to specific +Python exceptions. Note that these are not actually Python exceptions, so they +cannot be examined using the Python C API. Instead, they are pure C++ objects +that pybind11 will translate to the corresponding Python exception when they arrive +at its exception handler. + +.. 
tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}| + ++--------------------------------------+--------------------------------------+ +| Exception thrown by C++ | Translated to Python exception type | ++======================================+======================================+ +| :class:`std::exception` | ``RuntimeError`` | ++--------------------------------------+--------------------------------------+ +| :class:`std::bad_alloc` | ``MemoryError`` | ++--------------------------------------+--------------------------------------+ +| :class:`std::domain_error` | ``ValueError`` | ++--------------------------------------+--------------------------------------+ +| :class:`std::invalid_argument` | ``ValueError`` | ++--------------------------------------+--------------------------------------+ +| :class:`std::length_error` | ``ValueError`` | ++--------------------------------------+--------------------------------------+ +| :class:`std::out_of_range` | ``IndexError`` | ++--------------------------------------+--------------------------------------+ +| :class:`std::range_error` | ``ValueError`` | ++--------------------------------------+--------------------------------------+ +| :class:`std::overflow_error` | ``OverflowError`` | ++--------------------------------------+--------------------------------------+ +| :class:`pybind11::stop_iteration` | ``StopIteration`` (used to implement | +| | custom iterators) | ++--------------------------------------+--------------------------------------+ +| :class:`pybind11::index_error` | ``IndexError`` (used to indicate out | +| | of bounds access in ``__getitem__``, | +| | ``__setitem__``, etc.) 
| ++--------------------------------------+--------------------------------------+ +| :class:`pybind11::value_error` | ``ValueError`` (used to indicate | +| | wrong value passed in | +| | ``container.remove(...)``) | ++--------------------------------------+--------------------------------------+ +| :class:`pybind11::key_error` | ``KeyError`` (used to indicate out | +| | of bounds access in ``__getitem__``, | +| | ``__setitem__`` in dict-like | +| | objects, etc.) | ++--------------------------------------+--------------------------------------+ + +Exception translation is not bidirectional. That is, *catching* the C++ +exceptions defined above will not trap exceptions that originate from +Python. For that, catch :class:`pybind11::error_already_set`. See :ref:`below +<handling_python_exceptions_cpp>` for further details. + +There is also a special exception :class:`cast_error` that is thrown by +:func:`handle::call` when the input arguments cannot be converted to Python +objects. + +Registering custom translators +============================== + +If the default exception conversion policy described above is insufficient, +pybind11 also provides support for registering custom exception translators. +To register a simple exception conversion that translates a C++ exception into +a new Python exception using the C++ exception's ``what()`` method, a helper +function is available: + +.. code-block:: cpp + + py::register_exception<CppExp>(module, "PyExp"); + +This call creates a Python exception class with the name ``PyExp`` in the given +module and automatically converts any encountered exceptions of type ``CppExp`` +into Python exceptions of type ``PyExp``. + +When more advanced exception translation is needed, the function +``py::register_exception_translator(translator)`` can be used to register +functions that can translate arbitrary exception types (and which may include +additional logic to do so). The function takes a stateless callable (e.g. 
a +function pointer or a lambda function without captured variables) with the call +signature ``void(std::exception_ptr)``. + +When a C++ exception is thrown, the registered exception translators are tried +in reverse order of registration (i.e. the last registered translator gets the +first shot at handling the exception). + +Inside the translator, ``std::rethrow_exception`` should be used within +a try block to re-throw the exception. One or more catch clauses to catch +the appropriate exceptions should then be used with each clause using +``PyErr_SetString`` to set a Python exception or ``ex(string)`` to set +the python exception to a custom exception type (see below). + +To declare a custom Python exception type, declare a ``py::exception`` variable +and use this in the associated exception translator (note: it is often useful +to make this a static declaration when using it inside a lambda expression +without requiring capturing). + +The following example demonstrates this for a hypothetical exception classes +``MyCustomException`` and ``OtherException``: the first is translated to a +custom python exception ``MyCustomError``, while the second is translated to a +standard python RuntimeError: + +.. code-block:: cpp + + static py::exception exc(m, "MyCustomError"); + py::register_exception_translator([](std::exception_ptr p) { + try { + if (p) std::rethrow_exception(p); + } catch (const MyCustomException &e) { + exc(e.what()); + } catch (const OtherException &e) { + PyErr_SetString(PyExc_RuntimeError, e.what()); + } + }); + +Multiple exceptions can be handled by a single translator, as shown in the +example above. If the exception is not caught by the current translator, the +previously registered one gets a chance. + +If none of the registered exception translators is able to handle the +exception, it is handled by the default converter as described in the previous +section. + +.. 
seealso:: + + The file :file:`tests/test_exceptions.cpp` contains examples + of various custom exception translators and custom exception types. + +.. note:: + + Call either ``PyErr_SetString`` or a custom exception's call + operator (``exc(string)``) for every exception caught in a custom exception + translator. Failure to do so will cause Python to crash with ``SystemError: + error return without exception set``. + + Exceptions that you do not plan to handle should simply not be caught, or + may be explicitly (re-)thrown to delegate it to the other, + previously-declared existing exception translators. + +.. _handling_python_exceptions_cpp: + +Handling exceptions from Python in C++ +====================================== + +When C++ calls Python functions, such as in a callback function or when +manipulating Python objects, and Python raises an ``Exception``, pybind11 +converts the Python exception into a C++ exception of type +:class:`pybind11::error_already_set` whose payload contains a C++ string textual +summary and the actual Python exception. ``error_already_set`` is used to +propagate Python exception back to Python (or possibly, handle them in C++). + +.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}| + ++--------------------------------------+--------------------------------------+ +| Exception raised in Python | Thrown as C++ exception type | ++======================================+======================================+ +| Any Python ``Exception`` | :class:`pybind11::error_already_set` | ++--------------------------------------+--------------------------------------+ + +For example: + +.. 
code-block:: cpp + + try { + // open("missing.txt", "r") + auto file = py::module::import("io").attr("open")("missing.txt", "r"); + auto text = file.attr("read")(); + file.attr("close")(); + } catch (py::error_already_set &e) { + if (e.matches(PyExc_FileNotFoundError)) { + py::print("missing.txt not found"); + } else if (e.matches(PyExc_PermissionError)) { + py::print("missing.txt found but not accessible"); + } else { + throw; + } + } + +Note that C++ to Python exception translation does not apply here, since that is +a method for translating C++ exceptions to Python, not vice versa. The error raised +from Python is always ``error_already_set``. + +This example illustrates this behavior: + +.. code-block:: cpp + + try { + py::eval("raise ValueError('The Ring')"); + } catch (py::value_error &boromir) { + // Boromir never gets the ring + assert(false); + } catch (py::error_already_set &frodo) { + // Frodo gets the ring + py::print("I will take the ring"); + } + + try { + // py::value_error is a request for pybind11 to raise a Python exception + throw py::value_error("The ball"); + } catch (py::error_already_set &cat) { + // cat won't catch the ball since + // py::value_error is not a Python exception + assert(false); + } catch (py::value_error &dog) { + // dog will catch the ball + py::print("Run Spot run"); + throw; // Throw it again (pybind11 will raise ValueError) + } + +Handling errors from the Python C API +===================================== + +Where possible, use :ref:`pybind11 wrappers <wrappers>` instead of calling +the Python C API directly. When calling the Python C API directly, in +addition to manually managing reference counts, one must follow the pybind11 +error protocol, which is outlined here. + +After calling the Python C API, if Python returns an error, +``throw py::error_already_set();``, which allows pybind11 to deal with the +exception and pass it back to the Python interpreter. 
This includes calls to +the error setting functions such as ``PyErr_SetString``. + +.. code-block:: cpp + + PyErr_SetString(PyExc_TypeError, "C API type error demo"); + throw py::error_already_set(); + + // But it would be easier to simply... + throw py::type_error("pybind11 wrapper type error"); + +Alternately, to ignore the error, call `PyErr_Clear +`_. + +Any Python error must be thrown or cleared, or Python/pybind11 will be left in +an invalid state. + +.. _unraisable_exceptions: + +Handling unraisable exceptions +============================== + +If a Python function invoked from a C++ destructor or any function marked +``noexcept(true)`` (collectively, "noexcept functions") throws an exception, there +is no way to propagate the exception, as such functions may not throw. +Should they throw or fail to catch any exceptions in their call graph, +the C++ runtime calls ``std::terminate()`` to abort immediately. + +Similarly, Python exceptions raised in a class's ``__del__`` method do not +propagate, but are logged by Python as an unraisable error. In Python 3.8+, a +`system hook is triggered +`_ +and an auditing event is logged. + +Any noexcept function should have a try-catch block that traps +:class:`error_already_set` (or any other exception that can occur). Note that +pybind11 wrappers around Python exceptions such as +:class:`pybind11::value_error` are *not* Python exceptions; they are C++ +exceptions that pybind11 catches and converts to Python exceptions. Noexcept +functions cannot propagate these exceptions either. A useful approach is to +convert them to Python exceptions and then ``discard_as_unraisable`` as shown +below. + +.. code-block:: cpp + + void nonthrowing_func() noexcept(true) { + try { + // ... + } catch (py::error_already_set &eas) { + // Discard the Python error using Python APIs, using the C++ magic + // variable __func__. Python already knows the type and value of the + // exception object. 
+ eas.discard_as_unraisable(__func__); + } catch (const std::exception &e) { + // Log and discard C++ exceptions. + third_party::log(e); + } + } + +.. versionadded:: 2.6 diff --git a/diffvg/pybind11/docs/advanced/functions.rst b/diffvg/pybind11/docs/advanced/functions.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e33c9cf7da88d9d9669f33fadcfc11ccf0d2698 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/functions.rst @@ -0,0 +1,537 @@ +Functions +######### + +Before proceeding with this section, make sure that you are already familiar +with the basics of binding functions and classes, as explained in :doc:`/basics` +and :doc:`/classes`. The following guide is applicable to both free and member +functions, i.e. *methods* in Python. + +.. _return_value_policies: + +Return value policies +===================== + +Python and C++ use fundamentally different ways of managing the memory and +lifetime of objects managed by them. This can lead to issues when creating +bindings for functions that return a non-trivial type. Just by looking at the +type information, it is not clear whether Python should take charge of the +returned value and eventually free its resources, or if this is handled on the +C++ side. For this reason, pybind11 provides a several *return value policy* +annotations that can be passed to the :func:`module::def` and +:func:`class_::def` functions. The default policy is +:enum:`return_value_policy::automatic`. + +Return value policies are tricky, and it's very important to get them right. +Just to illustrate what can go wrong, consider the following simple example: + +.. code-block:: cpp + + /* Function declaration */ + Data *get_data() { return _data; /* (pointer to a static data structure) */ } + ... + + /* Binding code */ + m.def("get_data", &get_data); // <-- KABOOM, will cause crash when called from Python + +What's going on here? 
When ``get_data()`` is called from Python, the return +value (a native C++ type) must be wrapped to turn it into a usable Python type. +In this case, the default return value policy (:enum:`return_value_policy::automatic`) +causes pybind11 to assume ownership of the static ``_data`` instance. + +When Python's garbage collector eventually deletes the Python +wrapper, pybind11 will also attempt to delete the C++ instance (via ``operator +delete()``) due to the implied ownership. At this point, the entire application +will come crashing down, though errors could also be more subtle and involve +silent data corruption. + +In the above example, the policy :enum:`return_value_policy::reference` should have +been specified so that the global data instance is only *referenced* without any +implied transfer of ownership, i.e.: + +.. code-block:: cpp + + m.def("get_data", &get_data, return_value_policy::reference); + +On the other hand, this is not the right policy for many other situations, +where ignoring ownership could lead to resource leaks. +As a developer using pybind11, it's important to be familiar with the different +return value policies, including which situation calls for which one of them. +The following table provides an overview of available policies: + +.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}| + ++--------------------------------------------------+----------------------------------------------------------------------------+ +| Return value policy | Description | ++==================================================+============================================================================+ +| :enum:`return_value_policy::take_ownership` | Reference an existing object (i.e. do not create a new copy) and take | +| | ownership. Python will call the destructor and delete operator when the | +| | object's reference count reaches zero. Undefined behavior ensues when the | +| | C++ side does the same, or when the data was not dynamically allocated. 
| ++--------------------------------------------------+----------------------------------------------------------------------------+ +| :enum:`return_value_policy::copy` | Create a new copy of the returned object, which will be owned by Python. | +| | This policy is comparably safe because the lifetimes of the two instances | +| | are decoupled. | ++--------------------------------------------------+----------------------------------------------------------------------------+ +| :enum:`return_value_policy::move` | Use ``std::move`` to move the return value contents into a new instance | +| | that will be owned by Python. This policy is comparably safe because the | +| | lifetimes of the two instances (move source and destination) are decoupled.| ++--------------------------------------------------+----------------------------------------------------------------------------+ +| :enum:`return_value_policy::reference` | Reference an existing object, but do not take ownership. The C++ side is | +| | responsible for managing the object's lifetime and deallocating it when | +| | it is no longer used. Warning: undefined behavior will ensue when the C++ | +| | side deletes an object that is still referenced and used by Python. | ++--------------------------------------------------+----------------------------------------------------------------------------+ +| :enum:`return_value_policy::reference_internal` | Indicates that the lifetime of the return value is tied to the lifetime | +| | of a parent object, namely the implicit ``this``, or ``self`` argument of | +| | the called method or property. Internally, this policy works just like | +| | :enum:`return_value_policy::reference` but additionally applies a | +| | ``keep_alive<0, 1>`` *call policy* (described in the next section) that | +| | prevents the parent object from being garbage collected as long as the | +| | return value is referenced by Python. 
This is the default policy for | +| | property getters created via ``def_property``, ``def_readwrite``, etc. | ++--------------------------------------------------+----------------------------------------------------------------------------+ +| :enum:`return_value_policy::automatic` | **Default policy.** This policy falls back to the policy | +| | :enum:`return_value_policy::take_ownership` when the return value is a | +| | pointer. Otherwise, it uses :enum:`return_value_policy::move` or | +| | :enum:`return_value_policy::copy` for rvalue and lvalue references, | +| | respectively. See above for a description of what all of these different | +| | policies do. | ++--------------------------------------------------+----------------------------------------------------------------------------+ +| :enum:`return_value_policy::automatic_reference` | As above, but use policy :enum:`return_value_policy::reference` when the | +| | return value is a pointer. This is the default conversion policy for | +| | function arguments when calling Python functions manually from C++ code | +| | (i.e. via handle::operator()). You probably won't need to use this. | ++--------------------------------------------------+----------------------------------------------------------------------------+ + +Return value policies can also be applied to properties: + +.. code-block:: cpp + + class_(m, "MyClass") + .def_property("data", &MyClass::getData, &MyClass::setData, + py::return_value_policy::copy); + +Technically, the code above applies the policy to both the getter and the +setter function, however, the setter doesn't really care about *return* +value policies which makes this a convenient terse syntax. Alternatively, +targeted arguments can be passed through the :class:`cpp_function` constructor: + +.. code-block:: cpp + + class_(m, "MyClass") + .def_property("data" + py::cpp_function(&MyClass::getData, py::return_value_policy::copy), + py::cpp_function(&MyClass::setData) + ); + +.. 
warning:: + + Code with invalid return value policies might access uninitialized memory or + free data structures multiple times, which can lead to hard-to-debug + non-determinism and segmentation faults, hence it is worth spending the + time to understand all the different options in the table above. + +.. note:: + + One important aspect of the above policies is that they only apply to + instances which pybind11 has *not* seen before, in which case the policy + clarifies essential questions about the return value's lifetime and + ownership. When pybind11 knows the instance already (as identified by its + type and address in memory), it will return the existing Python object + wrapper rather than creating a new copy. + +.. note:: + + The next section on :ref:`call_policies` discusses *call policies* that can be + specified *in addition* to a return value policy from the list above. Call + policies indicate reference relationships that can involve both return values + and parameters of functions. + +.. note:: + + As an alternative to elaborate call policies and lifetime management logic, + consider using smart pointers (see the section on :ref:`smart_pointers` for + details). Smart pointers can tell whether an object is still referenced from + C++ or Python, which generally eliminates the kinds of inconsistencies that + can lead to crashes or undefined behavior. For functions returning smart + pointers, it is not necessary to specify a return value policy. + +.. _call_policies: + +Additional call policies +======================== + +In addition to the above return value policies, further *call policies* can be +specified to indicate dependencies between parameters or ensure a certain state +for the function call. + +Keep alive +---------- + +In general, this policy is required when the C++ object is any kind of container +and another object is being added to the container. 
``keep_alive`` +indicates that the argument with index ``Patient`` should be kept alive at least +until the argument with index ``Nurse`` is freed by the garbage collector. Argument +indices start at one, while zero refers to the return value. For methods, index +``1`` refers to the implicit ``this`` pointer, while regular arguments begin at +index ``2``. Arbitrarily many call policies can be specified. When a ``Nurse`` +with value ``None`` is detected at runtime, the call policy does nothing. + +When the nurse is not a pybind11-registered type, the implementation internally +relies on the ability to create a *weak reference* to the nurse object. When +the nurse object is not a pybind11-registered type and does not support weak +references, an exception will be thrown. + +Consider the following example: here, the binding code for a list append +operation ties the lifetime of the newly added element to the underlying +container: + +.. code-block:: cpp + + py::class_(m, "List") + .def("append", &List::append, py::keep_alive<1, 2>()); + +For consistency, the argument indexing is identical for constructors. Index +``1`` still refers to the implicit ``this`` pointer, i.e. the object which is +being constructed. Index ``0`` refers to the return type which is presumed to +be ``void`` when a constructor is viewed like a function. The following example +ties the lifetime of the constructor element to the constructed object: + +.. code-block:: cpp + + py::class_(m, "Nurse") + .def(py::init(), py::keep_alive<1, 2>()); + +.. note:: + + ``keep_alive`` is analogous to the ``with_custodian_and_ward`` (if Nurse, + Patient != 0) and ``with_custodian_and_ward_postcall`` (if Nurse/Patient == + 0) policies from Boost.Python. + +Call guard +---------- + +The ``call_guard`` policy allows any scope guard type ``T`` to be placed +around the function call. For example, this definition: + +.. 
code-block:: cpp + + m.def("foo", foo, py::call_guard()); + +is equivalent to the following pseudocode: + +.. code-block:: cpp + + m.def("foo", [](args...) { + T scope_guard; + return foo(args...); // forwarded arguments + }); + +The only requirement is that ``T`` is default-constructible, but otherwise any +scope guard will work. This is very useful in combination with `gil_scoped_release`. +See :ref:`gil`. + +Multiple guards can also be specified as ``py::call_guard``. The +constructor order is left to right and destruction happens in reverse. + +.. seealso:: + + The file :file:`tests/test_call_policies.cpp` contains a complete example + that demonstrates using `keep_alive` and `call_guard` in more detail. + +.. _python_objects_as_args: + +Python objects as arguments +=========================== + +pybind11 exposes all major Python types using thin C++ wrapper classes. These +wrapper classes can also be used as parameters of functions in bindings, which +makes it possible to directly work with native Python types on the C++ side. +For instance, the following statement iterates over a Python ``dict``: + +.. code-block:: cpp + + void print_dict(py::dict dict) { + /* Easily interact with Python types */ + for (auto item : dict) + std::cout << "key=" << std::string(py::str(item.first)) << ", " + << "value=" << std::string(py::str(item.second)) << std::endl; + } + +It can be exported: + +.. code-block:: cpp + + m.def("print_dict", &print_dict); + +And used in Python as usual: + +.. code-block:: pycon + + >>> print_dict({'foo': 123, 'bar': 'hello'}) + key=foo, value=123 + key=bar, value=hello + +For more information on using Python objects in C++, see :doc:`/advanced/pycpp/index`. + +Accepting \*args and \*\*kwargs +=============================== + +Python provides a useful mechanism to define functions that accept arbitrary +numbers of arguments and keyword arguments: + +.. code-block:: python + + def generic(*args, **kwargs): + ... 
# do something with args and kwargs + +Such functions can also be created using pybind11: + +.. code-block:: cpp + + void generic(py::args args, py::kwargs kwargs) { + /// .. do something with args + if (kwargs) + /// .. do something with kwargs + } + + /// Binding code + m.def("generic", &generic); + +The class ``py::args`` derives from ``py::tuple`` and ``py::kwargs`` derives +from ``py::dict``. + +You may also use just one or the other, and may combine these with other +arguments as long as the ``py::args`` and ``py::kwargs`` arguments are the last +arguments accepted by the function. + +Please refer to the other examples for details on how to iterate over these, +and on how to cast their entries into C++ objects. A demonstration is also +available in ``tests/test_kwargs_and_defaults.cpp``. + +.. note:: + + When combining \*args or \*\*kwargs with :ref:`keyword_args` you should + *not* include ``py::arg`` tags for the ``py::args`` and ``py::kwargs`` + arguments. + +Default arguments revisited +=========================== + +The section on :ref:`default_args` previously discussed basic usage of default +arguments using pybind11. One noteworthy aspect of their implementation is that +default arguments are converted to Python objects right at declaration time. +Consider the following example: + +.. code-block:: cpp + + py::class_("MyClass") + .def("myFunction", py::arg("arg") = SomeType(123)); + +In this case, pybind11 must already be set up to deal with values of the type +``SomeType`` (via a prior instantiation of ``py::class_``), or an +exception will be thrown. + +Another aspect worth highlighting is that the "preview" of the default argument +in the function signature is generated using the object's ``__repr__`` method. +If not available, the signature may not be very helpful, e.g.: + +.. code-block:: pycon + + FUNCTIONS + ... + | myFunction(...) + | Signature : (MyClass, arg : SomeType = ) -> NoneType + ... 
+ +The first way of addressing this is by defining ``SomeType.__repr__``. +Alternatively, it is possible to specify the human-readable preview of the +default argument manually using the ``arg_v`` notation: + +.. code-block:: cpp + + py::class_("MyClass") + .def("myFunction", py::arg_v("arg", SomeType(123), "SomeType(123)")); + +Sometimes it may be necessary to pass a null pointer value as a default +argument. In this case, remember to cast it to the underlying type in question, +like so: + +.. code-block:: cpp + + py::class_("MyClass") + .def("myFunction", py::arg("arg") = (SomeType *) nullptr); + +Keyword-only arguments +====================== + +Python 3 introduced keyword-only arguments by specifying an unnamed ``*`` +argument in a function definition: + +.. code-block:: python + + def f(a, *, b): # a can be positional or via keyword; b must be via keyword + pass + + f(a=1, b=2) # good + f(b=2, a=1) # good + f(1, b=2) # good + f(1, 2) # TypeError: f() takes 1 positional argument but 2 were given + +Pybind11 provides a ``py::kwonly`` object that allows you to implement +the same behaviour by specifying the object between positional and keyword-only +argument annotations when registering the function: + +.. code-block:: cpp + + m.def("f", [](int a, int b) { /* ... */ }, + py::arg("a"), py::kwonly(), py::arg("b")); + +Note that, as in Python, you cannot combine this with a ``py::args`` argument. +This feature does *not* require Python 3 to work. + +.. versionadded:: 2.6 + +.. _nonconverting_arguments: + +Non-converting arguments +======================== + +Certain argument types may support conversion from one type to another. Some +examples of conversions are: + +* :ref:`implicit_conversions` declared using ``py::implicitly_convertible()`` +* Calling a method accepting a double with an integer argument +* Calling a ``std::complex`` argument with a non-complex python type + (for example, with a float). (Requires the optional ``pybind11/complex.h`` + header). 
+* Calling a function taking an Eigen matrix reference with a numpy array of the + wrong type or of an incompatible data layout. (Requires the optional + ``pybind11/eigen.h`` header). + +This behaviour is sometimes undesirable: the binding code may prefer to raise +an error rather than convert the argument. This behaviour can be obtained +through ``py::arg`` by calling the ``.noconvert()`` method of the ``py::arg`` +object, such as: + +.. code-block:: cpp + + m.def("floats_only", [](double f) { return 0.5 * f; }, py::arg("f").noconvert()); + m.def("floats_preferred", [](double f) { return 0.5 * f; }, py::arg("f")); + +Attempting to call the second function (the one without ``.noconvert()``) with +an integer will succeed, but attempting to call the ``.noconvert()`` version +will fail with a ``TypeError``: + +.. code-block:: pycon + + >>> floats_preferred(4) + 2.0 + >>> floats_only(4) + Traceback (most recent call last): + File "", line 1, in + TypeError: floats_only(): incompatible function arguments. The following argument types are supported: + 1. (f: float) -> float + + Invoked with: 4 + +You may, of course, combine this with the :var:`_a` shorthand notation (see +:ref:`keyword_args`) and/or :ref:`default_args`. It is also permitted to omit +the argument name by using the ``py::arg()`` constructor without an argument +name, i.e. by specifying ``py::arg().noconvert()``. + +.. note:: + + When specifying ``py::arg`` options it is necessary to provide the same + number of options as the bound function has arguments. Thus if you want to + enable no-convert behaviour for just one of several arguments, you will + need to specify a ``py::arg()`` annotation for each argument with the + no-convert argument modified to ``py::arg().noconvert()``. + +.. 
_none_arguments: + +Allow/Prohibiting None arguments +================================ + +When a C++ type registered with :class:`py::class_` is passed as an argument to +a function taking the instance as pointer or shared holder (e.g. ``shared_ptr`` +or a custom, copyable holder as described in :ref:`smart_pointers`), pybind +allows ``None`` to be passed from Python which results in calling the C++ +function with ``nullptr`` (or an empty holder) for the argument. + +To explicitly enable or disable this behaviour, using the +``.none`` method of the :class:`py::arg` object: + +.. code-block:: cpp + + py::class_(m, "Dog").def(py::init<>()); + py::class_(m, "Cat").def(py::init<>()); + m.def("bark", [](Dog *dog) -> std::string { + if (dog) return "woof!"; /* Called with a Dog instance */ + else return "(no dog)"; /* Called with None, dog == nullptr */ + }, py::arg("dog").none(true)); + m.def("meow", [](Cat *cat) -> std::string { + // Can't be called with None argument + return "meow"; + }, py::arg("cat").none(false)); + +With the above, the Python call ``bark(None)`` will return the string ``"(no +dog)"``, while attempting to call ``meow(None)`` will raise a ``TypeError``: + +.. code-block:: pycon + + >>> from animals import Dog, Cat, bark, meow + >>> bark(Dog()) + 'woof!' + >>> meow(Cat()) + 'meow' + >>> bark(None) + '(no dog)' + >>> meow(None) + Traceback (most recent call last): + File "", line 1, in + TypeError: meow(): incompatible function arguments. The following argument types are supported: + 1. (cat: animals.Cat) -> str + + Invoked with: None + +The default behaviour when the tag is unspecified is to allow ``None``. + +.. note:: + + Even when ``.none(true)`` is specified for an argument, ``None`` will be converted to a + ``nullptr`` *only* for custom and :ref:`opaque ` types. Pointers to built-in types + (``double *``, ``int *``, ...) 
and STL types (``std::vector *``, ...; if ``pybind11/stl.h`` + is included) are copied when converted to C++ (see :doc:`/advanced/cast/overview`) and will + not allow ``None`` as argument. To pass optional argument of these copied types consider + using ``std::optional`` + +Overload resolution order +========================= + +When a function or method with multiple overloads is called from Python, +pybind11 determines which overload to call in two passes. The first pass +attempts to call each overload without allowing argument conversion (as if +every argument had been specified as ``py::arg().noconvert()`` as described +above). + +If no overload succeeds in the no-conversion first pass, a second pass is +attempted in which argument conversion is allowed (except where prohibited via +an explicit ``py::arg().noconvert()`` attribute in the function definition). + +If the second pass also fails a ``TypeError`` is raised. + +Within each pass, overloads are tried in the order they were registered with +pybind11. + +What this means in practice is that pybind11 will prefer any overload that does +not require conversion of arguments to an overload that does, but otherwise prefers +earlier-defined overloads to later-defined ones. + +.. note:: + + pybind11 does *not* further prioritize based on the number/pattern of + overloaded arguments. That is, pybind11 does not prioritize a function + requiring one conversion over one requiring three, but only prioritizes + overloads requiring no conversion at all to overloads that require + conversion of at least one argument. diff --git a/diffvg/pybind11/docs/advanced/misc.rst b/diffvg/pybind11/docs/advanced/misc.rst new file mode 100644 index 0000000000000000000000000000000000000000..0a73dae7e7f675736b0b0aea5aa740154988bf47 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/misc.rst @@ -0,0 +1,337 @@ +Miscellaneous +############# + +.. 
_macro_notes: + +General notes regarding convenience macros +========================================== + +pybind11 provides a few convenience macros such as +:func:`PYBIND11_DECLARE_HOLDER_TYPE` and ``PYBIND11_OVERLOAD_*``. Since these +are "just" macros that are evaluated in the preprocessor (which has no concept +of types), they *will* get confused by commas in a template argument; for +example, consider: + +.. code-block:: cpp + + PYBIND11_OVERLOAD(MyReturnType, Class, func) + +The limitation of the C preprocessor interprets this as five arguments (with new +arguments beginning after each comma) rather than three. To get around this, +there are two alternatives: you can use a type alias, or you can wrap the type +using the ``PYBIND11_TYPE`` macro: + +.. code-block:: cpp + + // Version 1: using a type alias + using ReturnType = MyReturnType; + using ClassType = Class; + PYBIND11_OVERLOAD(ReturnType, ClassType, func); + + // Version 2: using the PYBIND11_TYPE macro: + PYBIND11_OVERLOAD(PYBIND11_TYPE(MyReturnType), + PYBIND11_TYPE(Class), func) + +The ``PYBIND11_MAKE_OPAQUE`` macro does *not* require the above workarounds. + +.. _gil: + +Global Interpreter Lock (GIL) +============================= + +When calling a C++ function from Python, the GIL is always held. +The classes :class:`gil_scoped_release` and :class:`gil_scoped_acquire` can be +used to acquire and release the global interpreter lock in the body of a C++ +function call. In this way, long-running C++ code can be parallelized using +multiple Python threads. Taking :ref:`overriding_virtuals` as an example, this +could be realized as follows (important changes highlighted): + +.. 
code-block:: cpp + :emphasize-lines: 8,9,31,32 + + class PyAnimal : public Animal { + public: + /* Inherit the constructors */ + using Animal::Animal; + + /* Trampoline (need one for each virtual function) */ + std::string go(int n_times) { + /* Acquire GIL before calling Python code */ + py::gil_scoped_acquire acquire; + + PYBIND11_OVERLOAD_PURE( + std::string, /* Return type */ + Animal, /* Parent class */ + go, /* Name of function */ + n_times /* Argument(s) */ + ); + } + }; + + PYBIND11_MODULE(example, m) { + py::class_ animal(m, "Animal"); + animal + .def(py::init<>()) + .def("go", &Animal::go); + + py::class_(m, "Dog", animal) + .def(py::init<>()); + + m.def("call_go", [](Animal *animal) -> std::string { + /* Release GIL before calling into (potentially long-running) C++ code */ + py::gil_scoped_release release; + return call_go(animal); + }); + } + +The ``call_go`` wrapper can also be simplified using the `call_guard` policy +(see :ref:`call_policies`) which yields the same result: + +.. code-block:: cpp + + m.def("call_go", &call_go, py::call_guard()); + + +Binding sequence data types, iterators, the slicing protocol, etc. +================================================================== + +Please refer to the supplemental example for details. + +.. seealso:: + + The file :file:`tests/test_sequences_and_iterators.cpp` contains a + complete example that shows how to bind a sequence data type, including + length queries (``__len__``), iterators (``__iter__``), the slicing + protocol and other kinds of useful operations. + + +Partitioning code over multiple extension modules +================================================= + +It's straightforward to split binding code over multiple extension modules, +while referencing types that are declared elsewhere. Everything "just" works +without any special precautions. One exception to this rule occurs when +extending a type declared in another extension module. 
Recall the basic example +from Section :ref:`inheritance`. + +.. code-block:: cpp + + py::class_ pet(m, "Pet"); + pet.def(py::init()) + .def_readwrite("name", &Pet::name); + + py::class_(m, "Dog", pet /* <- specify parent */) + .def(py::init()) + .def("bark", &Dog::bark); + +Suppose now that ``Pet`` bindings are defined in a module named ``basic``, +whereas the ``Dog`` bindings are defined somewhere else. The challenge is of +course that the variable ``pet`` is not available anymore though it is needed +to indicate the inheritance relationship to the constructor of ``class_``. +However, it can be acquired as follows: + +.. code-block:: cpp + + py::object pet = (py::object) py::module::import("basic").attr("Pet"); + + py::class_(m, "Dog", pet) + .def(py::init()) + .def("bark", &Dog::bark); + +Alternatively, you can specify the base class as a template parameter option to +``class_``, which performs an automated lookup of the corresponding Python +type. Like the above code, however, this also requires invoking the ``import`` +function once to ensure that the pybind11 binding code of the module ``basic`` +has been executed: + +.. code-block:: cpp + + py::module::import("basic"); + + py::class_(m, "Dog") + .def(py::init()) + .def("bark", &Dog::bark); + +Naturally, both methods will fail when there are cyclic dependencies. + +Note that pybind11 code compiled with hidden-by-default symbol visibility (e.g. +via the command line flag ``-fvisibility=hidden`` on GCC/Clang), which is +required for proper pybind11 functionality, can interfere with the ability to +access types defined in another extension module. Working around this requires +manually exporting types that are accessed by multiple extension modules; +pybind11 provides a macro to do just this: + +.. code-block:: cpp + + class PYBIND11_EXPORT Dog : public Animal { + ... + }; + +Note also that it is possible (although would rarely be required) to share arbitrary +C++ objects between extension modules at runtime. 
Internal library data is shared +between modules using capsule machinery [#f6]_ which can be also utilized for +storing, modifying and accessing user-defined data. Note that an extension module +will "see" other extensions' data if and only if they were built with the same +pybind11 version. Consider the following example: + +.. code-block:: cpp + + auto data = (MyData *) py::get_shared_data("mydata"); + if (!data) + data = (MyData *) py::set_shared_data("mydata", new MyData(42)); + +If the above snippet was used in several separately compiled extension modules, +the first one to be imported would create a ``MyData`` instance and associate +a ``"mydata"`` key with a pointer to it. Extensions that are imported later +would be then able to access the data behind the same pointer. + +.. [#f6] https://docs.python.org/3/extending/extending.html#using-capsules + +Module Destructors +================== + +pybind11 does not provide an explicit mechanism to invoke cleanup code at +module destruction time. In rare cases where such functionality is required, it +is possible to emulate it using Python capsules or weak references with a +destruction callback. + +.. code-block:: cpp + + auto cleanup_callback = []() { + // perform cleanup here -- this function is called with the GIL held + }; + + m.add_object("_cleanup", py::capsule(cleanup_callback)); + +This approach has the potential downside that instances of classes exposed +within the module may still be alive when the cleanup callback is invoked +(whether this is acceptable will generally depend on the application). + +Alternatively, the capsule may also be stashed within a type object, which +ensures that it not called before all instances of that type have been +collected: + +.. code-block:: cpp + + auto cleanup_callback = []() { /* ... 
*/ }; + m.attr("BaseClass").attr("_cleanup") = py::capsule(cleanup_callback); + +Both approaches also expose a potentially dangerous ``_cleanup`` attribute in +Python, which may be undesirable from an API standpoint (a premature explicit +call from Python might lead to undefined behavior). Yet another approach that +avoids this issue involves weak reference with a cleanup callback: + +.. code-block:: cpp + + // Register a callback function that is invoked when the BaseClass object is colelcted + py::cpp_function cleanup_callback( + [](py::handle weakref) { + // perform cleanup here -- this function is called with the GIL held + + weakref.dec_ref(); // release weak reference + } + ); + + // Create a weak reference with a cleanup callback and initially leak it + (void) py::weakref(m.attr("BaseClass"), cleanup_callback).release(); + +.. note:: + + PyPy (at least version 5.9) does not garbage collect objects when the + interpreter exits. An alternative approach (which also works on CPython) is to use + the :py:mod:`atexit` module [#f7]_, for example: + + .. code-block:: cpp + + auto atexit = py::module::import("atexit"); + atexit.attr("register")(py::cpp_function([]() { + // perform cleanup here -- this function is called with the GIL held + })); + + .. [#f7] https://docs.python.org/3/library/atexit.html + + +Generating documentation using Sphinx +===================================== + +Sphinx [#f4]_ has the ability to inspect the signatures and documentation +strings in pybind11-based extension modules to automatically generate beautiful +documentation in a variety formats. The python_example repository [#f5]_ contains a +simple example repository which uses this approach. + +There are two potential gotchas when using this approach: first, make sure that +the resulting strings do not contain any :kbd:`TAB` characters, which break the +docstring parsing routines. You may want to use C++11 raw string literals, +which are convenient for multi-line comments. 
Conveniently, any excess
indentation will automatically be removed by Sphinx.
+ | __init__(self: example.Foo, arg0: ns::Bar) -> None + ^^^^^^^ + + +This limitation can be circumvented by ensuring that C++ classes are registered with pybind11 +before they are used as a parameter or return type of a function: + +.. code-block:: cpp + + PYBIND11_MODULE(example, m) { + + auto pyFoo = py::class_(m, "Foo"); + auto pyBar = py::class_(m, "Bar"); + + pyFoo.def(py::init()); + pyBar.def(py::init()); + } diff --git a/diffvg/pybind11/docs/advanced/pycpp/index.rst b/diffvg/pybind11/docs/advanced/pycpp/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..6885bdcff1b56bbab5605873ccb1e0676864bb03 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/pycpp/index.rst @@ -0,0 +1,13 @@ +Python C++ interface +#################### + +pybind11 exposes Python types and functions using thin C++ wrappers, which +makes it possible to conveniently call Python code from C++ without resorting +to Python's C API. + +.. toctree:: + :maxdepth: 2 + + object + numpy + utilities diff --git a/diffvg/pybind11/docs/advanced/pycpp/numpy.rst b/diffvg/pybind11/docs/advanced/pycpp/numpy.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e5c6092c471f1006672e4060def4fd31786622b --- /dev/null +++ b/diffvg/pybind11/docs/advanced/pycpp/numpy.rst @@ -0,0 +1,436 @@ +.. _numpy: + +NumPy +##### + +Buffer protocol +=============== + +Python supports an extremely general and convenient approach for exchanging +data between plugin libraries. Types can expose a buffer view [#f2]_, which +provides fast direct access to the raw internal data representation. Suppose we +want to bind the following simplistic Matrix class: + +.. 
code-block:: cpp + + class Matrix { + public: + Matrix(size_t rows, size_t cols) : m_rows(rows), m_cols(cols) { + m_data = new float[rows*cols]; + } + float *data() { return m_data; } + size_t rows() const { return m_rows; } + size_t cols() const { return m_cols; } + private: + size_t m_rows, m_cols; + float *m_data; + }; + +The following binding code exposes the ``Matrix`` contents as a buffer object, +making it possible to cast Matrices into NumPy arrays. It is even possible to +completely avoid copy operations with Python expressions like +``np.array(matrix_instance, copy = False)``. + +.. code-block:: cpp + + py::class_(m, "Matrix", py::buffer_protocol()) + .def_buffer([](Matrix &m) -> py::buffer_info { + return py::buffer_info( + m.data(), /* Pointer to buffer */ + sizeof(float), /* Size of one scalar */ + py::format_descriptor::format(), /* Python struct-style format descriptor */ + 2, /* Number of dimensions */ + { m.rows(), m.cols() }, /* Buffer dimensions */ + { sizeof(float) * m.cols(), /* Strides (in bytes) for each index */ + sizeof(float) } + ); + }); + +Supporting the buffer protocol in a new type involves specifying the special +``py::buffer_protocol()`` tag in the ``py::class_`` constructor and calling the +``def_buffer()`` method with a lambda function that creates a +``py::buffer_info`` description record on demand describing a given matrix +instance. The contents of ``py::buffer_info`` mirror the Python buffer protocol +specification. + +.. code-block:: cpp + + struct buffer_info { + void *ptr; + ssize_t itemsize; + std::string format; + ssize_t ndim; + std::vector shape; + std::vector strides; + }; + +To create a C++ function that can take a Python buffer object as an argument, +simply use the type ``py::buffer`` as one of its arguments. Buffers can exist +in a great variety of configurations, hence some safety checks are usually +necessary in the function body. 
Below, you can see a basic example on how to +define a custom constructor for the Eigen double precision matrix +(``Eigen::MatrixXd``) type, which supports initialization from compatible +buffer objects (e.g. a NumPy matrix). + +.. code-block:: cpp + + /* Bind MatrixXd (or some other Eigen type) to Python */ + typedef Eigen::MatrixXd Matrix; + + typedef Matrix::Scalar Scalar; + constexpr bool rowMajor = Matrix::Flags & Eigen::RowMajorBit; + + py::class_(m, "Matrix", py::buffer_protocol()) + .def(py::init([](py::buffer b) { + typedef Eigen::Stride Strides; + + /* Request a buffer descriptor from Python */ + py::buffer_info info = b.request(); + + /* Some sanity checks ... */ + if (info.format != py::format_descriptor::format()) + throw std::runtime_error("Incompatible format: expected a double array!"); + + if (info.ndim != 2) + throw std::runtime_error("Incompatible buffer dimension!"); + + auto strides = Strides( + info.strides[rowMajor ? 0 : 1] / (py::ssize_t)sizeof(Scalar), + info.strides[rowMajor ? 1 : 0] / (py::ssize_t)sizeof(Scalar)); + + auto map = Eigen::Map( + static_cast(info.ptr), info.shape[0], info.shape[1], strides); + + return Matrix(map); + })); + +For reference, the ``def_buffer()`` call for this Eigen data type should look +as follows: + +.. code-block:: cpp + + .def_buffer([](Matrix &m) -> py::buffer_info { + return py::buffer_info( + m.data(), /* Pointer to buffer */ + sizeof(Scalar), /* Size of one scalar */ + py::format_descriptor::format(), /* Python struct-style format descriptor */ + 2, /* Number of dimensions */ + { m.rows(), m.cols() }, /* Buffer dimensions */ + { sizeof(Scalar) * (rowMajor ? m.cols() : 1), + sizeof(Scalar) * (rowMajor ? 1 : m.rows()) } + /* Strides (in bytes) for each index */ + ); + }) + +For a much easier approach of binding Eigen types (although with some +limitations), refer to the section on :doc:`/advanced/cast/eigen`. + +.. 
seealso:: + + The file :file:`tests/test_buffers.cpp` contains a complete example + that demonstrates using the buffer protocol with pybind11 in more detail. + +.. [#f2] http://docs.python.org/3/c-api/buffer.html + +Arrays +====== + +By exchanging ``py::buffer`` with ``py::array`` in the above snippet, we can +restrict the function so that it only accepts NumPy arrays (rather than any +type of Python object satisfying the buffer protocol). + +In many situations, we want to define a function which only accepts a NumPy +array of a certain data type. This is possible via the ``py::array_t`` +template. For instance, the following function requires the argument to be a +NumPy array containing double precision values. + +.. code-block:: cpp + + void f(py::array_t array); + +When it is invoked with a different type (e.g. an integer or a list of +integers), the binding code will attempt to cast the input into a NumPy array +of the requested type. Note that this feature requires the +:file:`pybind11/numpy.h` header to be included. + +Data in NumPy arrays is not guaranteed to packed in a dense manner; +furthermore, entries can be separated by arbitrary column and row strides. +Sometimes, it can be useful to require a function to only accept dense arrays +using either the C (row-major) or Fortran (column-major) ordering. This can be +accomplished via a second template argument with values ``py::array::c_style`` +or ``py::array::f_style``. + +.. code-block:: cpp + + void f(py::array_t array); + +The ``py::array::forcecast`` argument is the default value of the second +template parameter, and it ensures that non-conforming arguments are converted +into an array satisfying the specified requirements instead of trying the next +function overload. + +Structured types +================ + +In order for ``py::array_t`` to work with structured (record) types, we first +need to register the memory layout of the type. 
This can be done via +``PYBIND11_NUMPY_DTYPE`` macro, called in the plugin definition code, which +expects the type followed by field names: + +.. code-block:: cpp + + struct A { + int x; + double y; + }; + + struct B { + int z; + A a; + }; + + // ... + PYBIND11_MODULE(test, m) { + // ... + + PYBIND11_NUMPY_DTYPE(A, x, y); + PYBIND11_NUMPY_DTYPE(B, z, a); + /* now both A and B can be used as template arguments to py::array_t */ + } + +The structure should consist of fundamental arithmetic types, ``std::complex``, +previously registered substructures, and arrays of any of the above. Both C++ +arrays and ``std::array`` are supported. While there is a static assertion to +prevent many types of unsupported structures, it is still the user's +responsibility to use only "plain" structures that can be safely manipulated as +raw memory without violating invariants. + +Vectorizing functions +===================== + +Suppose we want to bind a function with the following signature to Python so +that it can process arbitrary NumPy array arguments (vectors, matrices, general +N-D arrays) in addition to its normal arguments: + +.. code-block:: cpp + + double my_func(int x, float y, double z); + +After including the ``pybind11/numpy.h`` header, this is extremely simple: + +.. code-block:: cpp + + m.def("vectorized_func", py::vectorize(my_func)); + +Invoking the function like below causes 4 calls to be made to ``my_func`` with +each of the array elements. The significant advantage of this compared to +solutions like ``numpy.vectorize()`` is that the loop over the elements runs +entirely on the C++ side and can be crunched down into a tight, optimized loop +by the compiler. The result is returned as a NumPy array of type +``numpy.dtype.float64``. + +.. code-block:: pycon + + >>> x = np.array([[1, 3],[5, 7]]) + >>> y = np.array([[2, 4],[6, 8]]) + >>> z = 3 + >>> result = vectorized_func(x, y, z) + +The scalar argument ``z`` is transparently replicated 4 times. 
The input +arrays ``x`` and ``y`` are automatically converted into the right types (they +are of type ``numpy.dtype.int64`` but need to be ``numpy.dtype.int32`` and +``numpy.dtype.float32``, respectively). + +.. note:: + + Only arithmetic, complex, and POD types passed by value or by ``const &`` + reference are vectorized; all other arguments are passed through as-is. + Functions taking rvalue reference arguments cannot be vectorized. + +In cases where the computation is too complicated to be reduced to +``vectorize``, it will be necessary to create and access the buffer contents +manually. The following snippet contains a complete example that shows how this +works (the code is somewhat contrived, since it could have been done more +simply using ``vectorize``). + +.. code-block:: cpp + + #include + #include + + namespace py = pybind11; + + py::array_t add_arrays(py::array_t input1, py::array_t input2) { + py::buffer_info buf1 = input1.request(), buf2 = input2.request(); + + if (buf1.ndim != 1 || buf2.ndim != 1) + throw std::runtime_error("Number of dimensions must be one"); + + if (buf1.size != buf2.size) + throw std::runtime_error("Input shapes must match"); + + /* No pointer is passed, so NumPy will allocate the buffer */ + auto result = py::array_t(buf1.size); + + py::buffer_info buf3 = result.request(); + + double *ptr1 = (double *) buf1.ptr, + *ptr2 = (double *) buf2.ptr, + *ptr3 = (double *) buf3.ptr; + + for (size_t idx = 0; idx < buf1.shape[0]; idx++) + ptr3[idx] = ptr1[idx] + ptr2[idx]; + + return result; + } + + PYBIND11_MODULE(test, m) { + m.def("add_arrays", &add_arrays, "Add two NumPy arrays"); + } + +.. seealso:: + + The file :file:`tests/test_numpy_vectorize.cpp` contains a complete + example that demonstrates using :func:`vectorize` in more detail. 
+ +Direct access +============= + +For performance reasons, particularly when dealing with very large arrays, it +is often desirable to directly access array elements without internal checking +of dimensions and bounds on every access when indices are known to be already +valid. To avoid such checks, the ``array`` class and ``array_t`` template +class offer an unchecked proxy object that can be used for this unchecked +access through the ``unchecked`` and ``mutable_unchecked`` methods, +where ``N`` gives the required dimensionality of the array: + +.. code-block:: cpp + + m.def("sum_3d", [](py::array_t x) { + auto r = x.unchecked<3>(); // x must have ndim = 3; can be non-writeable + double sum = 0; + for (ssize_t i = 0; i < r.shape(0); i++) + for (ssize_t j = 0; j < r.shape(1); j++) + for (ssize_t k = 0; k < r.shape(2); k++) + sum += r(i, j, k); + return sum; + }); + m.def("increment_3d", [](py::array_t x) { + auto r = x.mutable_unchecked<3>(); // Will throw if ndim != 3 or flags.writeable is false + for (ssize_t i = 0; i < r.shape(0); i++) + for (ssize_t j = 0; j < r.shape(1); j++) + for (ssize_t k = 0; k < r.shape(2); k++) + r(i, j, k) += 1.0; + }, py::arg().noconvert()); + +To obtain the proxy from an ``array`` object, you must specify both the data +type and number of dimensions as template arguments, such as ``auto r = +myarray.mutable_unchecked()``. + +If the number of dimensions is not known at compile time, you can omit the +dimensions template parameter (i.e. calling ``arr_t.unchecked()`` or +``arr.unchecked()``. This will give you a proxy object that works in the +same way, but results in less optimizable code and thus a small efficiency +loss in tight loops. + +Note that the returned proxy object directly references the array's data, and +only reads its shape, strides, and writeable flag when constructed. 
You must +take care to ensure that the referenced array is not destroyed or reshaped for +the duration of the returned object, typically by limiting the scope of the +returned instance. + +The returned proxy object supports some of the same methods as ``py::array`` so +that it can be used as a drop-in replacement for some existing, index-checked +uses of ``py::array``: + +- ``r.ndim()`` returns the number of dimensions + +- ``r.data(1, 2, ...)`` and ``r.mutable_data(1, 2, ...)``` returns a pointer to + the ``const T`` or ``T`` data, respectively, at the given indices. The + latter is only available to proxies obtained via ``a.mutable_unchecked()``. + +- ``itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``. + +- ``ndim()`` returns the number of dimensions. + +- ``shape(n)`` returns the size of dimension ``n`` + +- ``size()`` returns the total number of elements (i.e. the product of the shapes). + +- ``nbytes()`` returns the number of bytes used by the referenced elements + (i.e. ``itemsize()`` times ``size()``). + +.. seealso:: + + The file :file:`tests/test_numpy_array.cpp` contains additional examples + demonstrating the use of this feature. + +Ellipsis +======== + +Python 3 provides a convenient ``...`` ellipsis notation that is often used to +slice multidimensional arrays. For instance, the following snippet extracts the +middle dimensions of a tensor with the first and last index set to zero. +In Python 2, the syntactic sugar ``...`` is not available, but the singleton +``Ellipsis`` (of type ``ellipsis``) can still be used directly. + +.. code-block:: python + + a = # a NumPy array + b = a[0, ..., 0] + +The function ``py::ellipsis()`` function can be used to perform the same +operation on the C++ side: + +.. code-block:: cpp + + py::array a = /* A NumPy array */; + py::array b = a[py::make_tuple(0, py::ellipsis(), 0)]; + +.. versionchanged:: 2.6 + ``py::ellipsis()`` is now also avaliable in Python 2. 
+ +Memory view +=========== + +For a case when we simply want to provide a direct accessor to C/C++ buffer +without a concrete class object, we can return a ``memoryview`` object. Suppose +we wish to expose a ``memoryview`` for 2x4 uint8_t array, we can do the +following: + +.. code-block:: cpp + + const uint8_t buffer[] = { + 0, 1, 2, 3, + 4, 5, 6, 7 + }; + m.def("get_memoryview2d", []() { + return py::memoryview::from_buffer( + buffer, // buffer pointer + { 2, 4 }, // shape (rows, cols) + { sizeof(uint8_t) * 4, sizeof(uint8_t) } // strides in bytes + ); + }) + +This approach is meant for providing a ``memoryview`` for a C/C++ buffer not +managed by Python. The user is responsible for managing the lifetime of the +buffer. Using a ``memoryview`` created in this way after deleting the buffer in +C++ side results in undefined behavior. + +We can also use ``memoryview::from_memory`` for a simple 1D contiguous buffer: + +.. code-block:: cpp + + m.def("get_memoryview1d", []() { + return py::memoryview::from_memory( + buffer, // buffer pointer + sizeof(uint8_t) * 8 // buffer size + ); + }) + +.. note:: + + ``memoryview::from_memory`` is not available in Python 2. + +.. versionchanged:: 2.6 + ``memoryview::from_memory`` added. diff --git a/diffvg/pybind11/docs/advanced/pycpp/object.rst b/diffvg/pybind11/docs/advanced/pycpp/object.rst new file mode 100644 index 0000000000000000000000000000000000000000..07525d0dc78f5e2ff2f83cd57bdb616413104341 --- /dev/null +++ b/diffvg/pybind11/docs/advanced/pycpp/object.rst @@ -0,0 +1,180 @@ +Python types +############ + +.. _wrappers: + +Available wrappers +================== + +All major Python types are available as thin C++ wrapper classes. These +can also be used as function parameters -- see :ref:`python_objects_as_args`. 
+ +Available types include :class:`handle`, :class:`object`, :class:`bool_`, +:class:`int_`, :class:`float_`, :class:`str`, :class:`bytes`, :class:`tuple`, +:class:`list`, :class:`dict`, :class:`slice`, :class:`none`, :class:`capsule`, +:class:`iterable`, :class:`iterator`, :class:`function`, :class:`buffer`, +:class:`array`, and :class:`array_t`. + +Casting back and forth +====================== + +In this kind of mixed code, it is often necessary to convert arbitrary C++ +types to Python, which can be done using :func:`py::cast`: + +.. code-block:: cpp + + MyClass *cls = ..; + py::object obj = py::cast(cls); + +The reverse direction uses the following syntax: + +.. code-block:: cpp + + py::object obj = ...; + MyClass *cls = obj.cast(); + +When conversion fails, both directions throw the exception :class:`cast_error`. + +.. _python_libs: + +Accessing Python libraries from C++ +=================================== + +It is also possible to import objects defined in the Python standard +library or available in the current Python environment (``sys.path``) and work +with these in C++. + +This example obtains a reference to the Python ``Decimal`` class. + +.. code-block:: cpp + + // Equivalent to "from decimal import Decimal" + py::object Decimal = py::module::import("decimal").attr("Decimal"); + +.. code-block:: cpp + + // Try to import scipy + py::object scipy = py::module::import("scipy"); + return scipy.attr("__version__"); + +.. _calling_python_functions: + +Calling Python functions +======================== + +It is also possible to call Python classes, functions and methods +via ``operator()``. + +.. code-block:: cpp + + // Construct a Python object of class Decimal + py::object pi = Decimal("3.14159"); + +.. 
code-block:: cpp + + // Use Python to make our directories + py::object os = py::module::import("os"); + py::object makedirs = os.attr("makedirs"); + makedirs("/tmp/path/to/somewhere"); + +One can convert the result obtained from Python to a pure C++ version +if a ``py::class_`` or type conversion is defined. + +.. code-block:: cpp + + py::function f = <...>; + py::object result_py = f(1234, "hello", some_instance); + MyClass &result = result_py.cast(); + +.. _calling_python_methods: + +Calling Python methods +======================== + +To call an object's method, one can again use ``.attr`` to obtain access to the +Python method. + +.. code-block:: cpp + + // Calculate e^Ο€ in decimal + py::object exp_pi = pi.attr("exp")(); + py::print(py::str(exp_pi)); + +In the example above ``pi.attr("exp")`` is a *bound method*: it will always call +the method for that same instance of the class. Alternately one can create an +*unbound method* via the Python class (instead of instance) and pass the ``self`` +object explicitly, followed by other arguments. + +.. code-block:: cpp + + py::object decimal_exp = Decimal.attr("exp"); + + // Compute the e^n for n=0..4 + for (int n = 0; n < 5; n++) { + py::print(decimal_exp(Decimal(n)); + } + +Keyword arguments +================= + +Keyword arguments are also supported. In Python, there is the usual call syntax: + +.. code-block:: python + + def f(number, say, to): + ... # function code + + f(1234, say="hello", to=some_instance) # keyword call in Python + +In C++, the same call can be made using: + +.. code-block:: cpp + + using namespace pybind11::literals; // to bring in the `_a` literal + f(1234, "say"_a="hello", "to"_a=some_instance); // keyword call in C++ + +Unpacking arguments +=================== + +Unpacking of ``*args`` and ``**kwargs`` is also possible and can be mixed with +other arguments: + +.. 
code-block:: cpp + + // * unpacking + py::tuple args = py::make_tuple(1234, "hello", some_instance); + f(*args); + + // ** unpacking + py::dict kwargs = py::dict("number"_a=1234, "say"_a="hello", "to"_a=some_instance); + f(**kwargs); + + // mixed keywords, * and ** unpacking + py::tuple args = py::make_tuple(1234); + py::dict kwargs = py::dict("to"_a=some_instance); + f(*args, "say"_a="hello", **kwargs); + +Generalized unpacking according to PEP448_ is also supported: + +.. code-block:: cpp + + py::dict kwargs1 = py::dict("number"_a=1234); + py::dict kwargs2 = py::dict("to"_a=some_instance); + f(**kwargs1, "say"_a="hello", **kwargs2); + +.. seealso:: + + The file :file:`tests/test_pytypes.cpp` contains a complete + example that demonstrates passing native Python types in more detail. The + file :file:`tests/test_callbacks.cpp` presents a few examples of calling + Python functions from C++, including keywords arguments and unpacking. + +.. _PEP448: https://www.python.org/dev/peps/pep-0448/ + +Handling exceptions +=================== + +Python exceptions from wrapper classes will be thrown as a ``py::error_already_set``. +See :ref:`Handling exceptions from Python in C++ +` for more information on handling exceptions +raised when calling C++ wrapper classes. diff --git a/diffvg/pybind11/docs/advanced/pycpp/utilities.rst b/diffvg/pybind11/docs/advanced/pycpp/utilities.rst new file mode 100644 index 0000000000000000000000000000000000000000..369e7c94dbd69f3ce7bb2d837a53ea2853a04efc --- /dev/null +++ b/diffvg/pybind11/docs/advanced/pycpp/utilities.rst @@ -0,0 +1,144 @@ +Utilities +######### + +Using Python's print function in C++ +==================================== + +The usual way to write output in C++ is using ``std::cout`` while in Python one +would use ``print``. Since these methods use different buffers, mixing them can +lead to output order issues. 
To resolve this, pybind11 modules can use the +:func:`py::print` function which writes to Python's ``sys.stdout`` for consistency. + +Python's ``print`` function is replicated in the C++ API including optional +keyword arguments ``sep``, ``end``, ``file``, ``flush``. Everything works as +expected in Python: + +.. code-block:: cpp + + py::print(1, 2.0, "three"); // 1 2.0 three + py::print(1, 2.0, "three", "sep"_a="-"); // 1-2.0-three + + auto args = py::make_tuple("unpacked", true); + py::print("->", *args, "end"_a="<-"); // -> unpacked True <- + +.. _ostream_redirect: + +Capturing standard output from ostream +====================================== + +Often, a library will use the streams ``std::cout`` and ``std::cerr`` to print, +but this does not play well with Python's standard ``sys.stdout`` and ``sys.stderr`` +redirection. Replacing a library's printing with `py::print ` may not +be feasible. This can be fixed using a guard around the library function that +redirects output to the corresponding Python streams: + +.. code-block:: cpp + + #include + + ... + + // Add a scoped redirect for your noisy code + m.def("noisy_func", []() { + py::scoped_ostream_redirect stream( + std::cout, // std::ostream& + py::module::import("sys").attr("stdout") // Python output + ); + call_noisy_func(); + }); + +This method respects flushes on the output streams and will flush if needed +when the scoped guard is destroyed. This allows the output to be redirected in +real time, such as to a Jupyter notebook. The two arguments, the C++ stream and +the Python output, are optional, and default to standard output if not given. An +extra type, `py::scoped_estream_redirect `, is identical +except for defaulting to ``std::cerr`` and ``sys.stderr``; this can be useful with +`py::call_guard`, which allows multiple items, but uses the default constructor: + +.. 
code-block:: py + + // Alternative: Call single function using call guard + m.def("noisy_func", &call_noisy_function, + py::call_guard()); + +The redirection can also be done in Python with the addition of a context +manager, using the `py::add_ostream_redirect() ` function: + +.. code-block:: cpp + + py::add_ostream_redirect(m, "ostream_redirect"); + +The name in Python defaults to ``ostream_redirect`` if no name is passed. This +creates the following context manager in Python: + +.. code-block:: python + + with ostream_redirect(stdout=True, stderr=True): + noisy_function() + +It defaults to redirecting both streams, though you can use the keyword +arguments to disable one of the streams if needed. + +.. note:: + + The above methods will not redirect C-level output to file descriptors, such + as ``fprintf``. For those cases, you'll need to redirect the file + descriptors either directly in C or with Python's ``os.dup2`` function + in an operating-system dependent way. + +.. _eval: + +Evaluating Python expressions from strings and files +==================================================== + +pybind11 provides the `eval`, `exec` and `eval_file` functions to evaluate +Python expressions and statements. The following example illustrates how they +can be used. + +.. code-block:: cpp + + // At beginning of file + #include + + ... + + // Evaluate in scope of main module + py::object scope = py::module::import("__main__").attr("__dict__"); + + // Evaluate an isolated expression + int result = py::eval("my_variable + 10", scope).cast(); + + // Evaluate a sequence of statements + py::exec( + "print('Hello')\n" + "print('world!');", + scope); + + // Evaluate the statements in an separate Python file on disk + py::eval_file("script.py", scope); + +C++11 raw string literals are also supported and quite handy for this purpose. 
+The only requirement is that the first statement must be on a new line following +the raw string delimiter ``R"(``, ensuring all lines have common leading indent: + +.. code-block:: cpp + + py::exec(R"( + x = get_answer() + if x == 42: + print('Hello World!') + else: + print('Bye!') + )", scope + ); + +.. note:: + + `eval` and `eval_file` accept a template parameter that describes how the + string/file should be interpreted. Possible choices include ``eval_expr`` + (isolated expression), ``eval_single_statement`` (a single statement, return + value is always ``none``), and ``eval_statements`` (sequence of statements, + return value is always ``none``). `eval` defaults to ``eval_expr``, + `eval_file` defaults to ``eval_statements`` and `exec` is just a shortcut + for ``eval``. diff --git a/diffvg/pybind11/docs/advanced/smart_ptrs.rst b/diffvg/pybind11/docs/advanced/smart_ptrs.rst new file mode 100644 index 0000000000000000000000000000000000000000..da57748ca585a92000198a6c607a087704b1f07c --- /dev/null +++ b/diffvg/pybind11/docs/advanced/smart_ptrs.rst @@ -0,0 +1,173 @@ +Smart pointers +############## + +std::unique_ptr +=============== + +Given a class ``Example`` with Python bindings, it's possible to return +instances wrapped in C++11 unique pointers, like so + +.. code-block:: cpp + + std::unique_ptr create_example() { return std::unique_ptr(new Example()); } + +.. code-block:: cpp + + m.def("create_example", &create_example); + +In other words, there is nothing special that needs to be done. While returning +unique pointers in this way is allowed, it is *illegal* to use them as function +arguments. For instance, the following function signature cannot be processed +by pybind11. + +.. code-block:: cpp + + void do_something_with_example(std::unique_ptr ex) { ... 
} + +The above signature would imply that Python needs to give up ownership of an +object that is passed to this function, which is generally not possible (for +instance, the object might be referenced elsewhere). + +std::shared_ptr +=============== + +The binding generator for classes, :class:`class_`, can be passed a template +type that denotes a special *holder* type that is used to manage references to +the object. If no such holder type template argument is given, the default for +a type named ``Type`` is ``std::unique_ptr``, which means that the object +is deallocated when Python's reference count goes to zero. + +It is possible to switch to other types of reference counting wrappers or smart +pointers, which is useful in codebases that rely on them. For instance, the +following snippet causes ``std::shared_ptr`` to be used instead. + +.. code-block:: cpp + + py::class_ /* <- holder type */> obj(m, "Example"); + +Note that any particular class can only be associated with a single holder type. + +One potential stumbling block when using holder types is that they need to be +applied consistently. Can you guess what's broken about the following binding +code? + +.. code-block:: cpp + + class Child { }; + + class Parent { + public: + Parent() : child(std::make_shared()) { } + Child *get_child() { return child.get(); } /* Hint: ** DON'T DO THIS ** */ + private: + std::shared_ptr child; + }; + + PYBIND11_MODULE(example, m) { + py::class_>(m, "Child"); + + py::class_>(m, "Parent") + .def(py::init<>()) + .def("get_child", &Parent::get_child); + } + +The following Python code will cause undefined behavior (and likely a +segmentation fault). + +.. code-block:: python + + from example import Parent + print(Parent().get_child()) + +The problem is that ``Parent::get_child()`` returns a pointer to an instance of +``Child``, but the fact that this instance is already managed by +``std::shared_ptr<...>`` is lost when passing raw pointers. 
In this case, +pybind11 will create a second independent ``std::shared_ptr<...>`` that also +claims ownership of the pointer. In the end, the object will be freed **twice** +since these shared pointers have no way of knowing about each other. + +There are two ways to resolve this issue: + +1. For types that are managed by a smart pointer class, never use raw pointers + in function arguments or return values. In other words: always consistently + wrap pointers into their designated holder types (such as + ``std::shared_ptr<...>``). In this case, the signature of ``get_child()`` + should be modified as follows: + +.. code-block:: cpp + + std::shared_ptr get_child() { return child; } + +2. Adjust the definition of ``Child`` by specifying + ``std::enable_shared_from_this`` (see cppreference_ for details) as a + base class. This adds a small bit of information to ``Child`` that allows + pybind11 to realize that there is already an existing + ``std::shared_ptr<...>`` and communicate with it. In this case, the + declaration of ``Child`` should look as follows: + +.. _cppreference: http://en.cppreference.com/w/cpp/memory/enable_shared_from_this + +.. code-block:: cpp + + class Child : public std::enable_shared_from_this { }; + +.. _smart_pointers: + +Custom smart pointers +===================== + +pybind11 supports ``std::unique_ptr`` and ``std::shared_ptr`` right out of the +box. For any other custom smart pointer, transparent conversions can be enabled +using a macro invocation similar to the following. It must be declared at the +top namespace level before any binding code: + +.. code-block:: cpp + + PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr); + +The first argument of :func:`PYBIND11_DECLARE_HOLDER_TYPE` should be a +placeholder name that is used as a template parameter of the second argument. +Thus, feel free to use any identifier, but use it consistently on both sides; +also, don't use the name of a type that already exists in your codebase. 
+ +The macro also accepts a third optional boolean parameter that is set to false +by default. Specify + +.. code-block:: cpp + + PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr, true); + +if ``SmartPtr`` can always be initialized from a ``T*`` pointer without the +risk of inconsistencies (such as multiple independent ``SmartPtr`` instances +believing that they are the sole owner of the ``T*`` pointer). A common +situation where ``true`` should be passed is when the ``T`` instances use +*intrusive* reference counting. + +Please take a look at the :ref:`macro_notes` before using this feature. + +By default, pybind11 assumes that your custom smart pointer has a standard +interface, i.e. provides a ``.get()`` member function to access the underlying +raw pointer. If this is not the case, pybind11's ``holder_helper`` must be +specialized: + +.. code-block:: cpp + + // Always needed for custom holder types + PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr); + + // Only needed if the type's `.get()` goes by another name + namespace pybind11 { namespace detail { + template + struct holder_helper> { // <-- specialization + static const T *get(const SmartPtr &p) { return p.getPointer(); } + }; + }} + +The above specialization informs pybind11 that the custom ``SmartPtr`` class +provides ``.get()`` functionality via ``.getPointer()``. + +.. seealso:: + + The file :file:`tests/test_smart_ptr.cpp` contains a complete example + that demonstrates how to work with custom reference-counting holder types + in more detail. diff --git a/diffvg/pybind11/docs/basics.rst b/diffvg/pybind11/docs/basics.rst new file mode 100644 index 0000000000000000000000000000000000000000..6bb5f98222f8ee7986b386e2af3ee4b1cb98940d --- /dev/null +++ b/diffvg/pybind11/docs/basics.rst @@ -0,0 +1,301 @@ +.. _basics: + +First steps +########### + +This section demonstrates the basic features of pybind11. Before getting +started, make sure that the development environment is set up to compile the +included set of test cases. 
+ + +Compiling the test cases +======================== + +Linux/MacOS +----------- + +On Linux you'll need to install the **python-dev** or **python3-dev** packages as +well as **cmake**. On Mac OS, the included python version works out of the box, +but **cmake** must still be installed. + +After installing the prerequisites, run + +.. code-block:: bash + + mkdir build + cd build + cmake .. + make check -j 4 + +The last line will both compile and run the tests. + +Windows +------- + +On Windows, only **Visual Studio 2015** and newer are supported since pybind11 relies +on various C++11 language features that break older versions of Visual Studio. + +.. Note:: + + To use C++17 in Visual Studio 2017 (MSVC 14.1), pybind11 requires the flag + ``/permissive-`` to be passed to the compiler `to enforce standard conformance`_. When + building with Visual Studio 2019, this is not strictly necessary, but still advised. + +.. _`to enforce standard conformance`: https://docs.microsoft.com/en-us/cpp/build/reference/permissive-standards-conformance?view=vs-2017 + +To compile and run the tests: + +.. code-block:: batch + + mkdir build + cd build + cmake .. + cmake --build . --config Release --target check + +This will create a Visual Studio project, compile and run the target, all from the +command line. + +.. Note:: + + If all tests fail, make sure that the Python binary and the testcases are compiled + for the same processor type and bitness (i.e. either **i386** or **x86_64**). You + can specify **x86_64** as the target architecture for the generated Visual Studio + project using ``cmake -A x64 ..``. + +.. seealso:: + + Advanced users who are already familiar with Boost.Python may want to skip + the tutorial and look at the test cases in the :file:`tests` directory, + which exercise all features of pybind11. + +Header and namespace conventions +================================ + +For brevity, all code examples assume that the following two lines are present: + +.. 
code-block:: cpp + + #include + + namespace py = pybind11; + +Some features may require additional headers, but those will be specified as needed. + +.. _simple_example: + +Creating bindings for a simple function +======================================= + +Let's start by creating Python bindings for an extremely simple function, which +adds two numbers and returns their result: + +.. code-block:: cpp + + int add(int i, int j) { + return i + j; + } + +For simplicity [#f1]_, we'll put both this function and the binding code into +a file named :file:`example.cpp` with the following contents: + +.. code-block:: cpp + + #include + + int add(int i, int j) { + return i + j; + } + + PYBIND11_MODULE(example, m) { + m.doc() = "pybind11 example plugin"; // optional module docstring + + m.def("add", &add, "A function which adds two numbers"); + } + +.. [#f1] In practice, implementation and binding code will generally be located + in separate files. + +The :func:`PYBIND11_MODULE` macro creates a function that will be called when an +``import`` statement is issued from within Python. The module name (``example``) +is given as the first macro argument (it should not be in quotes). The second +argument (``m``) defines a variable of type :class:`py::module ` which +is the main interface for creating bindings. The method :func:`module::def` +generates binding code that exposes the ``add()`` function to Python. + +.. note:: + + Notice how little code was needed to expose our function to Python: all + details regarding the function's parameters and return value were + automatically inferred using template metaprogramming. This overall + approach and the used syntax are borrowed from Boost.Python, though the + underlying implementation is very different. + +pybind11 is a header-only library, hence it is not necessary to link against +any special libraries and there are no intermediate (magic) translation steps. 
+On Linux, the above example can be compiled using the following command: + +.. code-block:: bash + + $ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix` + +For more details on the required compiler flags on Linux and MacOS, see +:ref:`building_manually`. For complete cross-platform compilation instructions, +refer to the :ref:`compiling` page. + +The `python_example`_ and `cmake_example`_ repositories are also a good place +to start. They are both complete project examples with cross-platform build +systems. The only difference between the two is that `python_example`_ uses +Python's ``setuptools`` to build the module, while `cmake_example`_ uses CMake +(which may be preferable for existing C++ projects). + +.. _python_example: https://github.com/pybind/python_example +.. _cmake_example: https://github.com/pybind/cmake_example + +Building the above C++ code will produce a binary module file that can be +imported to Python. Assuming that the compiled module is located in the +current directory, the following interactive Python session shows how to +load and execute the example: + +.. code-block:: pycon + + $ python + Python 2.7.10 (default, Aug 22 2015, 20:33:39) + [GCC 4.2.1 Compatible Apple LLVM 7.0.0 (clang-700.0.59.1)] on darwin + Type "help", "copyright", "credits" or "license" for more information. + >>> import example + >>> example.add(1, 2) + 3L + >>> + +.. _keyword_args: + +Keyword arguments +================= + +With a simple code modification, it is possible to inform Python about the +names of the arguments ("i" and "j" in this case). + +.. code-block:: cpp + + m.def("add", &add, "A function which adds two numbers", + py::arg("i"), py::arg("j")); + +:class:`arg` is one of several special tag classes which can be used to pass +metadata into :func:`module::def`. 
With this modified binding code, we can now +call the function using keyword arguments, which is a more readable alternative +particularly for functions taking many parameters: + +.. code-block:: pycon + + >>> import example + >>> example.add(i=1, j=2) + 3L + +The keyword names also appear in the function signatures within the documentation. + +.. code-block:: pycon + + >>> help(example) + + .... + + FUNCTIONS + add(...) + Signature : (i: int, j: int) -> int + + A function which adds two numbers + +A shorter notation for named arguments is also available: + +.. code-block:: cpp + + // regular notation + m.def("add1", &add, py::arg("i"), py::arg("j")); + // shorthand + using namespace pybind11::literals; + m.def("add2", &add, "i"_a, "j"_a); + +The :var:`_a` suffix forms a C++11 literal which is equivalent to :class:`arg`. +Note that the literal operator must first be made visible with the directive +``using namespace pybind11::literals``. This does not bring in anything else +from the ``pybind11`` namespace except for literals. + +.. _default_args: + +Default arguments +================= + +Suppose now that the function to be bound has default arguments, e.g.: + +.. code-block:: cpp + + int add(int i = 1, int j = 2) { + return i + j; + } + +Unfortunately, pybind11 cannot automatically extract these parameters, since they +are not part of the function's type information. However, they are simple to specify +using an extension of :class:`arg`: + +.. code-block:: cpp + + m.def("add", &add, "A function which adds two numbers", + py::arg("i") = 1, py::arg("j") = 2); + +The default values also appear within the documentation. + +.. code-block:: pycon + + >>> help(example) + + .... + + FUNCTIONS + add(...) + Signature : (i: int = 1, j: int = 2) -> int + + A function which adds two numbers + +The shorthand notation is also available for default arguments: + +.. 
code-block:: cpp + + // regular notation + m.def("add1", &add, py::arg("i") = 1, py::arg("j") = 2); + // shorthand + m.def("add2", &add, "i"_a=1, "j"_a=2); + +Exporting variables +=================== + +To expose a value from C++, use the ``attr`` function to register it in a +module as shown below. Built-in types and general objects (more on that later) +are automatically converted when assigned as attributes, and can be explicitly +converted using the function ``py::cast``. + +.. code-block:: cpp + + PYBIND11_MODULE(example, m) { + m.attr("the_answer") = 42; + py::object world = py::cast("World"); + m.attr("what") = world; + } + +These are then accessible from Python: + +.. code-block:: pycon + + >>> import example + >>> example.the_answer + 42 + >>> example.what + 'World' + +.. _supported_types: + +Supported data types +==================== + +A large number of data types are supported out of the box and can be used +seamlessly as functions arguments, return values or with ``py::cast`` in general. +For a full overview, see the :doc:`advanced/cast/index` section. 
diff --git a/diffvg/pybind11/docs/benchmark.py b/diffvg/pybind11/docs/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..023477212ee3ca34353067b196e9959144444f33 --- /dev/null +++ b/diffvg/pybind11/docs/benchmark.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +import random +import os +import time +import datetime as dt + +nfns = 4 # Functions per class +nargs = 4 # Arguments per function + + +def generate_dummy_code_pybind11(nclasses=10): + decl = "" + bindings = "" + + for cl in range(nclasses): + decl += "class cl%03i;\n" % cl + decl += '\n' + + for cl in range(nclasses): + decl += "class cl%03i {\n" % cl + decl += "public:\n" + bindings += ' py::class_(m, "cl%03i")\n' % (cl, cl) + for fn in range(nfns): + ret = random.randint(0, nclasses - 1) + params = [random.randint(0, nclasses - 1) for i in range(nargs)] + decl += " cl%03i *fn_%03i(" % (ret, fn) + decl += ", ".join("cl%03i *" % p for p in params) + decl += ");\n" + bindings += ' .def("fn_%03i", &cl%03i::fn_%03i)\n' % \ + (fn, cl, fn) + decl += "};\n\n" + bindings += ' ;\n' + + result = "#include \n\n" + result += "namespace py = pybind11;\n\n" + result += decl + '\n' + result += "PYBIND11_MODULE(example, m) {\n" + result += bindings + result += "}" + return result + + +def generate_dummy_code_boost(nclasses=10): + decl = "" + bindings = "" + + for cl in range(nclasses): + decl += "class cl%03i;\n" % cl + decl += '\n' + + for cl in range(nclasses): + decl += "class cl%03i {\n" % cl + decl += "public:\n" + bindings += ' py::class_("cl%03i")\n' % (cl, cl) + for fn in range(nfns): + ret = random.randint(0, nclasses - 1) + params = [random.randint(0, nclasses - 1) for i in range(nargs)] + decl += " cl%03i *fn_%03i(" % (ret, fn) + decl += ", ".join("cl%03i *" % p for p in params) + decl += ");\n" + bindings += ' .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy())\n' % \ + (fn, cl, fn) + decl += "};\n\n" + bindings += ' ;\n' + + result = "#include \n\n" + result += "namespace 
py = boost::python;\n\n" + result += decl + '\n' + result += "BOOST_PYTHON_MODULE(example) {\n" + result += bindings + result += "}" + return result + + +for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]: + print ("{") + for i in range(0, 10): + nclasses = 2 ** i + with open("test.cpp", "w") as f: + f.write(codegen(nclasses)) + n1 = dt.datetime.now() + os.system("g++ -Os -shared -rdynamic -undefined dynamic_lookup " + "-fvisibility=hidden -std=c++14 test.cpp -I include " + "-I /System/Library/Frameworks/Python.framework/Headers -o test.so") + n2 = dt.datetime.now() + elapsed = (n2 - n1).total_seconds() + size = os.stat('test.so').st_size + print(" {%i, %f, %i}," % (nclasses * nfns, elapsed, size)) + print ("}") diff --git a/diffvg/pybind11/docs/benchmark.rst b/diffvg/pybind11/docs/benchmark.rst new file mode 100644 index 0000000000000000000000000000000000000000..02c2ccde7dc00db1e57b73a7523521f9c39a5639 --- /dev/null +++ b/diffvg/pybind11/docs/benchmark.rst @@ -0,0 +1,95 @@ +Benchmark +========= + +The following is the result of a synthetic benchmark comparing both compilation +time and module size of pybind11 against Boost.Python. A detailed report about a +Boost.Python to pybind11 conversion of a real project is available here: [#f1]_. + +.. [#f1] http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf + +Setup +----- + +A python script (see the ``docs/benchmark.py`` file) was used to generate a set +of files with dummy classes whose count increases for each successive benchmark +(between 1 and 2048 classes in powers of two). Each class has four methods with +a randomly generated signature with a return value and four arguments. (There +was no particular reason for this setup other than the desire to generate many +unique function signatures whose count could be controlled in a simple way.) + +Here is an example of the binding code for one class: + +.. code-block:: cpp + + ... 
+ class cl034 { + public: + cl279 *fn_000(cl084 *, cl057 *, cl065 *, cl042 *); + cl025 *fn_001(cl098 *, cl262 *, cl414 *, cl121 *); + cl085 *fn_002(cl445 *, cl297 *, cl145 *, cl421 *); + cl470 *fn_003(cl200 *, cl323 *, cl332 *, cl492 *); + }; + ... + + PYBIND11_MODULE(example, m) { + ... + py::class_(m, "cl034") + .def("fn_000", &cl034::fn_000) + .def("fn_001", &cl034::fn_001) + .def("fn_002", &cl034::fn_002) + .def("fn_003", &cl034::fn_003) + ... + } + +The Boost.Python version looks almost identical except that a return value +policy had to be specified as an argument to ``def()``. For both libraries, +compilation was done with + +.. code-block:: bash + + Apple LLVM version 7.0.2 (clang-700.1.81) + +and the following compilation flags + +.. code-block:: bash + + g++ -Os -shared -rdynamic -undefined dynamic_lookup -fvisibility=hidden -std=c++14 + +Compilation time +---------------- + +The following log-log plot shows how the compilation time grows for an +increasing number of class and function declarations. pybind11 includes many +fewer headers, which initially leads to shorter compilation times, but the +performance is ultimately fairly similar (pybind11 is 19.8 seconds faster for +the largest file with 2048 classes and a total of 8192 methods -- a +modest **1.2x** speedup relative to Boost.Python, which required 116.35 +seconds). + +.. only:: not latex + + .. image:: pybind11_vs_boost_python1.svg + +.. only:: latex + + .. image:: pybind11_vs_boost_python1.png + +Module size +----------- + +Differences between the two libraries become much more pronounced when +considering the file size of the generated Python plugin: for the largest file, +the binary generated by Boost.Python required 16.8 MiB, which was **2.17 +times** / **9.1 megabytes** larger than the output generated by pybind11. 
For +very small inputs, Boost.Python has an edge in the plot below -- however, note +that it stores many definitions in an external library, whose size was not +included here, hence the comparison is slightly shifted in Boost.Python's +favor. + +.. only:: not latex + + .. image:: pybind11_vs_boost_python2.svg + +.. only:: latex + + .. image:: pybind11_vs_boost_python2.png diff --git a/diffvg/pybind11/docs/changelog.rst b/diffvg/pybind11/docs/changelog.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e15621d2811054dfcc0e9ffcb5bd5ad38e40dae --- /dev/null +++ b/diffvg/pybind11/docs/changelog.rst @@ -0,0 +1,1361 @@ +.. _changelog: + +Changelog +######### + +Starting with version 1.8.0, pybind11 releases use a `semantic versioning +`_ policy. + +v2.6.0 (IN PROGRESS) +-------------------- + +See :ref:`upgrade-guide-2.6` for help upgrading to the new version. + +* Keyword only argument supported in Python 2 or 3 with ``py::kwonly()``. + `#2100 `_ + +* Perfect forwarding support for methods. + `#2048 `_ + +* Added ``py::error_already_set::discard_as_unraisable()``. + `#2372 `_ + +* ``py::hash`` is now public. + `#2217 `_ + +* ``py::is_final()`` class modifier to block subclassing (CPython only). + `#2151 `_ + +* ``py::memoryview`` update and documentation. + `#2223 `_ + +* Minimum CMake required increased to 3.4. + `#2338 `_ and + `#2370 `_ + + * Full integration with CMake’s C++ standard system replaces + ``PYBIND11_CPP_STANDARD``. + + * Generated config file is now portable to different Python/compiler/CMake + versions. + + * Virtual environments prioritized if ``PYTHON_EXECUTABLE`` is not set + (``venv``, ``virtualenv``, and ``conda``) (similar to the new FindPython + mode). + + * Other CMake features now natively supported, like + ``CMAKE_INTERPROCEDURAL_OPTIMIZATION``, ``set(CMAKE_CXX_VISIBILITY_PRESET + hidden)``. + +* Optional :ref:`find-python-mode` and :ref:`nopython-mode` with CMake. + `#2370 `_ + +* Uninstall target added. 
+ `#2265 `_ and + `#2346 `_ + +Smaller or developer focused features: + +* Error now thrown when ``__init__`` is forgotten on subclasses. + `#2152 `_ + +* If ``__eq__`` defined but not ``__hash__``, ``__hash__`` is now set to + ``None``. + `#2291 `_ + +* ``py::ellipsis`` now also works on Python 2 + `#2360 `_ + +* Added missing signature for ``py::array`` + `#2363 `_ + +* Bugfixes related to more extensive testing + `#2321 `_ + +* Pointer to ``std::tuple`` & ``std::pair`` supported in cast. + `#2334 `_ + +* Small fixes in NumPy support. ``py::array`` now uses ``py::ssize_t`` as first + argument type. + `#2293 `_ + +* PyPy fixes, including support for PyPy3 and PyPy 7. + `#2146 `_ + +* CPython 3.9 fixes. + `#2253 `_ + +* Debug Python interpreter support. + `#2025 `_ + + + +v2.5.0 (Mar 31, 2020) +----------------------------------------------------- + +* Use C++17 fold expressions in type casters, if available. This can + improve performance during overload resolution when functions have + multiple arguments. + `#2043 `_. + +* Changed include directory resolution in ``pybind11/__init__.py`` + and installation in ``setup.py``. This fixes a number of open issues + where pybind11 headers could not be found in certain environments. + `#1995 `_. + +* C++20 ``char8_t`` and ``u8string`` support. `#2026 + `_. + +* CMake: search for Python 3.9. `bb9c91 + `_. + +* Fixes for MSYS-based build environments. + `#2087 `_, + `#2053 `_. + +* STL bindings for ``std::vector<...>::clear``. `#2074 + `_. + +* Read-only flag for ``py::buffer``. `#1466 + `_. + +* Exception handling during module initialization. + `bf2b031 `_. + +* Support linking against a CPython debug build. + `#2025 `_. + +* Fixed issues involving the availability and use of aligned ``new`` and + ``delete``. `#1988 `_, + `759221 `_. + +* Fixed a resource leak upon interpreter shutdown. + `#2020 `_. + +* Fixed error handling in the boolean caster. + `#1976 `_. 
+ +v2.4.3 (Oct 15, 2019) +----------------------------------------------------- + +* Adapt pybind11 to a C API convention change in Python 3.8. `#1950 + `_. + +v2.4.2 (Sep 21, 2019) +----------------------------------------------------- + +* Replaced usage of a C++14 only construct. `#1929 + `_. + +* Made an ifdef future-proof for Python >= 4. `f3109d + `_. + +v2.4.1 (Sep 20, 2019) +----------------------------------------------------- + +* Fixed a problem involving implicit conversion from enumerations to integers + on Python 3.8. `#1780 `_. + +v2.4.0 (Sep 19, 2019) +----------------------------------------------------- + +* Try harder to keep pybind11-internal data structures separate when there + are potential ABI incompatibilities. Fixes crashes that occurred when loading + multiple pybind11 extensions that were e.g. compiled by GCC (libstdc++) + and Clang (libc++). + `#1588 `_ and + `c9f5a `_. + +* Added support for ``__await__``, ``__aiter__``, and ``__anext__`` protocols. + `#1842 `_. + +* ``pybind11_add_module()``: don't strip symbols when compiling in + ``RelWithDebInfo`` mode. `#1980 + `_. + +* ``enum_``: Reproduce Python behavior when comparing against invalid values + (e.g. ``None``, strings, etc.). Add back support for ``__invert__()``. + `#1912 `_, + `#1907 `_. + +* List insertion operation for ``py::list``. + Added ``.empty()`` to all collection types. + Added ``py::set::contains()`` and ``py::dict::contains()``. + `#1887 `_, + `#1884 `_, + `#1888 `_. + +* ``py::details::overload_cast_impl`` is available in C++11 mode, can be used + like ``overload_cast`` with an additional set of parentheses. + `#1581 `_. + +* Fixed ``get_include()`` on Conda. + `#1877 `_. + +* ``stl_bind.h``: negative indexing support. + `#1882 `_. + +* Minor CMake fix to add MinGW compatibility. + `#1851 `_. + +* GIL-related fixes. + `#1836 `_, + `8b90b `_. + +* Other very minor/subtle fixes and improvements. 
+ `#1329 `_, + `#1910 `_, + `#1863 `_, + `#1847 `_, + `#1890 `_, + `#1860 `_, + `#1848 `_, + `#1821 `_, + `#1837 `_, + `#1833 `_, + `#1748 `_, + `#1852 `_. + +v2.3.0 (June 11, 2019) +----------------------------------------------------- + +* Significantly reduced module binary size (10-20%) when compiled in C++11 mode + with GCC/Clang, or in any mode with MSVC. Function signatures are now always + precomputed at compile time (this was previously only available in C++14 mode + for non-MSVC compilers). + `#934 `_. + +* Add basic support for tag-based static polymorphism, where classes + provide a method to returns the desired type of an instance. + `#1326 `_. + +* Python type wrappers (``py::handle``, ``py::object``, etc.) + now support map Python's number protocol onto C++ arithmetic + operators such as ``operator+``, ``operator/=``, etc. + `#1511 `_. + +* A number of improvements related to enumerations: + + 1. The ``enum_`` implementation was rewritten from scratch to reduce + code bloat. Rather than instantiating a full implementation for each + enumeration, most code is now contained in a generic base class. + `#1511 `_. + + 2. The ``value()`` method of ``py::enum_`` now accepts an optional + docstring that will be shown in the documentation of the associated + enumeration. `#1160 `_. + + 3. check for already existing enum value and throw an error if present. + `#1453 `_. + +* Support for over-aligned type allocation via C++17's aligned ``new`` + statement. `#1582 `_. + +* Added ``py::ellipsis()`` method for slicing of multidimensional NumPy arrays + `#1502 `_. + +* Numerous Improvements to the ``mkdoc.py`` script for extracting documentation + from C++ header files. + `#1788 `_. + +* ``pybind11_add_module()``: allow including Python as a ``SYSTEM`` include path. + `#1416 `_. + +* ``pybind11/stl.h`` does not convert strings to ``vector`` anymore. + `#1258 `_. + +* Mark static methods as such to fix auto-generated Sphinx documentation. + `#1732 `_. 
+ +* Re-throw forced unwind exceptions (e.g. during pthread termination). + `#1208 `_. + +* Added ``__contains__`` method to the bindings of maps (``std::map``, + ``std::unordered_map``). + `#1767 `_. + +* Improvements to ``gil_scoped_acquire``. + `#1211 `_. + +* Type caster support for ``std::deque``. + `#1609 `_. + +* Support for ``std::unique_ptr`` holders, whose deleters differ between a base and derived + class. `#1353 `_. + +* Construction of STL array/vector-like data structures from + iterators. Added an ``extend()`` operation. + `#1709 `_, + +* CMake build system improvements for projects that include non-C++ + files (e.g. plain C, CUDA) in ``pybind11_add_module`` et al. + `#1678 `_. + +* Fixed asynchronous invocation and deallocation of Python functions + wrapped in ``std::function``. + `#1595 `_. + +* Fixes regarding return value policy propagation in STL type casters. + `#1603 `_. + +* Fixed scoped enum comparisons. + `#1571 `_. + +* Fixed iostream redirection for code that releases the GIL. + `#1368 `_, + +* A number of CI-related fixes. + `#1757 `_, + `#1744 `_, + `#1670 `_. + +v2.2.4 (September 11, 2018) +----------------------------------------------------- + +* Use new Python 3.7 Thread Specific Storage (TSS) implementation if available. + `#1454 `_, + `#1517 `_. + +* Fixes for newer MSVC versions and C++17 mode. + `#1347 `_, + `#1462 `_. + +* Propagate return value policies to type-specific casters + when casting STL containers. + `#1455 `_. + +* Allow ostream-redirection of more than 1024 characters. + `#1479 `_. + +* Set ``Py_DEBUG`` define when compiling against a debug Python build. + `#1438 `_. + +* Untangle integer logic in number type caster to work for custom + types that may only be castable to a restricted set of builtin types. + `#1442 `_. + +* CMake build system: Remember Python version in cache file. + `#1434 `_. + +* Fix for custom smart pointers: use ``std::addressof`` to obtain holder + address instead of ``operator&``. 
+ `#1435 `_. + +* Properly report exceptions thrown during module initialization. + `#1362 `_. + +* Fixed a segmentation fault when creating empty-shaped NumPy array. + `#1371 `_. + +* The version of Intel C++ compiler must be >= 2017, and this is now checked by + the header files. `#1363 `_. + +* A few minor typo fixes and improvements to the test suite, and + patches that silence compiler warnings. + +* Vectors now support construction from generators, as well as ``extend()`` from a + list or generator. + `#1496 `_. + + +v2.2.3 (April 29, 2018) +----------------------------------------------------- + +* The pybind11 header location detection was replaced by a new implementation + that no longer depends on ``pip`` internals (the recently released ``pip`` + 10 has restricted access to this API). + `#1190 `_. + +* Small adjustment to an implementation detail to work around a compiler segmentation fault in Clang 3.3/3.4. + `#1350 `_. + +* The minimal supported version of the Intel compiler was >= 17.0 since + pybind11 v2.1. This check is now explicit, and a compile-time error is raised + if the compiler meet the requirement. + `#1363 `_. + +* Fixed an endianness-related fault in the test suite. + `#1287 `_. + +v2.2.2 (February 7, 2018) +----------------------------------------------------- + +* Fixed a segfault when combining embedded interpreter + shutdown/reinitialization with external loaded pybind11 modules. + `#1092 `_. + +* Eigen support: fixed a bug where Nx1/1xN numpy inputs couldn't be passed as + arguments to Eigen vectors (which for Eigen are simply compile-time fixed + Nx1/1xN matrices). + `#1106 `_. + +* Clarified to license by moving the licensing of contributions from + ``LICENSE`` into ``CONTRIBUTING.md``: the licensing of contributions is not + actually part of the software license as distributed. This isn't meant to be + a substantial change in the licensing of the project, but addresses concerns + that the clause made the license non-standard. 
+ `#1109 `_. + +* Fixed a regression introduced in 2.1 that broke binding functions with lvalue + character literal arguments. + `#1128 `_. + +* MSVC: fix for compilation failures under /permissive-, and added the flag to + the appveyor test suite. + `#1155 `_. + +* Fixed ``__qualname__`` generation, and in turn, fixes how class names + (especially nested class names) are shown in generated docstrings. + `#1171 `_. + +* Updated the FAQ with a suggested project citation reference. + `#1189 `_. + +* Added fixes for deprecation warnings when compiled under C++17 with + ``-Wdeprecated`` turned on, and add ``-Wdeprecated`` to the test suite + compilation flags. + `#1191 `_. + +* Fixed outdated PyPI URLs in ``setup.py``. + `#1213 `_. + +* Fixed a refcount leak for arguments that end up in a ``py::args`` argument + for functions with both fixed positional and ``py::args`` arguments. + `#1216 `_. + +* Fixed a potential segfault resulting from possible premature destruction of + ``py::args``/``py::kwargs`` arguments with overloaded functions. + `#1223 `_. + +* Fixed ``del map[item]`` for a ``stl_bind.h`` bound stl map. + `#1229 `_. + +* Fixed a regression from v2.1.x where the aggregate initialization could + unintentionally end up at a constructor taking a templated + ``std::initializer_list`` argument. + `#1249 `_. + +* Fixed an issue where calling a function with a keep_alive policy on the same + nurse/patient pair would cause the internal patient storage to needlessly + grow (unboundedly, if the nurse is long-lived). + `#1251 `_. + +* Various other minor fixes. + +v2.2.1 (September 14, 2017) +----------------------------------------------------- + +* Added ``py::module::reload()`` member function for reloading a module. + `#1040 `_. + +* Fixed a reference leak in the number converter. + `#1078 `_. + +* Fixed compilation with Clang on host GCC < 5 (old libstdc++ which isn't fully + C++11 compliant). `#1062 `_. 
+ +* Fixed a regression where the automatic ``std::vector`` caster would + fail to compile. The same fix also applies to any container which returns + element proxies instead of references. + `#1053 `_. + +* Fixed a regression where the ``py::keep_alive`` policy could not be applied + to constructors. `#1065 `_. + +* Fixed a nullptr dereference when loading a ``py::module_local`` type + that's only registered in an external module. + `#1058 `_. + +* Fixed implicit conversion of accessors to types derived from ``py::object``. + `#1076 `_. + +* The ``name`` in ``PYBIND11_MODULE(name, variable)`` can now be a macro. + `#1082 `_. + +* Relaxed overly strict ``py::pickle()`` check for matching get and set types. + `#1064 `_. + +* Conversion errors now try to be more informative when it's likely that + a missing header is the cause (e.g. forgetting ````). + `#1077 `_. + +v2.2.0 (August 31, 2017) +----------------------------------------------------- + +* Support for embedding the Python interpreter. See the + :doc:`documentation page ` for a + full overview of the new features. + `#774 `_, + `#889 `_, + `#892 `_, + `#920 `_. + + .. code-block:: cpp + + #include + namespace py = pybind11; + + int main() { + py::scoped_interpreter guard{}; // start the interpreter and keep it alive + + py::print("Hello, World!"); // use the Python API + } + +* Support for inheriting from multiple C++ bases in Python. + `#693 `_. + + .. code-block:: python + + from cpp_module import CppBase1, CppBase2 + + class PyDerived(CppBase1, CppBase2): + def __init__(self): + CppBase1.__init__(self) # C++ bases must be initialized explicitly + CppBase2.__init__(self) + +* ``PYBIND11_MODULE`` is now the preferred way to create module entry points. + ``PYBIND11_PLUGIN`` is deprecated. See :ref:`macros` for details. + `#879 `_. + + .. 
code-block:: cpp + + // new + PYBIND11_MODULE(example, m) { + m.def("add", [](int a, int b) { return a + b; }); + } + + // old + PYBIND11_PLUGIN(example) { + py::module m("example"); + m.def("add", [](int a, int b) { return a + b; }); + return m.ptr(); + } + +* pybind11's headers and build system now more strictly enforce hidden symbol + visibility for extension modules. This should be seamless for most users, + but see the :doc:`upgrade` if you use a custom build system. + `#995 `_. + +* Support for ``py::module_local`` types which allow multiple modules to + export the same C++ types without conflicts. This is useful for opaque + types like ``std::vector``. ``py::bind_vector`` and ``py::bind_map`` + now default to ``py::module_local`` if their elements are builtins or + local types. See :ref:`module_local` for details. + `#949 `_, + `#981 `_, + `#995 `_, + `#997 `_. + +* Custom constructors can now be added very easily using lambdas or factory + functions which return a class instance by value, pointer or holder. This + supersedes the old placement-new ``__init__`` technique. + See :ref:`custom_constructors` for details. + `#805 `_, + `#1014 `_. + + .. code-block:: cpp + + struct Example { + Example(std::string); + }; + + py::class_(m, "Example") + .def(py::init()) // existing constructor + .def(py::init([](int n) { // custom constructor + return std::make_unique(std::to_string(n)); + })); + +* Similarly to custom constructors, pickling support functions are now bound + using the ``py::pickle()`` adaptor which improves type safety. See the + :doc:`upgrade` and :ref:`pickling` for details. + `#1038 `_. + +* Builtin support for converting C++17 standard library types and general + conversion improvements: + + 1. C++17 ``std::variant`` is supported right out of the box. C++11/14 + equivalents (e.g. ``boost::variant``) can also be added with a simple + user-defined specialization. See :ref:`cpp17_container_casters` for details. + `#811 `_, + `#845 `_, + `#989 `_. 
+ + 2. Out-of-the-box support for C++17 ``std::string_view``. + `#906 `_. + + 3. Improved compatibility of the builtin ``optional`` converter. + `#874 `_. + + 4. The ``bool`` converter now accepts ``numpy.bool_`` and types which + define ``__bool__`` (Python 3.x) or ``__nonzero__`` (Python 2.7). + `#925 `_. + + 5. C++-to-Python casters are now more efficient and move elements out + of rvalue containers whenever possible. + `#851 `_, + `#936 `_, + `#938 `_. + + 6. Fixed ``bytes`` to ``std::string/char*`` conversion on Python 3. + `#817 `_. + + 7. Fixed lifetime of temporary C++ objects created in Python-to-C++ conversions. + `#924 `_. + +* Scope guard call policy for RAII types, e.g. ``py::call_guard()``, + ``py::call_guard()``. See :ref:`call_policies` for details. + `#740 `_. + +* Utility for redirecting C++ streams to Python (e.g. ``std::cout`` -> + ``sys.stdout``). Scope guard ``py::scoped_ostream_redirect`` in C++ and + a context manager in Python. See :ref:`ostream_redirect`. + `#1009 `_. + +* Improved handling of types and exceptions across module boundaries. + `#915 `_, + `#951 `_, + `#995 `_. + +* Fixed destruction order of ``py::keep_alive`` nurse/patient objects + in reference cycles. + `#856 `_. + +* Numpy and buffer protocol related improvements: + + 1. Support for negative strides in Python buffer objects/numpy arrays. This + required changing integers from unsigned to signed for the related C++ APIs. + Note: If you have compiler warnings enabled, you may notice some new conversion + warnings after upgrading. These can be resolved with ``static_cast``. + `#782 `_. + + 2. Support ``std::complex`` and arrays inside ``PYBIND11_NUMPY_DTYPE``. + `#831 `_, + `#832 `_. + + 3. Support for constructing ``py::buffer_info`` and ``py::arrays`` using + arbitrary containers or iterators instead of requiring a ``std::vector``. + `#788 `_, + `#822 `_, + `#860 `_. + + 4. Explicitly check numpy version and require >= 1.7.0. + `#819 `_. 
+ +* Support for allowing/prohibiting ``None`` for specific arguments and improved + ``None`` overload resolution order. See :ref:`none_arguments` for details. + `#843 `_. + `#859 `_. + +* Added ``py::exec()`` as a shortcut for ``py::eval()`` + and support for C++11 raw string literals as input. See :ref:`eval`. + `#766 `_, + `#827 `_. + +* ``py::vectorize()`` ignores non-vectorizable arguments and supports + member functions. + `#762 `_. + +* Support for bound methods as callbacks (``pybind11/functional.h``). + `#815 `_. + +* Allow aliasing pybind11 methods: ``cls.attr("foo") = cls.attr("bar")``. + `#802 `_. + +* Don't allow mixed static/non-static overloads. + `#804 `_. + +* Fixed overriding static properties in derived classes. + `#784 `_. + +* Added support for write only properties. + `#1144 `_. + +* Improved deduction of member functions of a derived class when its bases + aren't registered with pybind11. + `#855 `_. + + .. code-block:: cpp + + struct Base { + int foo() { return 42; } + } + + struct Derived : Base {} + + // Now works, but previously required also binding `Base` + py::class_(m, "Derived") + .def("foo", &Derived::foo); // function is actually from `Base` + +* The implementation of ``py::init<>`` now uses C++11 brace initialization + syntax to construct instances, which permits binding implicit constructors of + aggregate types. `#1015 `_. + + .. code-block:: cpp + + struct Aggregate { + int a; + std::string b; + }; + + py::class_(m, "Aggregate") + .def(py::init()); + +* Fixed issues with multiple inheritance with offset base/derived pointers. + `#812 `_, + `#866 `_, + `#960 `_. + +* Fixed reference leak of type objects. + `#1030 `_. + +* Improved support for the ``/std:c++14`` and ``/std:c++latest`` modes + on MSVC 2017. + `#841 `_, + `#999 `_. + +* Fixed detection of private operator new on MSVC. + `#893 `_, + `#918 `_. + +* Intel C++ compiler compatibility fixes. + `#937 `_. 
+ +* Fixed implicit conversion of `py::enum_` to integer types on Python 2.7. + `#821 `_. + +* Added ``py::hash`` to fetch the hash value of Python objects, and + ``.def(hash(py::self))`` to provide the C++ ``std::hash`` as the Python + ``__hash__`` method. + `#1034 `_. + +* Fixed ``__truediv__`` on Python 2 and ``__itruediv__`` on Python 3. + `#867 `_. + +* ``py::capsule`` objects now support the ``name`` attribute. This is useful + for interfacing with ``scipy.LowLevelCallable``. + `#902 `_. + +* Fixed ``py::make_iterator``'s ``__next__()`` for past-the-end calls. + `#897 `_. + +* Added ``error_already_set::matches()`` for checking Python exceptions. + `#772 `_. + +* Deprecated ``py::error_already_set::clear()``. It's no longer needed + following a simplification of the ``py::error_already_set`` class. + `#954 `_. + +* Deprecated ``py::handle::operator==()`` in favor of ``py::handle::is()`` + `#825 `_. + +* Deprecated ``py::object::borrowed``/``py::object::stolen``. + Use ``py::object::borrowed_t{}``/``py::object::stolen_t{}`` instead. + `#771 `_. + +* Changed internal data structure versioning to avoid conflicts between + modules compiled with different revisions of pybind11. + `#1012 `_. + +* Additional compile-time and run-time error checking and more informative messages. + `#786 `_, + `#794 `_, + `#803 `_. + +* Various minor improvements and fixes. + `#764 `_, + `#791 `_, + `#795 `_, + `#840 `_, + `#844 `_, + `#846 `_, + `#849 `_, + `#858 `_, + `#862 `_, + `#871 `_, + `#872 `_, + `#881 `_, + `#888 `_, + `#899 `_, + `#928 `_, + `#931 `_, + `#944 `_, + `#950 `_, + `#952 `_, + `#962 `_, + `#965 `_, + `#970 `_, + `#978 `_, + `#979 `_, + `#986 `_, + `#1020 `_, + `#1027 `_, + `#1037 `_. + +* Testing improvements. + `#798 `_, + `#882 `_, + `#898 `_, + `#900 `_, + `#921 `_, + `#923 `_, + `#963 `_. + +v2.1.1 (April 7, 2017) +----------------------------------------------------- + +* Fixed minimum version requirement for MSVC 2015u3 + `#773 `_. 
+ +v2.1.0 (March 22, 2017) +----------------------------------------------------- + +* pybind11 now performs function overload resolution in two phases. The first + phase only considers exact type matches, while the second allows for implicit + conversions to take place. A special ``noconvert()`` syntax can be used to + completely disable implicit conversions for specific arguments. + `#643 `_, + `#634 `_, + `#650 `_. + +* Fixed a regression where static properties no longer worked with classes + using multiple inheritance. The ``py::metaclass`` attribute is no longer + necessary (and deprecated as of this release) when binding classes with + static properties. + `#679 `_, + +* Classes bound using ``pybind11`` can now use custom metaclasses. + `#679 `_, + +* ``py::args`` and ``py::kwargs`` can now be mixed with other positional + arguments when binding functions using pybind11. + `#611 `_. + +* Improved support for C++11 unicode string and character types; added + extensive documentation regarding pybind11's string conversion behavior. + `#624 `_, + `#636 `_, + `#715 `_. + +* pybind11 can now avoid expensive copies when converting Eigen arrays to NumPy + arrays (and vice versa). `#610 `_. + +* The "fast path" in ``py::vectorize`` now works for any full-size group of C or + F-contiguous arrays. The non-fast path is also faster since it no longer performs + copies of the input arguments (except when type conversions are necessary). + `#610 `_. + +* Added fast, unchecked access to NumPy arrays via a proxy object. + `#746 `_. + +* Transparent support for class-specific ``operator new`` and + ``operator delete`` implementations. + `#755 `_. + +* Slimmer and more efficient STL-compatible iterator interface for sequence types. + `#662 `_. + +* Improved custom holder type support. + `#607 `_. + +* ``nullptr`` to ``None`` conversion fixed in various builtin type casters. + `#732 `_. + +* ``enum_`` now exposes its members via a special ``__members__`` attribute. + `#666 `_. 
+
+* ``std::vector`` bindings created using ``stl_bind.h`` can now optionally
+ implement the buffer protocol. `#488 `_.
+
+* Automated C++ reference documentation using doxygen and breathe.
+ `#598 `_.
+
+* Added minimum compiler version assertions.
+ `#727 `_.
+
+* Improved compatibility with C++1z.
+ `#677 `_.
+
+* Improved ``py::capsule`` API. Can be used to implement cleanup
+ callbacks that are invoked at module destruction time.
+ `#752 `_.
+
+* Various minor improvements and fixes.
+ `#595 `_,
+ `#588 `_,
+ `#589 `_,
+ `#603 `_,
+ `#619 `_,
+ `#648 `_,
+ `#695 `_,
+ `#720 `_,
+ `#723 `_,
+ `#729 `_,
+ `#724 `_,
+ `#742 `_,
+ `#753 `_.
+
+v2.0.1 (Jan 4, 2017)
+-----------------------------------------------------
+
+* Fix pointer to reference error in type_caster on MSVC
+ `#583 `_.
+
+* Fixed a segmentation fault in the test suite due to a typo
+ `cd7eac `_.
+
+v2.0.0 (Jan 1, 2017)
+-----------------------------------------------------
+
+* Fixed a reference counting regression affecting types with custom metaclasses
+ (introduced in v2.0.0-rc1).
+ `#571 `_.
+
+* Quenched a CMake policy warning.
+ `#570 `_.
+
+v2.0.0-rc1 (Dec 23, 2016)
+-----------------------------------------------------
+
+The pybind11 developers are excited to issue a release candidate of pybind11
+with a subsequent v2.0.0 release planned in early January next year.
+
+An incredible amount of effort went into pybind11 over the last ~5 months,
+leading to a release that is jam-packed with exciting new features and numerous
+usability improvements. The following list links PRs or individual commits
+whenever applicable.
+
+Happy Christmas!
+
+* Support for binding C++ class hierarchies that make use of multiple
+ inheritance. `#410 `_.
+
+* PyPy support: pybind11 now supports nightly builds of PyPy and will
+ interoperate with the future 5.7 release. No code changes are necessary,
+ everything "just" works as usual.
Note that we only target the Python 2.7
+ branch for now; support for 3.x will be added once its ``cpyext`` extension
+ support catches up. A few minor features remain unsupported for the time
+ being (notably dynamic attributes in custom types).
+ `#527 `_.
+
+* Significant work on the documentation -- in particular, the monolithic
+ ``advanced.rst`` file was restructured into an easier to read hierarchical
+ organization. `#448 `_.
+
+* Many NumPy-related improvements:
+
+ 1. Object-oriented API to access and modify NumPy ``ndarray`` instances,
+ replicating much of the corresponding NumPy C API functionality.
+ `#402 `_.
+
+ 2. NumPy array ``dtype`` array descriptors are now first-class citizens and
+ are exposed via a new class ``py::dtype``.
+
+ 3. Structured dtypes can be registered using the ``PYBIND11_NUMPY_DTYPE()``
+ macro. Special ``array`` constructors accepting dtype objects were also
+ added.
+
+ One potential caveat involving this change: format descriptor strings
+ should now be accessed via ``format_descriptor::format()`` (however, for
+ compatibility purposes, the old syntax ``format_descriptor::value`` will
+ still work for non-structured data types). `#308
+ `_.
+
+ 4. Further improvements to support structured dtypes throughout the system.
+ `#472 `_,
+ `#474 `_,
+ `#459 `_,
+ `#453 `_,
+ `#452 `_, and
+ `#505 `_.
+
+ 5. Fast access operators. `#497 `_.
+
+ 6. Constructors for arrays whose storage is owned by another object.
+ `#440 `_.
+
+ 7. Added constructors for ``array`` and ``array_t`` explicitly accepting shape
+ and strides; if strides are not provided, they are deduced assuming
+ C-contiguity. Also added simplified constructors for 1-dimensional case.
+
+ 8. Added buffer/NumPy support for ``char[N]`` and ``std::array`` types.
+
+ 9. Added ``memoryview`` wrapper type which is constructible from ``buffer_info``.
+
+* Eigen: many additional conversions and support for non-contiguous
+ arrays/slices.
+ `#427 `_, + `#315 `_, + `#316 `_, + `#312 `_, and + `#267 `_ + +* Incompatible changes in ``class_<...>::class_()``: + + 1. Declarations of types that provide access via the buffer protocol must + now include the ``py::buffer_protocol()`` annotation as an argument to + the ``class_`` constructor. + + 2. Declarations of types that require a custom metaclass (i.e. all classes + which include static properties via commands such as + ``def_readwrite_static()``) must now include the ``py::metaclass()`` + annotation as an argument to the ``class_`` constructor. + + These two changes were necessary to make type definitions in pybind11 + future-proof, and to support PyPy via its cpyext mechanism. `#527 + `_. + + + 3. This version of pybind11 uses a redesigned mechanism for instantiating + trampoline classes that are used to override virtual methods from within + Python. This led to the following user-visible syntax change: instead of + + .. code-block:: cpp + + py::class_("MyClass") + .alias() + .... + + write + + .. code-block:: cpp + + py::class_("MyClass") + .... + + Importantly, both the original and the trampoline class are now + specified as an arguments (in arbitrary order) to the ``py::class_`` + template, and the ``alias<..>()`` call is gone. The new scheme has zero + overhead in cases when Python doesn't override any functions of the + underlying C++ class. `rev. 86d825 + `_. + +* Added ``eval`` and ``eval_file`` functions for evaluating expressions and + statements from a string or file. `rev. 0d3fc3 + `_. + +* pybind11 can now create types with a modifiable dictionary. + `#437 `_ and + `#444 `_. + +* Support for translation of arbitrary C++ exceptions to Python counterparts. + `#296 `_ and + `#273 `_. + +* Report full backtraces through mixed C++/Python code, better reporting for + import errors, fixed GIL management in exception processing. + `#537 `_, + `#494 `_, + `rev. e72d95 `_, and + `rev. 099d6e `_. 
+
+* Support for bit-level operations, comparisons, and serialization of C++
+ enumerations. `#503 `_,
+ `#508 `_,
+ `#380 `_,
+ `#309 `_,
+ `#311 `_.
+
+* The ``class_`` constructor now accepts its template arguments in any order.
+ `#385 `_.
+
+* Attribute and item accessors now have a more complete interface which makes
+ it possible to chain attributes as in
+ ``obj.attr("a")[key].attr("b").attr("method")(1, 2, 3)``. `#425
+ `_.
+
+* Major redesign of the default and conversion constructors in ``pytypes.h``.
+ `#464 `_.
+
+* Added built-in support for ``std::shared_ptr`` holder type. It is no longer
+ necessary to include a declaration of the form
+ ``PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr)`` (though continuing to
+ do so won't cause an error).
+ `#454 `_.
+
+* New ``py::overload_cast`` casting operator to select among multiple possible
+ overloads of a function. An example:
+
+ .. code-block:: cpp
+
+ py::class_(m, "Pet")
+ .def("set", py::overload_cast(&Pet::set), "Set the pet's age")
+ .def("set", py::overload_cast(&Pet::set), "Set the pet's name");
+
+ This feature only works on C++14-capable compilers.
+ `#541 `_.
+
+* C++ types are automatically cast to Python types, e.g. when assigning
+ them as an attribute. For instance, the following is now legal:
+
+ .. code-block:: cpp
+
+ py::module m = /* ... */
+ m.attr("constant") = 123;
+
+ (Previously, a ``py::cast`` call was necessary to avoid a compilation error.)
+ `#551 `_.
+
+* Redesigned ``pytest``-based test suite. `#321 `_.
+
+* Instance tracking to detect reference leaks in test suite. `#324 `_
+
+* pybind11 can now distinguish between multiple different instances that are
+ located at the same memory address, but which have different types.
+ `#329 `_.
+
+* Improved logic in ``move`` return value policy.
+ `#510 `_,
+ `#297 `_.
+
+* Generalized unpacking API to permit calling Python functions from C++ using
+ notation such as ``foo(a1, a2, *args, "ka"_a=1, "kb"_a=2, **kwargs)``. `#372 `_.
+ +* ``py::print()`` function whose behavior matches that of the native Python + ``print()`` function. `#372 `_. + +* Added ``py::dict`` keyword constructor:``auto d = dict("number"_a=42, + "name"_a="World");``. `#372 `_. + +* Added ``py::str::format()`` method and ``_s`` literal: ``py::str s = "1 + 2 + = {}"_s.format(3);``. `#372 `_. + +* Added ``py::repr()`` function which is equivalent to Python's builtin + ``repr()``. `#333 `_. + +* Improved construction and destruction logic for holder types. It is now + possible to reference instances with smart pointer holder types without + constructing the holder if desired. The ``PYBIND11_DECLARE_HOLDER_TYPE`` + macro now accepts an optional second parameter to indicate whether the holder + type uses intrusive reference counting. + `#533 `_ and + `#561 `_. + +* Mapping a stateless C++ function to Python and back is now "for free" (i.e. + no extra indirections or argument conversion overheads). `rev. 954b79 + `_. + +* Bindings for ``std::valarray``. + `#545 `_. + +* Improved support for C++17 capable compilers. + `#562 `_. + +* Bindings for ``std::optional``. + `#475 `_, + `#476 `_, + `#479 `_, + `#499 `_, and + `#501 `_. + +* ``stl_bind.h``: general improvements and support for ``std::map`` and + ``std::unordered_map``. + `#490 `_, + `#282 `_, + `#235 `_. + +* The ``std::tuple``, ``std::pair``, ``std::list``, and ``std::vector`` type + casters now accept any Python sequence type as input. `rev. 107285 + `_. + +* Improved CMake Python detection on multi-architecture Linux. + `#532 `_. + +* Infrastructure to selectively disable or enable parts of the automatically + generated docstrings. `#486 `_. + +* ``reference`` and ``reference_internal`` are now the default return value + properties for static and non-static properties, respectively. `#473 + `_. (the previous defaults + were ``automatic``). `#473 `_. + +* Support for ``std::unique_ptr`` with non-default deleters or no deleter at + all (``py::nodelete``). `#384 `_. 
+ +* Deprecated ``handle::call()`` method. The new syntax to call Python + functions is simply ``handle()``. It can also be invoked explicitly via + ``handle::operator()``, where ``X`` is an optional return value policy. + +* Print more informative error messages when ``make_tuple()`` or ``cast()`` + fail. `#262 `_. + +* Creation of holder types for classes deriving from + ``std::enable_shared_from_this<>`` now also works for ``const`` values. + `#260 `_. + +* ``make_iterator()`` improvements for better compatibility with various + types (now uses prefix increment operator); it now also accepts iterators + with different begin/end types as long as they are equality comparable. + `#247 `_. + +* ``arg()`` now accepts a wider range of argument types for default values. + `#244 `_. + +* Support ``keep_alive`` where the nurse object may be ``None``. `#341 + `_. + +* Added constructors for ``str`` and ``bytes`` from zero-terminated char + pointers, and from char pointers and length. Added constructors for ``str`` + from ``bytes`` and for ``bytes`` from ``str``, which will perform UTF-8 + decoding/encoding as required. + +* Many other improvements of library internals without user-visible changes + + +1.8.1 (July 12, 2016) +---------------------- +* Fixed a rare but potentially very severe issue when the garbage collector ran + during pybind11 type creation. + +1.8.0 (June 14, 2016) +---------------------- +* Redesigned CMake build system which exports a convenient + ``pybind11_add_module`` function to parent projects. +* ``std::vector<>`` type bindings analogous to Boost.Python's ``indexing_suite`` +* Transparent conversion of sparse and dense Eigen matrices and vectors (``eigen.h``) +* Added an ``ExtraFlags`` template argument to the NumPy ``array_t<>`` wrapper + to disable an enforced cast that may lose precision, e.g. to create overloads + for different precisions and complex vs real-valued matrices. 
+* Prevent implicit conversion of floating point values to integral types in + function arguments +* Fixed incorrect default return value policy for functions returning a shared + pointer +* Don't allow registering a type via ``class_`` twice +* Don't allow casting a ``None`` value into a C++ lvalue reference +* Fixed a crash in ``enum_::operator==`` that was triggered by the ``help()`` command +* Improved detection of whether or not custom C++ types can be copy/move-constructed +* Extended ``str`` type to also work with ``bytes`` instances +* Added a ``"name"_a`` user defined string literal that is equivalent to ``py::arg("name")``. +* When specifying function arguments via ``py::arg``, the test that verifies + the number of arguments now runs at compile time. +* Added ``[[noreturn]]`` attribute to ``pybind11_fail()`` to quench some + compiler warnings +* List function arguments in exception text when the dispatch code cannot find + a matching overload +* Added ``PYBIND11_OVERLOAD_NAME`` and ``PYBIND11_OVERLOAD_PURE_NAME`` macros which + can be used to override virtual methods whose name differs in C++ and Python + (e.g. 
``__call__`` and ``operator()``) +* Various minor ``iterator`` and ``make_iterator()`` improvements +* Transparently support ``__bool__`` on Python 2.x and Python 3.x +* Fixed issue with destructor of unpickled object not being called +* Minor CMake build system improvements on Windows +* New ``pybind11::args`` and ``pybind11::kwargs`` types to create functions which + take an arbitrary number of arguments and keyword arguments +* New syntax to call a Python function from C++ using ``*args`` and ``*kwargs`` +* The functions ``def_property_*`` now correctly process docstring arguments (these + formerly caused a segmentation fault) +* Many ``mkdoc.py`` improvements (enumerations, template arguments, ``DOC()`` + macro accepts more arguments) +* Cygwin support +* Documentation improvements (pickling support, ``keep_alive``, macro usage) + +1.7 (April 30, 2016) +---------------------- +* Added a new ``move`` return value policy that triggers C++11 move semantics. + The automatic return value policy falls back to this case whenever a rvalue + reference is encountered +* Significantly more general GIL state routines that are used instead of + Python's troublesome ``PyGILState_Ensure`` and ``PyGILState_Release`` API +* Redesign of opaque types that drastically simplifies their usage +* Extended ability to pass values of type ``[const] void *`` +* ``keep_alive`` fix: don't fail when there is no patient +* ``functional.h``: acquire the GIL before calling a Python function +* Added Python RAII type wrappers ``none`` and ``iterable`` +* Added ``*args`` and ``*kwargs`` pass-through parameters to + ``pybind11.get_include()`` function +* Iterator improvements and fixes +* Documentation on return value policies and opaque types improved + +1.6 (April 30, 2016) +---------------------- +* Skipped due to upload to PyPI gone wrong and inability to recover + (https://github.com/pypa/packaging-problems/issues/74) + +1.5 (April 21, 2016) +---------------------- +* For polymorphic types, 
use RTTI to try to return the closest type registered with pybind11 +* Pickling support for serializing and unserializing C++ instances to a byte stream in Python +* Added a convenience routine ``make_iterator()`` which turns a range indicated + by a pair of C++ iterators into a iterable Python object +* Added ``len()`` and a variadic ``make_tuple()`` function +* Addressed a rare issue that could confuse the current virtual function + dispatcher and another that could lead to crashes in multi-threaded + applications +* Added a ``get_include()`` function to the Python module that returns the path + of the directory containing the installed pybind11 header files +* Documentation improvements: import issues, symbol visibility, pickling, limitations +* Added casting support for ``std::reference_wrapper<>`` + +1.4 (April 7, 2016) +-------------------------- +* Transparent type conversion for ``std::wstring`` and ``wchar_t`` +* Allow passing ``nullptr``-valued strings +* Transparent passing of ``void *`` pointers using capsules +* Transparent support for returning values wrapped in ``std::unique_ptr<>`` +* Improved docstring generation for compatibility with Sphinx +* Nicer debug error message when default parameter construction fails +* Support for "opaque" types that bypass the transparent conversion layer for STL containers +* Redesigned type casting interface to avoid ambiguities that could occasionally cause compiler errors +* Redesigned property implementation; fixes crashes due to an unfortunate default return value policy +* Anaconda package generation support + +1.3 (March 8, 2016) +-------------------------- + +* Added support for the Intel C++ compiler (v15+) +* Added support for the STL unordered set/map data structures +* Added support for the STL linked list data structure +* NumPy-style broadcasting support in ``pybind11::vectorize`` +* pybind11 now displays more verbose error messages when ``arg::operator=()`` fails +* pybind11 internal data structures 
now live in a version-dependent namespace to avoid ABI issues +* Many, many bugfixes involving corner cases and advanced usage + +1.2 (February 7, 2016) +-------------------------- + +* Optional: efficient generation of function signatures at compile time using C++14 +* Switched to a simpler and more general way of dealing with function default + arguments. Unused keyword arguments in function calls are now detected and + cause errors as expected +* New ``keep_alive`` call policy analogous to Boost.Python's ``with_custodian_and_ward`` +* New ``pybind11::base<>`` attribute to indicate a subclass relationship +* Improved interface for RAII type wrappers in ``pytypes.h`` +* Use RAII type wrappers consistently within pybind11 itself. This + fixes various potential refcount leaks when exceptions occur +* Added new ``bytes`` RAII type wrapper (maps to ``string`` in Python 2.7) +* Made handle and related RAII classes const correct, using them more + consistently everywhere now +* Got rid of the ugly ``__pybind11__`` attributes on the Python side---they are + now stored in a C++ hash table that is not visible in Python +* Fixed refcount leaks involving NumPy arrays and bound functions +* Vastly improved handling of shared/smart pointers +* Removed an unnecessary copy operation in ``pybind11::vectorize`` +* Fixed naming clashes when both pybind11 and NumPy headers are included +* Added conversions for additional exception types +* Documentation improvements (using multiple extension modules, smart pointers, + other minor clarifications) +* unified infrastructure for parsing variadic arguments in ``class_`` and cpp_function +* Fixed license text (was: ZLIB, should have been: 3-clause BSD) +* Python 3.2 compatibility +* Fixed remaining issues when accessing types in another plugin module +* Added enum comparison and casting methods +* Improved SFINAE-based detection of whether types are copy-constructible +* Eliminated many warnings about unused variables and the use of 
``offsetof()`` +* Support for ``std::array<>`` conversions + +1.1 (December 7, 2015) +-------------------------- + +* Documentation improvements (GIL, wrapping functions, casting, fixed many typos) +* Generalized conversion of integer types +* Improved support for casting function objects +* Improved support for ``std::shared_ptr<>`` conversions +* Initial support for ``std::set<>`` conversions +* Fixed type resolution issue for types defined in a separate plugin module +* Cmake build system improvements +* Factored out generic functionality to non-templated code (smaller code size) +* Added a code size / compile time benchmark vs Boost.Python +* Added an appveyor CI script + +1.0 (October 15, 2015) +------------------------ +* Initial release diff --git a/diffvg/pybind11/docs/classes.rst b/diffvg/pybind11/docs/classes.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d44a5931d0d4a293d8069ebb3edf860783d7266 --- /dev/null +++ b/diffvg/pybind11/docs/classes.rst @@ -0,0 +1,532 @@ +.. _classes: + +Object-oriented code +#################### + +Creating bindings for a custom type +=================================== + +Let's now look at a more complex example where we'll create bindings for a +custom C++ data structure named ``Pet``. Its definition is given below: + +.. code-block:: cpp + + struct Pet { + Pet(const std::string &name) : name(name) { } + void setName(const std::string &name_) { name = name_; } + const std::string &getName() const { return name; } + + std::string name; + }; + +The binding code for ``Pet`` looks as follows: + +.. code-block:: cpp + + #include + + namespace py = pybind11; + + PYBIND11_MODULE(example, m) { + py::class_(m, "Pet") + .def(py::init()) + .def("setName", &Pet::setName) + .def("getName", &Pet::getName); + } + +:class:`class_` creates bindings for a C++ *class* or *struct*-style data +structure. 
:func:`init` is a convenience function that takes the types of a +constructor's parameters as template arguments and wraps the corresponding +constructor (see the :ref:`custom_constructors` section for details). An +interactive Python session demonstrating this example is shown below: + +.. code-block:: pycon + + % python + >>> import example + >>> p = example.Pet('Molly') + >>> print(p) + + >>> p.getName() + u'Molly' + >>> p.setName('Charly') + >>> p.getName() + u'Charly' + +.. seealso:: + + Static member functions can be bound in the same way using + :func:`class_::def_static`. + +Keyword and default arguments +============================= +It is possible to specify keyword and default arguments using the syntax +discussed in the previous chapter. Refer to the sections :ref:`keyword_args` +and :ref:`default_args` for details. + +Binding lambda functions +======================== + +Note how ``print(p)`` produced a rather useless summary of our data structure in the example above: + +.. code-block:: pycon + + >>> print(p) + + +To address this, we could bind a utility function that returns a human-readable +summary to the special method slot named ``__repr__``. Unfortunately, there is no +suitable functionality in the ``Pet`` data structure, and it would be nice if +we did not have to change it. This can easily be accomplished by binding a +Lambda function instead: + +.. code-block:: cpp + + py::class_(m, "Pet") + .def(py::init()) + .def("setName", &Pet::setName) + .def("getName", &Pet::getName) + .def("__repr__", + [](const Pet &a) { + return ""; + } + ); + +Both stateless [#f1]_ and stateful lambda closures are supported by pybind11. +With the above change, the same Python code now produces the following output: + +.. code-block:: pycon + + >>> print(p) + + +.. [#f1] Stateless closures are those with an empty pair of brackets ``[]`` as the capture object. + +.. 
_properties: + +Instance and static fields +========================== + +We can also directly expose the ``name`` field using the +:func:`class_::def_readwrite` method. A similar :func:`class_::def_readonly` +method also exists for ``const`` fields. + +.. code-block:: cpp + + py::class_(m, "Pet") + .def(py::init()) + .def_readwrite("name", &Pet::name) + // ... remainder ... + +This makes it possible to write + +.. code-block:: pycon + + >>> p = example.Pet('Molly') + >>> p.name + u'Molly' + >>> p.name = 'Charly' + >>> p.name + u'Charly' + +Now suppose that ``Pet::name`` was a private internal variable +that can only be accessed via setters and getters. + +.. code-block:: cpp + + class Pet { + public: + Pet(const std::string &name) : name(name) { } + void setName(const std::string &name_) { name = name_; } + const std::string &getName() const { return name; } + private: + std::string name; + }; + +In this case, the method :func:`class_::def_property` +(:func:`class_::def_property_readonly` for read-only data) can be used to +provide a field-like interface within Python that will transparently call +the setter and getter functions: + +.. code-block:: cpp + + py::class_(m, "Pet") + .def(py::init()) + .def_property("name", &Pet::getName, &Pet::setName) + // ... remainder ... + +Write only properties can be defined by passing ``nullptr`` as the +input for the read function. + +.. seealso:: + + Similar functions :func:`class_::def_readwrite_static`, + :func:`class_::def_readonly_static` :func:`class_::def_property_static`, + and :func:`class_::def_property_readonly_static` are provided for binding + static variables and properties. Please also see the section on + :ref:`static_properties` in the advanced part of the documentation. + +Dynamic attributes +================== + +Native Python classes can pick up new attributes dynamically: + +.. code-block:: pycon + + >>> class Pet: + ... name = 'Molly' + ... 
+ >>> p = Pet() + >>> p.name = 'Charly' # overwrite existing + >>> p.age = 2 # dynamically add a new attribute + +By default, classes exported from C++ do not support this and the only writable +attributes are the ones explicitly defined using :func:`class_::def_readwrite` +or :func:`class_::def_property`. + +.. code-block:: cpp + + py::class_(m, "Pet") + .def(py::init<>()) + .def_readwrite("name", &Pet::name); + +Trying to set any other attribute results in an error: + +.. code-block:: pycon + + >>> p = example.Pet() + >>> p.name = 'Charly' # OK, attribute defined in C++ + >>> p.age = 2 # fail + AttributeError: 'Pet' object has no attribute 'age' + +To enable dynamic attributes for C++ classes, the :class:`py::dynamic_attr` tag +must be added to the :class:`py::class_` constructor: + +.. code-block:: cpp + + py::class_(m, "Pet", py::dynamic_attr()) + .def(py::init<>()) + .def_readwrite("name", &Pet::name); + +Now everything works as expected: + +.. code-block:: pycon + + >>> p = example.Pet() + >>> p.name = 'Charly' # OK, overwrite value in C++ + >>> p.age = 2 # OK, dynamically add a new attribute + >>> p.__dict__ # just like a native Python class + {'age': 2} + +Note that there is a small runtime cost for a class with dynamic attributes. +Not only because of the addition of a ``__dict__``, but also because of more +expensive garbage collection tracking which must be activated to resolve +possible circular references. Native Python classes incur this same cost by +default, so this is not anything to worry about. By default, pybind11 classes +are more efficient than native Python classes. Enabling dynamic attributes +just brings them on par. + +.. _inheritance: + +Inheritance and automatic downcasting +===================================== + +Suppose now that the example consists of two data structures with an +inheritance relationship: + +.. 
code-block:: cpp + + struct Pet { + Pet(const std::string &name) : name(name) { } + std::string name; + }; + + struct Dog : Pet { + Dog(const std::string &name) : Pet(name) { } + std::string bark() const { return "woof!"; } + }; + +There are two different ways of indicating a hierarchical relationship to +pybind11: the first specifies the C++ base class as an extra template +parameter of the :class:`class_`: + +.. code-block:: cpp + + py::class_(m, "Pet") + .def(py::init()) + .def_readwrite("name", &Pet::name); + + // Method 1: template parameter: + py::class_(m, "Dog") + .def(py::init()) + .def("bark", &Dog::bark); + +Alternatively, we can also assign a name to the previously bound ``Pet`` +:class:`class_` object and reference it when binding the ``Dog`` class: + +.. code-block:: cpp + + py::class_ pet(m, "Pet"); + pet.def(py::init()) + .def_readwrite("name", &Pet::name); + + // Method 2: pass parent class_ object: + py::class_(m, "Dog", pet /* <- specify Python parent type */) + .def(py::init()) + .def("bark", &Dog::bark); + +Functionality-wise, both approaches are equivalent. Afterwards, instances will +expose fields and methods of both types: + +.. code-block:: pycon + + >>> p = example.Dog('Molly') + >>> p.name + u'Molly' + >>> p.bark() + u'woof!' + +The C++ classes defined above are regular non-polymorphic types with an +inheritance relationship. This is reflected in Python: + +.. code-block:: cpp + + // Return a base pointer to a derived instance + m.def("pet_store", []() { return std::unique_ptr(new Dog("Molly")); }); + +.. code-block:: pycon + + >>> p = example.pet_store() + >>> type(p) # `Dog` instance behind `Pet` pointer + Pet # no pointer downcasting for regular non-polymorphic types + >>> p.bark() + AttributeError: 'Pet' object has no attribute 'bark' + +The function returned a ``Dog`` instance, but because it's a non-polymorphic +type behind a base pointer, Python only sees a ``Pet``. 
In C++, a type is only +considered polymorphic if it has at least one virtual function and pybind11 +will automatically recognize this: + +.. code-block:: cpp + + struct PolymorphicPet { + virtual ~PolymorphicPet() = default; + }; + + struct PolymorphicDog : PolymorphicPet { + std::string bark() const { return "woof!"; } + }; + + // Same binding code + py::class_(m, "PolymorphicPet"); + py::class_(m, "PolymorphicDog") + .def(py::init<>()) + .def("bark", &PolymorphicDog::bark); + + // Again, return a base pointer to a derived instance + m.def("pet_store2", []() { return std::unique_ptr(new PolymorphicDog); }); + +.. code-block:: pycon + + >>> p = example.pet_store2() + >>> type(p) + PolymorphicDog # automatically downcast + >>> p.bark() + u'woof!' + +Given a pointer to a polymorphic base, pybind11 performs automatic downcasting +to the actual derived type. Note that this goes beyond the usual situation in +C++: we don't just get access to the virtual functions of the base, we get the +concrete derived type including functions and attributes that the base type may +not even be aware of. + +.. seealso:: + + For more information about polymorphic behavior see :ref:`overriding_virtuals`. + + +Overloaded methods +================== + +Sometimes there are several overloaded C++ methods with the same name taking +different kinds of input arguments: + +.. code-block:: cpp + + struct Pet { + Pet(const std::string &name, int age) : name(name), age(age) { } + + void set(int age_) { age = age_; } + void set(const std::string &name_) { name = name_; } + + std::string name; + int age; + }; + +Attempting to bind ``Pet::set`` will cause an error since the compiler does not +know which method the user intended to select. We can disambiguate by casting +them to function pointers. Binding multiple functions to the same Python name +automatically creates a chain of function overloads that will be tried in +sequence. + +.. 
code-block:: cpp + + py::class_(m, "Pet") + .def(py::init()) + .def("set", (void (Pet::*)(int)) &Pet::set, "Set the pet's age") + .def("set", (void (Pet::*)(const std::string &)) &Pet::set, "Set the pet's name"); + +The overload signatures are also visible in the method's docstring: + +.. code-block:: pycon + + >>> help(example.Pet) + + class Pet(__builtin__.object) + | Methods defined here: + | + | __init__(...) + | Signature : (Pet, str, int) -> NoneType + | + | set(...) + | 1. Signature : (Pet, int) -> NoneType + | + | Set the pet's age + | + | 2. Signature : (Pet, str) -> NoneType + | + | Set the pet's name + +If you have a C++14 compatible compiler [#cpp14]_, you can use an alternative +syntax to cast the overloaded function: + +.. code-block:: cpp + + py::class_(m, "Pet") + .def("set", py::overload_cast(&Pet::set), "Set the pet's age") + .def("set", py::overload_cast(&Pet::set), "Set the pet's name"); + +Here, ``py::overload_cast`` only requires the parameter types to be specified. +The return type and class are deduced. This avoids the additional noise of +``void (Pet::*)()`` as seen in the raw cast. If a function is overloaded based +on constness, the ``py::const_`` tag should be used: + +.. code-block:: cpp + + struct Widget { + int foo(int x, float y); + int foo(int x, float y) const; + }; + + py::class_(m, "Widget") + .def("foo_mutable", py::overload_cast(&Widget::foo)) + .def("foo_const", py::overload_cast(&Widget::foo, py::const_)); + +If you prefer the ``py::overload_cast`` syntax but have a C++11 compatible compiler only, +you can use ``py::detail::overload_cast_impl`` with an additional set of parentheses: + +.. code-block:: cpp + + template + using overload_cast_ = pybind11::detail::overload_cast_impl; + + py::class_(m, "Pet") + .def("set", overload_cast_()(&Pet::set), "Set the pet's age") + .def("set", overload_cast_()(&Pet::set), "Set the pet's name"); + +.. 
[#cpp14] A compiler which supports the ``-std=c++14`` flag + or Visual Studio 2015 Update 2 and newer. + +.. note:: + + To define multiple overloaded constructors, simply declare one after the + other using the ``.def(py::init<...>())`` syntax. The existing machinery + for specifying keyword and default arguments also works. + +Enumerations and internal types +=============================== + +Let's now suppose that the example class contains an internal enumeration type, +e.g.: + +.. code-block:: cpp + + struct Pet { + enum Kind { + Dog = 0, + Cat + }; + + Pet(const std::string &name, Kind type) : name(name), type(type) { } + + std::string name; + Kind type; + }; + +The binding code for this example looks as follows: + +.. code-block:: cpp + + py::class_ pet(m, "Pet"); + + pet.def(py::init()) + .def_readwrite("name", &Pet::name) + .def_readwrite("type", &Pet::type); + + py::enum_(pet, "Kind") + .value("Dog", Pet::Kind::Dog) + .value("Cat", Pet::Kind::Cat) + .export_values(); + +To ensure that the ``Kind`` type is created within the scope of ``Pet``, the +``pet`` :class:`class_` instance must be supplied to the :class:`enum_`. +constructor. The :func:`enum_::export_values` function exports the enum entries +into the parent scope, which should be skipped for newer C++11-style strongly +typed enums. + +.. code-block:: pycon + + >>> p = Pet('Lucy', Pet.Cat) + >>> p.type + Kind.Cat + >>> int(p.type) + 1L + +The entries defined by the enumeration type are exposed in the ``__members__`` property: + +.. code-block:: pycon + + >>> Pet.Kind.__members__ + {'Dog': Kind.Dog, 'Cat': Kind.Cat} + +The ``name`` property returns the name of the enum value as a unicode string. + +.. note:: + + It is also possible to use ``str(enum)``, however these accomplish different + goals. The following shows how these two approaches differ. + + .. 
code-block:: pycon + + >>> p = Pet( "Lucy", Pet.Cat ) + >>> pet_type = p.type + >>> pet_type + Pet.Cat + >>> str(pet_type) + 'Pet.Cat' + >>> pet_type.name + 'Cat' + +.. note:: + + When the special tag ``py::arithmetic()`` is specified to the ``enum_`` + constructor, pybind11 creates an enumeration that also supports rudimentary + arithmetic and bit-level operations like comparisons, and, or, xor, negation, + etc. + + .. code-block:: cpp + + py::enum_(pet, "Kind", py::arithmetic()) + ... + + By default, these are omitted to conserve space. diff --git a/diffvg/pybind11/docs/compiling.rst b/diffvg/pybind11/docs/compiling.rst new file mode 100644 index 0000000000000000000000000000000000000000..72b0c1eecf352e4ae6657cd6c5293542eba63ec5 --- /dev/null +++ b/diffvg/pybind11/docs/compiling.rst @@ -0,0 +1,400 @@ +.. _compiling: + +Build systems +############# + +Building with setuptools +======================== + +For projects on PyPI, building with setuptools is the way to go. Sylvain Corlay +has kindly provided an example project which shows how to set up everything, +including automatic generation of documentation using Sphinx. Please refer to +the [python_example]_ repository. + +.. [python_example] https://github.com/pybind/python_example + +Building with cppimport +======================== + +[cppimport]_ is a small Python import hook that determines whether there is a C++ +source file whose name matches the requested module. If there is, the file is +compiled as a Python extension using pybind11 and placed in the same folder as +the C++ source file. Python is then able to find the module and load it. + +.. [cppimport] https://github.com/tbenthompson/cppimport + +.. _cmake: + +Building with CMake +=================== + +For C++ codebases that have an existing CMake-based build system, a Python +extension module can be created with just a few lines of code: + +.. 
code-block:: cmake + + cmake_minimum_required(VERSION 3.4...3.18) + project(example LANGUAGES CXX) + + add_subdirectory(pybind11) + pybind11_add_module(example example.cpp) + +This assumes that the pybind11 repository is located in a subdirectory named +:file:`pybind11` and that the code is located in a file named :file:`example.cpp`. +The CMake command ``add_subdirectory`` will import the pybind11 project which +provides the ``pybind11_add_module`` function. It will take care of all the +details needed to build a Python extension module on any platform. + +A working sample project, including a way to invoke CMake from :file:`setup.py` for +PyPI integration, can be found in the [cmake_example]_ repository. + +.. [cmake_example] https://github.com/pybind/cmake_example + +.. versionchanged:: 2.6 + CMake 3.4+ is required. + +pybind11_add_module +------------------- + +To ease the creation of Python extension modules, pybind11 provides a CMake +function with the following signature: + +.. code-block:: cmake + + pybind11_add_module( [MODULE | SHARED] [EXCLUDE_FROM_ALL] + [NO_EXTRAS] [THIN_LTO] source1 [source2 ...]) + +This function behaves very much like CMake's builtin ``add_library`` (in fact, +it's a wrapper function around that command). It will add a library target +called ```` to be built from the listed source files. In addition, it +will take care of all the Python-specific compiler and linker flags as well +as the OS- and Python-version-specific file extension. The produced target +```` can be further manipulated with regular CMake commands. + +``MODULE`` or ``SHARED`` may be given to specify the type of library. If no +type is given, ``MODULE`` is used by default which ensures the creation of a +Python-exclusive module. Specifying ``SHARED`` will create a more traditional +dynamic library which can also be linked from elsewhere. ``EXCLUDE_FROM_ALL`` +removes this target from the default build (see CMake docs for details). 
+ +Since pybind11 is a template library, ``pybind11_add_module`` adds compiler +flags to ensure high quality code generation without bloat arising from long +symbol names and duplication of code in different translation units. It +sets default visibility to *hidden*, which is required for some pybind11 +features and functionality when attempting to load multiple pybind11 modules +compiled under different pybind11 versions. It also adds additional flags +enabling LTO (Link Time Optimization) and strips unneeded symbols. See the +:ref:`FAQ entry ` for a more detailed explanation. These +latter optimizations are never applied in ``Debug`` mode. If ``NO_EXTRAS`` is +given, they will always be disabled, even in ``Release`` mode. However, this +will result in code bloat and is generally not recommended. + +As stated above, LTO is enabled by default. Some newer compilers also support +different flavors of LTO such as `ThinLTO`_. Setting ``THIN_LTO`` will cause +the function to prefer this flavor if available. The function falls back to +regular LTO if ``-flto=thin`` is not available. If +``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` is set (either ON or OFF), then that +will be respected instead of the built-in flag search. + +.. _ThinLTO: http://clang.llvm.org/docs/ThinLTO.html + +Configuration variables +----------------------- + +By default, pybind11 will compile modules with the compiler default or the +minimum standard required by pybind11, whichever is higher. You can set the +standard explicitly with +`CMAKE_CXX_STANDARD `_: + +.. code-block:: cmake + + set(CMAKE_CXX_STANDARD 14) # or 11, 14, 17, 20 + set(CMAKE_CXX_STANDARD_REQUIRED ON) # optional, ensure standard is supported + set(CMAKE_CXX_EXTENSIONS OFF) # optional, keep compiler extensions off + + +The variables can also be set when calling CMake from the command line using +the ``-D=`` flag. 
You can also manually set ``CXX_STANDARD`` +on a target or use ``target_compile_features`` on your targets - anything that +CMake supports. + +Classic Python support: The target Python version can be selected by setting +``PYBIND11_PYTHON_VERSION`` or an exact Python installation can be specified +with ``PYTHON_EXECUTABLE``. For example: + +.. code-block:: bash + + cmake -DPYBIND11_PYTHON_VERSION=3.6 .. + + # Another method: + cmake -DPYTHON_EXECUTABLE=/path/to/python .. + + # This often is a good way to get the current Python, works in environments: + cmake -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") .. + + +find_package vs. add_subdirectory +--------------------------------- + +For CMake-based projects that don't include the pybind11 repository internally, +an external installation can be detected through ``find_package(pybind11)``. +See the `Config file`_ docstring for details of relevant CMake variables. + +.. code-block:: cmake + + cmake_minimum_required(VERSION 3.4...3.18) + project(example LANGUAGES CXX) + + find_package(pybind11 REQUIRED) + pybind11_add_module(example example.cpp) + +Note that ``find_package(pybind11)`` will only work correctly if pybind11 +has been correctly installed on the system, e. g. after downloading or cloning +the pybind11 repository : + +.. code-block:: bash + + # Classic CMake + cd pybind11 + mkdir build + cd build + cmake .. + make install + + # CMake 3.15+ + cd pybind11 + cmake -S . -B build + cmake --build build -j 2 # Build on 2 cores + cmake --install build + +Once detected, the aforementioned ``pybind11_add_module`` can be employed as +before. The function usage and configuration variables are identical no matter +if pybind11 is added as a subdirectory or found as an installed package. You +can refer to the same [cmake_example]_ repository for a full sample project +-- just swap out ``add_subdirectory`` for ``find_package``. + +.. 
_Config file: https://github.com/pybind/pybind11/blob/master/tools/pybind11Config.cmake.in + + +.. _find-python-mode: + +FindPython mode +--------------- + +CMake 3.12+ (3.15+ recommended) added a new module called FindPython that had a +highly improved search algorithm and modern targets and tools. If you use +FindPython, pybind11 will detect this and use the existing targets instead: + +.. code-block:: cmake + + cmake_minimum_required(VERSION 3.15...3.18) + project(example LANGUAGES CXX) + + find_package(Python COMPONENTS Interpreter Development REQUIRED) + find_package(pybind11 CONFIG REQUIRED) + # or add_subdirectory(pybind11) + + pybind11_add_module(example example.cpp) + +You can also use the targets (as listed below) with FindPython. If you define +``PYBIND11_FINDPYTHON``, pybind11 will perform the FindPython step for you +(mostly useful when building pybind11's own tests, or as a way to change search +algorithms from the CMake invocation, with ``-DPYBIND11_FINDPYTHON=ON``). + +.. warning:: + + If you use FindPython2 and FindPython3 to dual-target Python, use the + individual targets listed below, and avoid targets that directly include + Python parts. + +There are `many ways to hint or force a discovery of a specific Python +installation `_, +setting ``Python_ROOT_DIR`` may be the most common one (though with +virtualenv/venv support, and Conda support, this tends to find the correct +Python version more often than the old system did). + +.. versionadded:: 2.6 + +Advanced: interface library targets +----------------------------------- + +Pybind11 supports modern CMake usage patterns with a set of interface targets, +available in all modes. 
The targets provided are: + + ``pybind11::headers`` + Just the pybind11 headers and minimum compile requirements + + ``pybind11::python2_no_register`` + Quiets the warning/error when mixing C++14 or higher and Python 2 + + ``pybind11::pybind11`` + Python headers + ``pybind11::headers`` + ``pybind11::python2_no_register`` (Python 2 only) + + ``pybind11::python_link_helper`` + Just the "linking" part of pybind11:module + + ``pybind11::module`` + Everything for extension modules - ``pybind11::pybind11`` + ``Python::Module`` (FindPython CMake 3.15+) or ``pybind11::python_link_helper`` + + ``pybind11::embed`` + Everything for embedding the Python interpreter - ``pybind11::pybind11`` + ``Python::Embed`` (FindPython) or Python libs + + ``pybind11::lto`` / ``pybind11::thin_lto`` + An alternative to `INTERPROCEDURAL_OPTIMIZATION` for adding link-time optimization. + + ``pybind11::windows_extras`` + ``/bigobj`` and ``/mp`` for MSVC. + +Two helper functions are also provided: + + ``pybind11_strip(target)`` + Strips a target (uses ``CMAKE_STRIP`` after the target is built) + + ``pybind11_extension(target)`` + Sets the correct extension (with SOABI) for a target. + +You can use these targets to build complex applications. For example, the +``add_python_module`` function is identical to: + +.. code-block:: cmake + + cmake_minimum_required(VERSION 3.4) + project(example LANGUAGES CXX) + + find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11) + + add_library(example MODULE main.cpp) + + target_link_libraries(example PRIVATE pybind11::module pybind11::lto pybind11::windows_extras) + + pybind11_extension(example) + pybind11_strip(example) + + set_target_properties(example PROPERTIES CXX_VISIBILITY_PRESET "hidden" + CUDA_VISIBILITY_PRESET "hidden") + +Instead of setting properties, you can set ``CMAKE_*`` variables to initialize these correctly. + +.. 
warning:: + + Since pybind11 is a metatemplate library, it is crucial that certain + compiler flags are provided to ensure high quality code generation. In + contrast to the ``pybind11_add_module()`` command, the CMake interface + provides a *composable* set of targets to ensure that you retain flexibility. + It can be especially important to provide or set these properties; the + :ref:`FAQ ` contains an explanation on why these are needed. + +.. versionadded:: 2.6 + +.. _nopython-mode: + +Advanced: NOPYTHON mode +----------------------- + +If you want complete control, you can set ``PYBIND11_NOPYTHON`` to completely +disable Python integration (this also happens if you run ``FindPython2`` and +``FindPython3`` without running ``FindPython``). This gives you complete +freedom to integrate into an existing system (like `Scikit-Build's +`_ ``PythonExtensions``). +``pybind11_add_module`` and ``pybind11_extension`` will be unavailable, and the +targets will be missing any Python specific behavior. + +.. versionadded:: 2.6 + +Embedding the Python interpreter +-------------------------------- + +In addition to extension modules, pybind11 also supports embedding Python into +a C++ executable or library. In CMake, simply link with the ``pybind11::embed`` +target. It provides everything needed to get the interpreter running. The Python +headers and libraries are attached to the target. Unlike ``pybind11::module``, +there is no need to manually set any additional properties here. For more +information about usage in C++, see :doc:`/advanced/embedding`. + +.. code-block:: cmake + + cmake_minimum_required(VERSION 3.4...3.18) + project(example LANGUAGES CXX) + + find_package(pybind11 REQUIRED) # or add_subdirectory(pybind11) + + add_executable(example main.cpp) + target_link_libraries(example PRIVATE pybind11::embed) + +.. 
_building_manually: + +Building manually +================= + +pybind11 is a header-only library, hence it is not necessary to link against +any special libraries and there are no intermediate (magic) translation steps. + +On Linux, you can compile an example such as the one given in +:ref:`simple_example` using the following command: + +.. code-block:: bash + + $ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix` + +The flags given here assume that you're using Python 3. For Python 2, just +change the executable appropriately (to ``python`` or ``python2``). + +The ``python3 -m pybind11 --includes`` command fetches the include paths for +both pybind11 and Python headers. This assumes that pybind11 has been installed +using ``pip`` or ``conda``. If it hasn't, you can also manually specify +``-I /include`` together with the Python includes path +``python3-config --includes``. + +Note that Python 2.7 modules don't use a special suffix, so you should simply +use ``example.so`` instead of ``example`python3-config --extension-suffix```. +Besides, the ``--extension-suffix`` option may or may not be available, depending +on the distribution; in the latter case, the module extension can be manually +set to ``.so``. + +On Mac OS: the build command is almost the same but it also requires passing +the ``-undefined dynamic_lookup`` flag so as to ignore missing symbols when +building the module: + +.. code-block:: bash + + $ c++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix` + +In general, it is advisable to include several additional build parameters +that can considerably reduce the size of the created binary. Refer to section +:ref:`cmake` for a detailed example of a suitable cross-platform CMake-based +build system that works on all platforms including Windows. + +.. 
note:: + + On Linux and macOS, it's better to (intentionally) not link against + ``libpython``. The symbols will be resolved when the extension library + is loaded into a Python binary. This is preferable because you might + have several different installations of a given Python version (e.g. the + system-provided Python, and one that ships with a piece of commercial + software). In this way, the plugin will work with both versions, instead + of possibly importing a second Python library into a process that already + contains one (which will lead to a segfault). + +Generating binding code automatically +===================================== + +The ``Binder`` project is a tool for automatic generation of pybind11 binding +code by introspecting existing C++ codebases using LLVM/Clang. See the +[binder]_ documentation for details. + +.. [binder] http://cppbinder.readthedocs.io/en/latest/about.html + +[AutoWIG]_ is a Python library that wraps automatically compiled libraries into +high-level languages. It parses C++ code using LLVM/Clang technologies and +generates the wrappers using the Mako templating engine. The approach is automatic, +extensible, and applies to very complex C++ libraries, composed of thousands of +classes or incorporating modern meta-programming constructs. + +.. [AutoWIG] https://github.com/StatisKit/AutoWIG + +[robotpy-build]_ is a pure python, cross platform build tool that aims to +simplify creation of python wheels for pybind11 projects, and provide +cross-project dependency management. Additionally, it is able to autogenerate +customizable pybind11-based wrappers by parsing C++ header files. + +.. 
[robotpy-build] https://robotpy-build.readthedocs.io diff --git a/diffvg/pybind11/docs/conf.py b/diffvg/pybind11/docs/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..0946f30e2e1ddea55a7d4c4069b8a989a29fe5e9 --- /dev/null +++ b/diffvg/pybind11/docs/conf.py @@ -0,0 +1,332 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# pybind11 documentation build configuration file, created by +# sphinx-quickstart on Sun Oct 11 19:23:48 2015. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex +import subprocess + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['breathe'] + +breathe_projects = {'pybind11': '.build/doxygenxml/'} +breathe_default_project = 'pybind11' +breathe_domain_by_extension = {'h': 'cpp'} + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['.templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. 
+master_doc = 'index' + +# General information about the project. +project = 'pybind11' +copyright = '2017, Wenzel Jakob' +author = 'Wenzel Jakob' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '2.5' +# The full version, including alpha/beta/rc tags. +release = '2.5.dev1' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['.build', 'release.rst'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +default_role = 'any' + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +#pygments_style = 'monokai' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. 
+todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. + +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +if not on_rtd: # only import and set the theme if we're building docs locally + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + + html_context = { + 'css_files': [ + '_static/theme_overrides.css' + ] + } +else: + html_context = { + 'css_files': [ + '//media.readthedocs.org/css/sphinx_rtd_theme.css', + '//media.readthedocs.org/css/readthedocs-doc-embed.css', + '_static/theme_overrides.css' + ] + } + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. 
These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' +#html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. 
+# Now only 'ja' uses this config value +#html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +#html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'pybind11doc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +'preamble': r'\DeclareUnicodeCharacter{00A0}{}', + +# Latex figure (float) alignment +#'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'pybind11.tex', 'pybind11 Documentation', + 'Wenzel Jakob', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = 'pybind11-logo.png' + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'pybind11', 'pybind11 Documentation', + [author], 1) +] + +# If true, show URL addresses after external links. 
+#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'pybind11', 'pybind11 Documentation', + author, 'pybind11', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + +primary_domain = 'cpp' +highlight_language = 'cpp' + + +def generate_doxygen_xml(app): + build_dir = os.path.join(app.confdir, '.build') + if not os.path.exists(build_dir): + os.mkdir(build_dir) + + try: + subprocess.call(['doxygen', '--version']) + retcode = subprocess.call(['doxygen'], cwd=app.confdir) + if retcode < 0: + sys.stderr.write("doxygen error code: {}\n".format(-retcode)) + except OSError as e: + sys.stderr.write("doxygen execution failed: {}\n".format(e)) + + +def setup(app): + """Add hook for building doxygen xml when needed""" + app.connect("builder-inited", generate_doxygen_xml) diff --git a/diffvg/pybind11/docs/faq.rst b/diffvg/pybind11/docs/faq.rst new file mode 100644 index 0000000000000000000000000000000000000000..b68562910ad271c2417a15f35aa81cba03312e45 --- /dev/null +++ b/diffvg/pybind11/docs/faq.rst @@ -0,0 +1,324 @@ +Frequently asked questions +########################## + +"ImportError: dynamic module does not define init function" +=========================================================== + +1. Make sure that the name specified in PYBIND11_MODULE is identical to the +filename of the extension library (without suffixes such as .so) + +2. 
If the above did not fix the issue, you are likely using an incompatible +version of Python (for instance, the extension library was compiled against +Python 2, while the interpreter is running on top of some version of Python +3, or vice versa). + +"Symbol not found: ``__Py_ZeroStruct`` / ``_PyInstanceMethod_Type``" +======================================================================== + +See the first answer. + +"SystemError: dynamic module not initialized properly" +====================================================== + +See the first answer. + +The Python interpreter immediately crashes when importing my module +=================================================================== + +See the first answer. + +CMake doesn't detect the right Python version +============================================= + +The CMake-based build system will try to automatically detect the installed +version of Python and link against that. When this fails, or when there are +multiple versions of Python and it finds the wrong one, delete +``CMakeCache.txt`` and then invoke CMake as follows: + +.. code-block:: bash + + cmake -DPYTHON_EXECUTABLE:FILEPATH= . + +.. _faq_reference_arguments: + +Limitations involving reference arguments +========================================= + +In C++, it's fairly common to pass arguments using mutable references or +mutable pointers, which allows both read and write access to the value +supplied by the caller. This is sometimes done for efficiency reasons, or to +realize functions that have multiple return values. Here are two very basic +examples: + +.. code-block:: cpp + + void increment(int &i) { i++; } + void increment_ptr(int *i) { (*i)++; } + +In Python, all arguments are passed by reference, so there is no general +issue in binding such code from Python. + +However, certain basic Python types (like ``str``, ``int``, ``bool``, +``float``, etc.) are **immutable**. 
This means that the following attempt +to port the function to Python doesn't have the same effect on the value +provided by the caller -- in fact, it does nothing at all. + +.. code-block:: python + + def increment(i): + i += 1 # nope.. + +pybind11 is also affected by such language-level conventions, which means that +binding ``increment`` or ``increment_ptr`` will also create Python functions +that don't modify their arguments. + +Although inconvenient, one workaround is to encapsulate the immutable types in +a custom type that does allow modifications. + +An other alternative involves binding a small wrapper lambda function that +returns a tuple with all output arguments (see the remainder of the +documentation for examples on binding lambda functions). An example: + +.. code-block:: cpp + + int foo(int &i) { i++; return 123; } + +and the binding code + +.. code-block:: cpp + + m.def("foo", [](int i) { int rv = foo(i); return std::make_tuple(rv, i); }); + + +How can I reduce the build time? +================================ + +It's good practice to split binding code over multiple files, as in the +following example: + +:file:`example.cpp`: + +.. code-block:: cpp + + void init_ex1(py::module &); + void init_ex2(py::module &); + /* ... */ + + PYBIND11_MODULE(example, m) { + init_ex1(m); + init_ex2(m); + /* ... */ + } + +:file:`ex1.cpp`: + +.. code-block:: cpp + + void init_ex1(py::module &m) { + m.def("add", [](int a, int b) { return a + b; }); + } + +:file:`ex2.cpp`: + +.. code-block:: cpp + + void init_ex2(py::module &m) { + m.def("sub", [](int a, int b) { return a - b; }); + } + +:command:`python`: + +.. code-block:: pycon + + >>> import example + >>> example.add(1, 2) + 3 + >>> example.sub(1, 1) + 0 + +As shown above, the various ``init_ex`` functions should be contained in +separate files that can be compiled independently from one another, and then +linked together into the same final shared object. Following this approach +will: + +1. 
reduce memory requirements per compilation unit. + +2. enable parallel builds (if desired). + +3. allow for faster incremental builds. For instance, when a single class + definition is changed, only a subset of the binding code will generally need + to be recompiled. + +"recursive template instantiation exceeded maximum depth of 256" +================================================================ + +If you receive an error about excessive recursive template evaluation, try +specifying a larger value, e.g. ``-ftemplate-depth=1024`` on GCC/Clang. The +culprit is generally the generation of function signatures at compile time +using C++14 template metaprogramming. + +.. _`faq:hidden_visibility`: + +"β€˜SomeClass’ declared with greater visibility than the type of its field β€˜SomeClass::member’ [-Wattributes]" +============================================================================================================ + +This error typically indicates that you are compiling without the required +``-fvisibility`` flag. pybind11 code internally forces hidden visibility on +all internal code, but if non-hidden (and thus *exported*) code attempts to +include a pybind type (for example, ``py::object`` or ``py::list``) you can run +into this warning. + +To avoid it, make sure you are specifying ``-fvisibility=hidden`` when +compiling pybind code. + +As to why ``-fvisibility=hidden`` is necessary, because pybind modules could +have been compiled under different versions of pybind itself, it is also +important that the symbols defined in one module do not clash with the +potentially-incompatible symbols defined in another. While Python extension +modules are usually loaded with localized symbols (under POSIX systems +typically using ``dlopen`` with the ``RTLD_LOCAL`` flag), this Python default +can be changed, but even if it isn't it is not always enough to guarantee +complete independence of the symbols involved when not using +``-fvisibility=hidden``. 
+ +Additionally, ``-fvisiblity=hidden`` can deliver considerably binary size +savings. (See the following section for more details). + + +.. _`faq:symhidden`: + +How can I create smaller binaries? +================================== + +To do its job, pybind11 extensively relies on a programming technique known as +*template metaprogramming*, which is a way of performing computation at compile +time using type information. Template metaprogamming usually instantiates code +involving significant numbers of deeply nested types that are either completely +removed or reduced to just a few instructions during the compiler's optimization +phase. However, due to the nested nature of these types, the resulting symbol +names in the compiled extension library can be extremely long. For instance, +the included test suite contains the following symbol: + +.. only:: html + + .. code-block:: none + + _​_​Z​N​8​p​y​b​i​n​d​1​1​1​2​c​p​p​_​f​u​n​c​t​i​o​n​C​1​I​v​8​E​x​a​m​p​l​e​2​J​R​N​S​t​3​_​_​1​6​v​e​c​t​o​r​I​N​S​3​_​1​2​b​a​s​i​c​_​s​t​r​i​n​g​I​w​N​S​3​_​1​1​c​h​a​r​_​t​r​a​i​t​s​I​w​E​E​N​S​3​_​9​a​l​l​o​c​a​t​o​r​I​w​E​E​E​E​N​S​8​_​I​S​A​_​E​E​E​E​E​J​N​S​_​4​n​a​m​e​E​N​S​_​7​s​i​b​l​i​n​g​E​N​S​_​9​i​s​_​m​e​t​h​o​d​E​A​2​8​_​c​E​E​E​M​T​0​_​F​T​_​D​p​T​1​_​E​D​p​R​K​T​2​_ + +.. only:: not html + + .. code-block:: cpp + + __ZN8pybind1112cpp_functionC1Iv8Example2JRNSt3__16vectorINS3_12basic_stringIwNS3_11char_traitsIwEENS3_9allocatorIwEEEENS8_ISA_EEEEEJNS_4nameENS_7siblingENS_9is_methodEA28_cEEEMT0_FT_DpT1_EDpRKT2_ + +which is the mangled form of the following function type: + +.. 
code-block:: cpp + + pybind11::cpp_function::cpp_function, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > >&, pybind11::name, pybind11::sibling, pybind11::is_method, char [28]>(void (Example2::*)(std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > >&), pybind11::name const&, pybind11::sibling const&, pybind11::is_method const&, char const (&) [28]) + +The memory needed to store just the mangled name of this function (196 bytes) +is larger than the actual piece of code (111 bytes) it represents! On the other +hand, it's silly to even give this function a name -- after all, it's just a +tiny cog in a bigger piece of machinery that is not exposed to the outside +world. So we'll generally only want to export symbols for those functions which +are actually called from the outside. + +This can be achieved by specifying the parameter ``-fvisibility=hidden`` to GCC +and Clang, which sets the default symbol visibility to *hidden*, which has a +tremendous impact on the final binary size of the resulting extension library. +(On Visual Studio, symbols are already hidden by default, so nothing needs to +be done there.) + +In addition to decreasing binary size, ``-fvisibility=hidden`` also avoids +potential serious issues when loading multiple modules and is required for +proper pybind operation. See the previous FAQ entry for more details. + +Working with ancient Visual Studio 2008 builds on Windows +========================================================= + +The official Windows distributions of Python are compiled using truly +ancient versions of Visual Studio that lack good C++11 support. Some users +implicitly assume that it would be impossible to load a plugin built with +Visual Studio 2015 into a Python distribution that was compiled using Visual +Studio 2008. However, no such issue exists: it's perfectly legitimate to +interface DLLs that are built with different compilers and/or C libraries. 
+Common gotchas to watch out for involve not ``free()``-ing memory regions +that were ``malloc()``-ed in another shared library, using data +structures with incompatible ABIs, and so on. pybind11 is very careful not +to make these types of mistakes. + +How can I properly handle Ctrl-C in long-running functions? +=========================================================== + +Ctrl-C is received by the Python interpreter, and holds it until the GIL +is released, so a long-running function won't be interrupted. + +To interrupt from inside your function, you can use the ``PyErr_CheckSignals()`` +function, that will tell if a signal has been raised on the Python side. This +function merely checks a flag, so its impact is negligible. When a signal has +been received, you must either explicitly interrupt execution by throwing +``py::error_already_set`` (which will propagate the existing +``KeyboardInterrupt``), or clear the error (which you usually will not want): + +.. code-block:: cpp + + PYBIND11_MODULE(example, m) + { + m.def("long_running_func", []() + { + for (;;) { + if (PyErr_CheckSignals() != 0) + throw py::error_already_set(); + // Long running iteration + } + }); + } + +Inconsistent detection of Python version in CMake and pybind11 +============================================================== + +The functions ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` provided by CMake +for Python version detection are not used by pybind11 due to unreliability and limitations that make +them unsuitable for pybind11's needs. Instead pybind provides its own, more reliable Python detection +CMake code. Conflicts can arise, however, when using pybind11 in a project that *also* uses the CMake +Python detection in a system with several Python versions installed. + +This difference may cause inconsistencies and errors if *both* mechanisms are used in the same project. 
Consider the following +Cmake code executed in a system with Python 2.7 and 3.x installed: + +.. code-block:: cmake + + find_package(PythonInterp) + find_package(PythonLibs) + find_package(pybind11) + +It will detect Python 2.7 and pybind11 will pick it as well. + +In contrast this code: + +.. code-block:: cmake + + find_package(pybind11) + find_package(PythonInterp) + find_package(PythonLibs) + +will detect Python 3.x for pybind11 and may crash on ``find_package(PythonLibs)`` afterwards. + +It is advised to avoid using ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` from CMake and rely +on pybind11 in detecting Python version. If this is not possible CMake machinery should be called *before* including pybind11. + +How to cite this project? +========================= + +We suggest the following BibTeX template to cite pybind11 in scientific +discourse: + +.. code-block:: bash + + @misc{pybind11, + author = {Wenzel Jakob and Jason Rhinelander and Dean Moldovan}, + year = {2017}, + note = {https://github.com/pybind/pybind11}, + title = {pybind11 -- Seamless operability between C++11 and Python} + } diff --git a/diffvg/pybind11/docs/index.rst b/diffvg/pybind11/docs/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..d236611b7224454415f395194cb72f783a42af37 --- /dev/null +++ b/diffvg/pybind11/docs/index.rst @@ -0,0 +1,47 @@ +.. only: not latex + + .. image:: pybind11-logo.png + +pybind11 --- Seamless operability between C++11 and Python +========================================================== + +.. only: not latex + + Contents: + +.. toctree:: + :maxdepth: 1 + + intro + changelog + upgrade + +.. toctree:: + :caption: The Basics + :maxdepth: 2 + + basics + classes + compiling + +.. toctree:: + :caption: Advanced Topics + :maxdepth: 2 + + advanced/functions + advanced/classes + advanced/exceptions + advanced/smart_ptrs + advanced/cast/index + advanced/pycpp/index + advanced/embedding + advanced/misc + +.. 
toctree:: + :caption: Extra Information + :maxdepth: 1 + + faq + benchmark + limitations + reference diff --git a/diffvg/pybind11/docs/intro.rst b/diffvg/pybind11/docs/intro.rst new file mode 100644 index 0000000000000000000000000000000000000000..10e1799a19d4a2be8efb8f58515b290ef36514f8 --- /dev/null +++ b/diffvg/pybind11/docs/intro.rst @@ -0,0 +1,93 @@ +.. image:: pybind11-logo.png + +About this project +================== +**pybind11** is a lightweight header-only library that exposes C++ types in Python +and vice versa, mainly to create Python bindings of existing C++ code. Its +goals and syntax are similar to the excellent `Boost.Python`_ library by David +Abrahams: to minimize boilerplate code in traditional extension modules by +inferring type information using compile-time introspection. + +.. _Boost.Python: http://www.boost.org/doc/libs/release/libs/python/doc/index.html + +The main issue with Boost.Pythonβ€”and the reason for creating such a similar +projectβ€”is Boost. Boost is an enormously large and complex suite of utility +libraries that works with almost every C++ compiler in existence. This +compatibility has its cost: arcane template tricks and workarounds are +necessary to support the oldest and buggiest of compiler specimens. Now that +C++11-compatible compilers are widely available, this heavy machinery has +become an excessively large and unnecessary dependency. +Think of this library as a tiny self-contained version of Boost.Python with +everything stripped away that isn't relevant for binding generation. Without +comments, the core header files only require ~4K lines of code and depend on +Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This +compact implementation was possible thanks to some of the new C++11 language +features (specifically: tuples, lambda functions and variadic templates). 
Since +its creation, this library has grown beyond Boost.Python in many ways, leading +to dramatically simpler binding code in many common situations. + +Core features +************* +The following core C++ features can be mapped to Python + +- Functions accepting and returning custom data structures per value, reference, or pointer +- Instance methods and static methods +- Overloaded functions +- Instance attributes and static attributes +- Arbitrary exception types +- Enumerations +- Callbacks +- Iterators and ranges +- Custom operators +- Single and multiple inheritance +- STL data structures +- Smart pointers with reference counting like ``std::shared_ptr`` +- Internal references with correct reference counting +- C++ classes with virtual (and pure virtual) methods can be extended in Python + +Goodies +******* +In addition to the core functionality, pybind11 provides some extra goodies: + +- Python 2.7, 3.x, and PyPy (PyPy2.7 >= 5.7) are supported with an + implementation-agnostic interface. + +- It is possible to bind C++11 lambda functions with captured variables. The + lambda capture data is stored inside the resulting Python function object. + +- pybind11 uses C++11 move constructors and move assignment operators whenever + possible to efficiently transfer custom data types. + +- It's easy to expose the internal storage of custom data types through + Pythons' buffer protocols. This is handy e.g. for fast conversion between + C++ matrix classes like Eigen and NumPy without expensive copy operations. + +- pybind11 can automatically vectorize functions so that they are transparently + applied to all entries of one or more NumPy array arguments. + +- Python's slice-based access and assignment operations can be supported with + just a few lines of code. + +- Everything is contained in just a few header files; there is no need to link + against any additional libraries. 
+ +- Binaries are generally smaller by a factor of at least 2 compared to + equivalent bindings generated by Boost.Python. A recent pybind11 conversion + of `PyRosetta`_, an enormous Boost.Python binding project, reported a binary + size reduction of **5.4x** and compile time reduction by **5.8x**. + +- Function signatures are precomputed at compile time (using ``constexpr``), + leading to smaller binaries. + +- With little extra effort, C++ types can be pickled and unpickled similar to + regular Python objects. + +.. _PyRosetta: http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf + +Supported compilers +******************* + +1. Clang/LLVM (any non-ancient version with C++11 support) +2. GCC 4.8 or newer +3. Microsoft Visual Studio 2015 or newer +4. Intel C++ compiler v17 or newer (v16 with pybind11 v2.0 and v15 with pybind11 v2.0 and a `workaround `_ ) diff --git a/diffvg/pybind11/docs/limitations.rst b/diffvg/pybind11/docs/limitations.rst new file mode 100644 index 0000000000000000000000000000000000000000..59474f82fd9f9f5834d35430cc283f8b57ed10dc --- /dev/null +++ b/diffvg/pybind11/docs/limitations.rst @@ -0,0 +1,19 @@ +Limitations +########### + +pybind11 strives to be a general solution to binding generation, but it also has +certain limitations: + +- pybind11 casts away ``const``-ness in function arguments and return values. + This is in line with the Python language, which has no concept of ``const`` + values. This means that some additional care is needed to avoid bugs that + would be caught by the type checker in a traditional C++ program. + +- The NumPy interface ``pybind11::array`` greatly simplifies accessing + numerical data from C++ (and vice versa), but it's not a full-blown array + class like ``Eigen::Array`` or ``boost.multi_array``. + +These features could be implemented but would lead to a significant increase in +complexity. I've decided to draw the line here to keep this project simple and +compact. 
Users who absolutely require these features are encouraged to fork +pybind11. diff --git a/diffvg/pybind11/docs/pybind11-logo.png b/diffvg/pybind11/docs/pybind11-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..4cbad54f797d3ced04d4048f282df5e4336d4af4 Binary files /dev/null and b/diffvg/pybind11/docs/pybind11-logo.png differ diff --git a/diffvg/pybind11/docs/pybind11_vs_boost_python1.png b/diffvg/pybind11/docs/pybind11_vs_boost_python1.png new file mode 100644 index 0000000000000000000000000000000000000000..833231f240809884fb6eb4079db528b9b3c0a9ac Binary files /dev/null and b/diffvg/pybind11/docs/pybind11_vs_boost_python1.png differ diff --git a/diffvg/pybind11/docs/pybind11_vs_boost_python1.svg b/diffvg/pybind11/docs/pybind11_vs_boost_python1.svg new file mode 100644 index 0000000000000000000000000000000000000000..5bf950e6fdc81676d9a9774926a623b4f6a2e2a8 --- /dev/null +++ b/diffvg/pybind11/docs/pybind11_vs_boost_python1.svg @@ -0,0 +1,427 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/pybind11/docs/pybind11_vs_boost_python2.png b/diffvg/pybind11/docs/pybind11_vs_boost_python2.png new file mode 100644 index 
0000000000000000000000000000000000000000..9f17272c50663957d6ae6d8e23fdd5a15757e71f Binary files /dev/null and b/diffvg/pybind11/docs/pybind11_vs_boost_python2.png differ diff --git a/diffvg/pybind11/docs/pybind11_vs_boost_python2.svg b/diffvg/pybind11/docs/pybind11_vs_boost_python2.svg new file mode 100644 index 0000000000000000000000000000000000000000..5ed6530ca112cbe643d5dd6d6fde385c4edea6b5 --- /dev/null +++ b/diffvg/pybind11/docs/pybind11_vs_boost_python2.svg @@ -0,0 +1,427 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/diffvg/pybind11/docs/reference.rst b/diffvg/pybind11/docs/reference.rst new file mode 100644 index 0000000000000000000000000000000000000000..a9fbe60015ca466e71a37f6f9cd9866bfa7adfe5 --- /dev/null +++ b/diffvg/pybind11/docs/reference.rst @@ -0,0 +1,117 @@ +.. _reference: + +.. warning:: + + Please be advised that the reference documentation discussing pybind11 + internals is currently incomplete. Please refer to the previous sections + and the pybind11 header files for the nitty gritty details. + +Reference +######### + +.. _macros: + +Macros +====== + +.. doxygendefine:: PYBIND11_MODULE + +.. 
_core_types: + +Convenience classes for arbitrary Python types +============================================== + +Common member functions +----------------------- + +.. doxygenclass:: object_api + :members: + +Without reference counting +-------------------------- + +.. doxygenclass:: handle + :members: + +With reference counting +----------------------- + +.. doxygenclass:: object + :members: + +.. doxygenfunction:: reinterpret_borrow + +.. doxygenfunction:: reinterpret_steal + +Convenience classes for specific Python types +============================================= + +.. doxygenclass:: module + :members: + +.. doxygengroup:: pytypes + :members: + +.. _extras: + +Passing extra arguments to ``def`` or ``class_`` +================================================ + +.. doxygengroup:: annotations + :members: + +Embedding the interpreter +========================= + +.. doxygendefine:: PYBIND11_EMBEDDED_MODULE + +.. doxygenfunction:: initialize_interpreter + +.. doxygenfunction:: finalize_interpreter + +.. doxygenclass:: scoped_interpreter + +Redirecting C++ streams +======================= + +.. doxygenclass:: scoped_ostream_redirect + +.. doxygenclass:: scoped_estream_redirect + +.. doxygenfunction:: add_ostream_redirect + +Python built-in functions +========================= + +.. doxygengroup:: python_builtins + :members: + +Inheritance +=========== + +See :doc:`/classes` and :doc:`/advanced/classes` for more detail. + +.. doxygendefine:: PYBIND11_OVERLOAD + +.. doxygendefine:: PYBIND11_OVERLOAD_PURE + +.. doxygendefine:: PYBIND11_OVERLOAD_NAME + +.. doxygendefine:: PYBIND11_OVERLOAD_PURE_NAME + +.. doxygenfunction:: get_overload + +Exceptions +========== + +.. doxygenclass:: error_already_set + :members: + +.. doxygenclass:: builtin_exception + :members: + + +Literals +======== + +.. 
doxygennamespace:: literals diff --git a/diffvg/pybind11/docs/release.rst b/diffvg/pybind11/docs/release.rst new file mode 100644 index 0000000000000000000000000000000000000000..9846f971a6ff88e40ceeaf16e14227ba3b6ae63c --- /dev/null +++ b/diffvg/pybind11/docs/release.rst @@ -0,0 +1,21 @@ +To release a new version of pybind11: + +- Update the version number and push to pypi + - Update ``pybind11/_version.py`` (set release version, remove 'dev'). + - Update ``PYBIND11_VERSION_MAJOR`` etc. in ``include/pybind11/detail/common.h``. + - Ensure that all the information in ``setup.py`` is up-to-date. + - Update version in ``docs/conf.py``. + - Tag release date in ``docs/changelog.rst``. + - ``git add`` and ``git commit``. + - if new minor version: ``git checkout -b vX.Y``, ``git push -u origin vX.Y`` + - ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``. + - ``git push`` + - ``git push --tags``. + - ``python setup.py sdist upload``. + - ``python setup.py bdist_wheel upload``. +- Get back to work + - Update ``_version.py`` (add 'dev' and increment minor). + - Update version in ``docs/conf.py`` + - Update version macros in ``include/pybind11/common.h`` + - ``git add`` and ``git commit``. + ``git push`` diff --git a/diffvg/pybind11/docs/requirements.txt b/diffvg/pybind11/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f4c3dc2e0b658f610497d65b039025ac917cfb5b --- /dev/null +++ b/diffvg/pybind11/docs/requirements.txt @@ -0,0 +1,5 @@ +breathe==4.20.0 +commonmark==0.9.1 +recommonmark==0.6.0 +sphinx==3.2.1 +sphinx_rtd_theme==0.5.0 diff --git a/diffvg/pybind11/docs/upgrade.rst b/diffvg/pybind11/docs/upgrade.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c3f1c32808f3c07236b3e925153a6ac1c317f2e --- /dev/null +++ b/diffvg/pybind11/docs/upgrade.rst @@ -0,0 +1,459 @@ +Upgrade guide +############# + +This is a companion guide to the :doc:`changelog`. 
While the changelog briefly +lists all of the new features, improvements and bug fixes, this upgrade guide +focuses only the subset which directly impacts your experience when upgrading +to a new version. But it goes into more detail. This includes things like +deprecated APIs and their replacements, build system changes, general code +modernization and other useful information. + +.. _upgrade-guide-2.6: + +v2.6 +==== + +An error is now thrown when ``__init__`` is forgotten on subclasses. This was +incorrect before, but was not checked. Add a call to ``__init__`` if it is +missing. + +If ``__eq__`` defined but not ``__hash__``, ``__hash__`` is now set to +``None``, as in normal CPython. You should add ``__hash__`` if you intended the +class to be hashable, possibly using the new ``py::hash`` shortcut. + +CMake support: +-------------- + +The minimum required version of CMake is now 3.4. Several details of the CMake +support have been deprecated; warnings will be shown if you need to change +something. The changes are: + +* ``PYBIND11_CPP_STANDARD=`` is deprecated, please use + ``CMAKE_CXX_STANDARD=`` instead, or any other valid CMake CXX or CUDA + standard selection method, like ``target_compile_features``. + +* If you do not request a standard, PyBind11 targets will compile with the + compiler default, but not less than C++11, instead of forcing C++14 always. + If you depend on the old behavior, please use ``set(CMAKE_CXX_STANDARD 14)`` + instead. + +* Direct ``pybind11::module`` usage should always be accompanied by at least + ``set(CMAKE_CXX_VISIBILITY_PRESET hidden)`` or similar - it used to try to + manually force this compiler flag (but not correctly on all compilers or with + CUDA). + +* ``pybind11_add_module``'s ``SYSTEM`` argument is deprecated and does nothing; + linking now behaves like other imported libraries consistently in both + config and submodule mode, and behaves like a ``SYSTEM`` library by + default. 
+ +* If ``PYTHON_EXECUTABLE`` is not set, virtual environments (``venv``, + ``virtualenv``, and ``conda``) are prioritized over the standard search + (similar to the new FindPython mode). + +In addition, the following changes may be of interest: + +* ``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` will be respected by + ``pybind11_add_module`` if set instead of linking to ``pybind11::lto`` or + ``pybind11::thin_lto``. + +* Using ``find_package(Python COMPONENTS Interpreter Development)`` before + pybind11 will cause pybind11 to use the new Python mechanisms instead of its + own custom search, based on a patched version of classic + FindPythonInterp/FindPythonLibs. In the future, this may become the default. + + + +v2.2 +==== + +Deprecation of the ``PYBIND11_PLUGIN`` macro +-------------------------------------------- + +``PYBIND11_MODULE`` is now the preferred way to create module entry points. +The old macro emits a compile-time deprecation warning. + +.. code-block:: cpp + + // old + PYBIND11_PLUGIN(example) { + py::module m("example", "documentation string"); + + m.def("add", [](int a, int b) { return a + b; }); + + return m.ptr(); + } + + // new + PYBIND11_MODULE(example, m) { + m.doc() = "documentation string"; // optional + + m.def("add", [](int a, int b) { return a + b; }); + } + + +New API for defining custom constructors and pickling functions +--------------------------------------------------------------- + +The old placement-new custom constructors have been deprecated. The new approach +uses ``py::init()`` and factory functions to greatly improve type safety. + +Placement-new can be called accidentally with an incompatible type (without any +compiler errors or warnings), or it can initialize the same object multiple times +if not careful with the Python-side ``__init__`` calls. The new-style custom +constructors prevent such mistakes. See :ref:`custom_constructors` for details. + +.. 
code-block:: cpp + + // old -- deprecated (runtime warning shown only in debug mode) + py::class(m, "Foo") + .def("__init__", [](Foo &self, ...) { + new (&self) Foo(...); // uses placement-new + }); + + // new + py::class(m, "Foo") + .def(py::init([](...) { // Note: no `self` argument + return new Foo(...); // return by raw pointer + // or: return std::make_unique(...); // return by holder + // or: return Foo(...); // return by value (move constructor) + })); + +Mirroring the custom constructor changes, ``py::pickle()`` is now the preferred +way to get and set object state. See :ref:`pickling` for details. + +.. code-block:: cpp + + // old -- deprecated (runtime warning shown only in debug mode) + py::class(m, "Foo") + ... + .def("__getstate__", [](const Foo &self) { + return py::make_tuple(self.value1(), self.value2(), ...); + }) + .def("__setstate__", [](Foo &self, py::tuple t) { + new (&self) Foo(t[0].cast(), ...); + }); + + // new + py::class(m, "Foo") + ... + .def(py::pickle( + [](const Foo &self) { // __getstate__ + return py::make_tuple(f.value1(), f.value2(), ...); // unchanged + }, + [](py::tuple t) { // __setstate__, note: no `self` argument + return new Foo(t[0].cast(), ...); + // or: return std::make_unique(...); // return by holder + // or: return Foo(...); // return by value (move constructor) + } + )); + +For both the constructors and pickling, warnings are shown at module +initialization time (on import, not when the functions are called). +They're only visible when compiled in debug mode. Sample warning: + +.. code-block:: none + + pybind11-bound class 'mymodule.Foo' is using an old-style placement-new '__init__' + which has been deprecated. See the upgrade guide in pybind11's docs. + + +Stricter enforcement of hidden symbol visibility for pybind11 modules +--------------------------------------------------------------------- + +pybind11 now tries to actively enforce hidden symbol visibility for modules. 
+If you're using either one of pybind11's :doc:`CMake or Python build systems +` (the two example repositories) and you haven't been exporting any +symbols, there's nothing to be concerned about. All the changes have been done +transparently in the background. If you were building manually or relied on +specific default visibility, read on. + +Setting default symbol visibility to *hidden* has always been recommended for +pybind11 (see :ref:`faq:symhidden`). On Linux and macOS, hidden symbol +visibility (in conjunction with the ``strip`` utility) yields much smaller +module binaries. `CPython's extension docs`_ also recommend hiding symbols +by default, with the goal of avoiding symbol name clashes between modules. +Starting with v2.2, pybind11 enforces this more strictly: (1) by declaring +all symbols inside the ``pybind11`` namespace as hidden and (2) by including +the ``-fvisibility=hidden`` flag on Linux and macOS (only for extension +modules, not for embedding the interpreter). + +.. _CPython's extension docs: https://docs.python.org/3/extending/extending.html#providing-a-c-api-for-an-extension-module + +The namespace-scope hidden visibility is done automatically in pybind11's +headers and it's generally transparent to users. It ensures that: + +* Modules compiled with different pybind11 versions don't clash with each other. + +* Some new features, like ``py::module_local`` bindings, can work as intended. + +The ``-fvisibility=hidden`` flag applies the same visibility to user bindings +outside of the ``pybind11`` namespace. It's now set automatic by pybind11's +CMake and Python build systems, but this needs to be done manually by users +of other build systems. Adding this flag: + +* Minimizes the chances of symbol conflicts between modules. E.g. if two + unrelated modules were statically linked to different (ABI-incompatible) + versions of the same third-party library, a symbol clash would be likely + (and would end with unpredictable results). 
+ +* Produces smaller binaries on Linux and macOS, as pointed out previously. + +Within pybind11's CMake build system, ``pybind11_add_module`` has always been +setting the ``-fvisibility=hidden`` flag in release mode. From now on, it's +being applied unconditionally, even in debug mode and it can no longer be opted +out of with the ``NO_EXTRAS`` option. The ``pybind11::module`` target now also +adds this flag to it's interface. The ``pybind11::embed`` target is unchanged. + +The most significant change here is for the ``pybind11::module`` target. If you +were previously relying on default visibility, i.e. if your Python module was +doubling as a shared library with dependents, you'll need to either export +symbols manually (recommended for cross-platform libraries) or factor out the +shared library (and have the Python module link to it like the other +dependents). As a temporary workaround, you can also restore default visibility +using the CMake code below, but this is not recommended in the long run: + +.. code-block:: cmake + + target_link_libraries(mymodule PRIVATE pybind11::module) + + add_library(restore_default_visibility INTERFACE) + target_compile_options(restore_default_visibility INTERFACE -fvisibility=default) + target_link_libraries(mymodule PRIVATE restore_default_visibility) + + +Local STL container bindings +---------------------------- + +Previous pybind11 versions could only bind types globally -- all pybind11 +modules, even unrelated ones, would have access to the same exported types. +However, this would also result in a conflict if two modules exported the +same C++ type, which is especially problematic for very common types, e.g. +``std::vector``. :ref:`module_local` were added to resolve this (see +that section for a complete usage guide). 
+ +``py::class_`` still defaults to global bindings (because these types are +usually unique across modules), however in order to avoid clashes of opaque +types, ``py::bind_vector`` and ``py::bind_map`` will now bind STL containers +as ``py::module_local`` if their elements are: builtins (``int``, ``float``, +etc.), not bound using ``py::class_``, or bound as ``py::module_local``. For +example, this change allows multiple modules to bind ``std::vector`` +without causing conflicts. See :ref:`stl_bind` for more details. + +When upgrading to this version, if you have multiple modules which depend on +a single global binding of an STL container, note that all modules can still +accept foreign ``py::module_local`` types in the direction of Python-to-C++. +The locality only affects the C++-to-Python direction. If this is needed in +multiple modules, you'll need to either: + +* Add a copy of the same STL binding to all of the modules which need it. + +* Restore the global status of that single binding by marking it + ``py::module_local(false)``. + +The latter is an easy workaround, but in the long run it would be best to +localize all common type bindings in order to avoid conflicts with +third-party modules. + + +Negative strides for Python buffer objects and numpy arrays +----------------------------------------------------------- + +Support for negative strides required changing the integer type from unsigned +to signed in the interfaces of ``py::buffer_info`` and ``py::array``. If you +have compiler warnings enabled, you may notice some new conversion warnings +after upgrading. These can be resolved using ``static_cast``. + + +Deprecation of some ``py::object`` APIs +--------------------------------------- + +To compare ``py::object`` instances by pointer, you should now use +``obj1.is(obj2)`` which is equivalent to ``obj1 is obj2`` in Python. 
+Previously, pybind11 used ``operator==`` for this (``obj1 == obj2``), but +that could be confusing and is now deprecated (so that it can eventually +be replaced with proper rich object comparison in a future release). + +For classes which inherit from ``py::object``, ``borrowed`` and ``stolen`` +were previously available as protected constructor tags. Now the types +should be used directly instead: ``borrowed_t{}`` and ``stolen_t{}`` +(`#771 `_). + + +Stricter compile-time error checking +------------------------------------ + +Some error checks have been moved from run time to compile time. Notably, +automatic conversion of ``std::shared_ptr`` is not possible when ``T`` is +not directly registered with ``py::class_`` (e.g. ``std::shared_ptr`` +or ``std::shared_ptr>`` are not automatically convertible). +Attempting to bind a function with such arguments now results in a compile-time +error instead of waiting to fail at run time. + +``py::init<...>()`` constructor definitions are also stricter and now prevent +bindings which could cause unexpected behavior: + +.. code-block:: cpp + + struct Example { + Example(int &); + }; + + py::class_(m, "Example") + .def(py::init()); // OK, exact match + // .def(py::init()); // compile-time error, mismatch + +A non-``const`` lvalue reference is not allowed to bind to an rvalue. However, +note that a constructor taking ``const T &`` can still be registered using +``py::init()`` because a ``const`` lvalue reference can bind to an rvalue. 
+ +v2.1 +==== + +Minimum compiler versions are enforced at compile time +------------------------------------------------------ + +The minimums also apply to v2.0 but the check is now explicit and a compile-time +error is raised if the compiler does not meet the requirements: + +* GCC >= 4.8 +* clang >= 3.3 (appleclang >= 5.0) +* MSVC >= 2015u3 +* Intel C++ >= 15.0 + + +The ``py::metaclass`` attribute is not required for static properties +--------------------------------------------------------------------- + +Binding classes with static properties is now possible by default. The +zero-parameter version of ``py::metaclass()`` is deprecated. However, a new +one-parameter ``py::metaclass(python_type)`` version was added for rare +cases when a custom metaclass is needed to override pybind11's default. + +.. code-block:: cpp + + // old -- emits a deprecation warning + py::class_(m, "Foo", py::metaclass()) + .def_property_readonly_static("foo", ...); + + // new -- static properties work without the attribute + py::class_(m, "Foo") + .def_property_readonly_static("foo", ...); + + // new -- advanced feature, override pybind11's default metaclass + py::class_(m, "Bar", py::metaclass(custom_python_type)) + ... + + +v2.0 +==== + +Breaking changes in ``py::class_`` +---------------------------------- + +These changes were necessary to make type definitions in pybind11 +future-proof, to support PyPy via its ``cpyext`` mechanism (`#527 +`_), and to improve efficiency +(`rev. 86d825 `_). + +1. Declarations of types that provide access via the buffer protocol must + now include the ``py::buffer_protocol()`` annotation as an argument to + the ``py::class_`` constructor. + + .. code-block:: cpp + + py::class_("Matrix", py::buffer_protocol()) + .def(py::init<...>()) + .def_buffer(...); + +2. Classes which include static properties (e.g. ``def_readwrite_static()``) + must now include the ``py::metaclass()`` attribute. Note: this requirement + has since been removed in v2.1. 
If you're upgrading from 1.x, it's + recommended to skip directly to v2.1 or newer. + +3. This version of pybind11 uses a redesigned mechanism for instantiating + trampoline classes that are used to override virtual methods from within + Python. This led to the following user-visible syntax change: + + .. code-block:: cpp + + // old v1.x syntax + py::class_("MyClass") + .alias() + ... + + // new v2.x syntax + py::class_("MyClass") + ... + + Importantly, both the original and the trampoline class are now specified + as arguments to the ``py::class_`` template, and the ``alias<..>()`` call + is gone. The new scheme has zero overhead in cases when Python doesn't + override any functions of the underlying C++ class. + `rev. 86d825 `_. + + The class type must be the first template argument given to ``py::class_`` + while the trampoline can be mixed in arbitrary order with other arguments + (see the following section). + + +Deprecation of the ``py::base()`` attribute +---------------------------------------------- + +``py::base()`` was deprecated in favor of specifying ``T`` as a template +argument to ``py::class_``. This new syntax also supports multiple inheritance. +Note that, while the type being exported must be the first argument in the +``py::class_`` template, the order of the following types (bases, +holder and/or trampoline) is not important. + +.. code-block:: cpp + + // old v1.x + py::class_("Derived", py::base()); + + // new v2.x + py::class_("Derived"); + + // new -- multiple inheritance + py::class_("Derived"); + + // new -- apart from `Derived` the argument order can be arbitrary + py::class_("Derived"); + + +Out-of-the-box support for ``std::shared_ptr`` +---------------------------------------------- + +The relevant type caster is now built in, so it's no longer necessary to +include a declaration of the form: + +.. 
code-block:: cpp + + PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr) + +Continuing to do so won’t cause an error or even a deprecation warning, +but it's completely redundant. + + +Deprecation of a few ``py::object`` APIs +---------------------------------------- + +All of the old-style calls emit deprecation warnings. + ++---------------------------------------+---------------------------------------------+ +| Old syntax | New syntax | ++=======================================+=============================================+ +| ``obj.call(args...)`` | ``obj(args...)`` | ++---------------------------------------+---------------------------------------------+ +| ``obj.str()`` | ``py::str(obj)`` | ++---------------------------------------+---------------------------------------------+ +| ``auto l = py::list(obj); l.check()`` | ``py::isinstance(obj)`` | ++---------------------------------------+---------------------------------------------+ +| ``py::object(ptr, true)`` | ``py::reinterpret_borrow(ptr)`` | ++---------------------------------------+---------------------------------------------+ +| ``py::object(ptr, false)`` | ``py::reinterpret_steal(ptr)`` | ++---------------------------------------+---------------------------------------------+ +| ``if (obj.attr("foo"))`` | ``if (py::hasattr(obj, "foo"))`` | ++---------------------------------------+---------------------------------------------+ +| ``if (obj["bar"])`` | ``if (obj.contains("bar"))`` | ++---------------------------------------+---------------------------------------------+ diff --git a/diffvg/pybind11/include/pybind11/attr.h b/diffvg/pybind11/include/pybind11/attr.h new file mode 100644 index 0000000000000000000000000000000000000000..54065fc9e10a075e1a2de5d6095e88d4b0a4aca2 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/attr.h @@ -0,0 +1,528 @@ +/* + pybind11/attr.h: Infrastructure for processing custom + type and function attributes + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. 
Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "cast.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +/// \addtogroup annotations +/// @{ + +/// Annotation for methods +struct is_method { handle class_; is_method(const handle &c) : class_(c) { } }; + +/// Annotation for operators +struct is_operator { }; + +/// Annotation for classes that cannot be subclassed +struct is_final { }; + +/// Annotation for parent scope +struct scope { handle value; scope(const handle &s) : value(s) { } }; + +/// Annotation for documentation +struct doc { const char *value; doc(const char *value) : value(value) { } }; + +/// Annotation for function names +struct name { const char *value; name(const char *value) : value(value) { } }; + +/// Annotation indicating that a function is an overload associated with a given "sibling" +struct sibling { handle value; sibling(const handle &value) : value(value.ptr()) { } }; + +/// Annotation indicating that a class derives from another given type +template struct base { + PYBIND11_DEPRECATED("base() was deprecated in favor of specifying 'T' as a template argument to class_") + base() { } +}; + +/// Keep patient alive while nurse lives +template struct keep_alive { }; + +/// Annotation indicating that a class is involved in a multiple inheritance relationship +struct multiple_inheritance { }; + +/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class +struct dynamic_attr { }; + +/// Annotation which enables the buffer protocol for a type +struct buffer_protocol { }; + +/// Annotation which requests that a special metaclass is created for a type +struct metaclass { + handle value; + + PYBIND11_DEPRECATED("py::metaclass() is no longer required. 
It's turned on by default now.") + metaclass() {} + + /// Override pybind11's default metaclass + explicit metaclass(handle value) : value(value) { } +}; + +/// Annotation that marks a class as local to the module: +struct module_local { const bool value; constexpr module_local(bool v = true) : value(v) { } }; + +/// Annotation to mark enums as an arithmetic type +struct arithmetic { }; + +/** \rst + A call policy which places one or more guard variables (``Ts...``) around the function call. + + For example, this definition: + + .. code-block:: cpp + + m.def("foo", foo, py::call_guard()); + + is equivalent to the following pseudocode: + + .. code-block:: cpp + + m.def("foo", [](args...) { + T scope_guard; + return foo(args...); // forwarded arguments + }); + \endrst */ +template struct call_guard; + +template <> struct call_guard<> { using type = detail::void_type; }; + +template +struct call_guard { + static_assert(std::is_default_constructible::value, + "The guard type must be default constructible"); + + using type = T; +}; + +template +struct call_guard { + struct type { + T guard{}; // Compose multiple guard types with left-to-right default-constructor order + typename call_guard::type next{}; + }; +}; + +/// @} annotations + +PYBIND11_NAMESPACE_BEGIN(detail) +/* Forward declarations */ +enum op_id : int; +enum op_type : int; +struct undefined_t; +template struct op_; +inline void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret); + +/// Internal data structure which holds metadata about a keyword argument +struct argument_record { + const char *name; ///< Argument name + const char *descr; ///< Human-readable version of the argument value + handle value; ///< Associated Python object + bool convert : 1; ///< True if the argument is allowed to convert when loading + bool none : 1; ///< True if None is allowed when loading + + argument_record(const char *name, const char *descr, handle value, bool convert, bool none) + : name(name), 
descr(descr), value(value), convert(convert), none(none) { } +}; + +/// Internal data structure which holds metadata about a bound function (signature, overloads, etc.) +struct function_record { + function_record() + : is_constructor(false), is_new_style_constructor(false), is_stateless(false), + is_operator(false), is_method(false), + has_args(false), has_kwargs(false), has_kwonly_args(false) { } + + /// Function name + char *name = nullptr; /* why no C++ strings? They generate heavier code.. */ + + // User-specified documentation string + char *doc = nullptr; + + /// Human-readable version of the function signature + char *signature = nullptr; + + /// List of registered keyword arguments + std::vector args; + + /// Pointer to lambda function which converts arguments and performs the actual call + handle (*impl) (function_call &) = nullptr; + + /// Storage for the wrapped function pointer and captured data, if any + void *data[3] = { }; + + /// Pointer to custom destructor for 'data' (if needed) + void (*free_data) (function_record *ptr) = nullptr; + + /// Return value policy associated with this function + return_value_policy policy = return_value_policy::automatic; + + /// True if name == '__init__' + bool is_constructor : 1; + + /// True if this is a new-style `__init__` defined in `detail/init.h` + bool is_new_style_constructor : 1; + + /// True if this is a stateless function pointer + bool is_stateless : 1; + + /// True if this is an operator (__add__), etc. 
+ bool is_operator : 1; + + /// True if this is a method + bool is_method : 1; + + /// True if the function has a '*args' argument + bool has_args : 1; + + /// True if the function has a '**kwargs' argument + bool has_kwargs : 1; + + /// True once a 'py::kwonly' is encountered (any following args are keyword-only) + bool has_kwonly_args : 1; + + /// Number of arguments (including py::args and/or py::kwargs, if present) + std::uint16_t nargs; + + /// Number of trailing arguments (counted in `nargs`) that are keyword-only + std::uint16_t nargs_kwonly = 0; + + /// Python method object + PyMethodDef *def = nullptr; + + /// Python handle to the parent scope (a class or a module) + handle scope; + + /// Python handle to the sibling function representing an overload chain + handle sibling; + + /// Pointer to next overload + function_record *next = nullptr; +}; + +/// Special data structure which (temporarily) holds metadata about a bound class +struct type_record { + PYBIND11_NOINLINE type_record() + : multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false), + default_holder(true), module_local(false), is_final(false) { } + + /// Handle to the parent scope + handle scope; + + /// Name of the class + const char *name = nullptr; + + // Pointer to RTTI type_info data structure + const std::type_info *type = nullptr; + + /// How large is the underlying C++ type? + size_t type_size = 0; + + /// What is the alignment of the underlying C++ type? + size_t type_align = 0; + + /// How large is the type's holder? 
+ size_t holder_size = 0; + + /// The global operator new can be overridden with a class-specific variant + void *(*operator_new)(size_t) = nullptr; + + /// Function pointer to class_<..>::init_instance + void (*init_instance)(instance *, const void *) = nullptr; + + /// Function pointer to class_<..>::dealloc + void (*dealloc)(detail::value_and_holder &) = nullptr; + + /// List of base classes of the newly created type + list bases; + + /// Optional docstring + const char *doc = nullptr; + + /// Custom metaclass (optional) + handle metaclass; + + /// Multiple inheritance marker + bool multiple_inheritance : 1; + + /// Does the class manage a __dict__? + bool dynamic_attr : 1; + + /// Does the class implement the buffer protocol? + bool buffer_protocol : 1; + + /// Is the default (unique_ptr) holder type used? + bool default_holder : 1; + + /// Is the class definition local to the module shared object? + bool module_local : 1; + + /// Is the class inheritable from python classes? + bool is_final : 1; + + PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *)) { + auto base_info = detail::get_type_info(base, false); + if (!base_info) { + std::string tname(base.name()); + detail::clean_type_id(tname); + pybind11_fail("generic_type: type \"" + std::string(name) + + "\" referenced unknown base type \"" + tname + "\""); + } + + if (default_holder != base_info->default_holder) { + std::string tname(base.name()); + detail::clean_type_id(tname); + pybind11_fail("generic_type: type \"" + std::string(name) + "\" " + + (default_holder ? "does not have" : "has") + + " a non-default holder type while its base \"" + tname + "\" " + + (base_info->default_holder ? 
"does not" : "does")); + } + + bases.append((PyObject *) base_info->type); + + if (base_info->type->tp_dictoffset != 0) + dynamic_attr = true; + + if (caster) + base_info->implicit_casts.emplace_back(type, caster); + } +}; + +inline function_call::function_call(const function_record &f, handle p) : + func(f), parent(p) { + args.reserve(f.nargs); + args_convert.reserve(f.nargs); +} + +/// Tag for a new-style `__init__` defined in `detail/init.h` +struct is_new_style_constructor { }; + +/** + * Partial template specializations to process custom attributes provided to + * cpp_function_ and class_. These are either used to initialize the respective + * fields in the type_record and function_record data structures or executed at + * runtime to deal with custom call policies (e.g. keep_alive). + */ +template struct process_attribute; + +template struct process_attribute_default { + /// Default implementation: do nothing + static void init(const T &, function_record *) { } + static void init(const T &, type_record *) { } + static void precall(function_call &) { } + static void postcall(function_call &, handle) { } +}; + +/// Process an attribute specifying the function's name +template <> struct process_attribute : process_attribute_default { + static void init(const name &n, function_record *r) { r->name = const_cast(n.value); } +}; + +/// Process an attribute specifying the function's docstring +template <> struct process_attribute : process_attribute_default { + static void init(const doc &n, function_record *r) { r->doc = const_cast(n.value); } +}; + +/// Process an attribute specifying the function's docstring (provided as a C-style string) +template <> struct process_attribute : process_attribute_default { + static void init(const char *d, function_record *r) { r->doc = const_cast(d); } + static void init(const char *d, type_record *r) { r->doc = const_cast(d); } +}; +template <> struct process_attribute : process_attribute { }; + +/// Process an attribute 
indicating the function's return value policy +template <> struct process_attribute : process_attribute_default { + static void init(const return_value_policy &p, function_record *r) { r->policy = p; } +}; + +/// Process an attribute which indicates that this is an overloaded function associated with a given sibling +template <> struct process_attribute : process_attribute_default { + static void init(const sibling &s, function_record *r) { r->sibling = s.value; } +}; + +/// Process an attribute which indicates that this function is a method +template <> struct process_attribute : process_attribute_default { + static void init(const is_method &s, function_record *r) { r->is_method = true; r->scope = s.class_; } +}; + +/// Process an attribute which indicates the parent scope of a method +template <> struct process_attribute : process_attribute_default { + static void init(const scope &s, function_record *r) { r->scope = s.value; } +}; + +/// Process an attribute which indicates that this function is an operator +template <> struct process_attribute : process_attribute_default { + static void init(const is_operator &, function_record *r) { r->is_operator = true; } +}; + +template <> struct process_attribute : process_attribute_default { + static void init(const is_new_style_constructor &, function_record *r) { r->is_new_style_constructor = true; } +}; + +inline void process_kwonly_arg(const arg &a, function_record *r) { + if (!a.name || strlen(a.name) == 0) + pybind11_fail("arg(): cannot specify an unnamed argument after an kwonly() annotation"); + ++r->nargs_kwonly; +} + +/// Process a keyword argument attribute (*without* a default value) +template <> struct process_attribute : process_attribute_default { + static void init(const arg &a, function_record *r) { + if (r->is_method && r->args.empty()) + r->args.emplace_back("self", nullptr, handle(), true /*convert*/, false /*none not allowed*/); + r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, 
a.flag_none); + + if (r->has_kwonly_args) process_kwonly_arg(a, r); + } +}; + +/// Process a keyword argument attribute (*with* a default value) +template <> struct process_attribute : process_attribute_default { + static void init(const arg_v &a, function_record *r) { + if (r->is_method && r->args.empty()) + r->args.emplace_back("self", nullptr /*descr*/, handle() /*parent*/, true /*convert*/, false /*none not allowed*/); + + if (!a.value) { +#if !defined(NDEBUG) + std::string descr("'"); + if (a.name) descr += std::string(a.name) + ": "; + descr += a.type + "'"; + if (r->is_method) { + if (r->name) + descr += " in method '" + (std::string) str(r->scope) + "." + (std::string) r->name + "'"; + else + descr += " in method of '" + (std::string) str(r->scope) + "'"; + } else if (r->name) { + descr += " in function '" + (std::string) r->name + "'"; + } + pybind11_fail("arg(): could not convert default argument " + + descr + " into a Python object (type not registered yet?)"); +#else + pybind11_fail("arg(): could not convert default argument " + "into a Python object (type not registered yet?). " + "Compile in debug mode for more information."); +#endif + } + r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none); + + if (r->has_kwonly_args) process_kwonly_arg(a, r); + } +}; + +/// Process a keyword-only-arguments-follow pseudo argument +template <> struct process_attribute : process_attribute_default { + static void init(const kwonly &, function_record *r) { + r->has_kwonly_args = true; + } +}; + +/// Process a parent class attribute. 
Single inheritance only (class_ itself already guarantees that) +template +struct process_attribute::value>> : process_attribute_default { + static void init(const handle &h, type_record *r) { r->bases.append(h); } +}; + +/// Process a parent class attribute (deprecated, does not support multiple inheritance) +template +struct process_attribute> : process_attribute_default> { + static void init(const base &, type_record *r) { r->add_base(typeid(T), nullptr); } +}; + +/// Process a multiple inheritance attribute +template <> +struct process_attribute : process_attribute_default { + static void init(const multiple_inheritance &, type_record *r) { r->multiple_inheritance = true; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const is_final &, type_record *r) { r->is_final = true; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const module_local &l, type_record *r) { r->module_local = l.value; } +}; + +/// Process an 'arithmetic' attribute for enums (does nothing here) +template <> +struct process_attribute : process_attribute_default {}; + +template +struct process_attribute> : process_attribute_default> { }; + +/** + * Process a keep_alive call policy -- invokes keep_alive_impl during the + * pre-call handler if both Nurse, Patient != 0 and use the post-call handler + * otherwise + */ +template struct process_attribute> : public process_attribute_default> { + template = 0> + static void 
precall(function_call &call) { keep_alive_impl(Nurse, Patient, call, handle()); } + template = 0> + static void postcall(function_call &, handle) { } + template = 0> + static void precall(function_call &) { } + template = 0> + static void postcall(function_call &call, handle ret) { keep_alive_impl(Nurse, Patient, call, ret); } +}; + +/// Recursively iterate over variadic template arguments +template struct process_attributes { + static void init(const Args&... args, function_record *r) { + int unused[] = { 0, (process_attribute::type>::init(args, r), 0) ... }; + ignore_unused(unused); + } + static void init(const Args&... args, type_record *r) { + int unused[] = { 0, (process_attribute::type>::init(args, r), 0) ... }; + ignore_unused(unused); + } + static void precall(function_call &call) { + int unused[] = { 0, (process_attribute::type>::precall(call), 0) ... }; + ignore_unused(unused); + } + static void postcall(function_call &call, handle fn_ret) { + int unused[] = { 0, (process_attribute::type>::postcall(call, fn_ret), 0) ... 
}; + ignore_unused(unused); + } +}; + +template +using is_call_guard = is_instantiation; + +/// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found) +template +using extract_guard_t = typename exactly_one_t, Extra...>::type; + +/// Check the number of named arguments at compile time +template ::value...), + size_t self = constexpr_sum(std::is_same::value...)> +constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) { + return named == 0 || (self + named + has_args + has_kwargs) == nargs; +} + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/buffer_info.h b/diffvg/pybind11/include/pybind11/buffer_info.h new file mode 100644 index 0000000000000000000000000000000000000000..8349a46b8b92f87e9f641b30b7b86617b7f85d50 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/buffer_info.h @@ -0,0 +1,116 @@ +/* + pybind11/buffer_info.h: Python buffer object interface + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "detail/common.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +/// Information record describing a Python buffer object +struct buffer_info { + void *ptr = nullptr; // Pointer to the underlying storage + ssize_t itemsize = 0; // Size of individual items in bytes + ssize_t size = 0; // Total number of entries + std::string format; // For homogeneous buffers, this should be set to format_descriptor::format() + ssize_t ndim = 0; // Number of dimensions + std::vector shape; // Shape of the tensor (1 entry per dimension) + std::vector strides; // Number of bytes between adjacent entries (for each per dimension) + bool readonly = false; // flag to indicate if the underlying storage may be written to + + buffer_info() { } + + buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim, + detail::any_container shape_in, detail::any_container strides_in, bool readonly=false) + : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim), + shape(std::move(shape_in)), strides(std::move(strides_in)), readonly(readonly) { + if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size()) + pybind11_fail("buffer_info: ndim doesn't match shape and/or strides length"); + for (size_t i = 0; i < (size_t) ndim; ++i) + size *= shape[i]; + } + + template + buffer_info(T *ptr, detail::any_container shape_in, detail::any_container strides_in, bool readonly=false) + : buffer_info(private_ctr_tag(), ptr, sizeof(T), format_descriptor::format(), static_cast(shape_in->size()), std::move(shape_in), std::move(strides_in), readonly) { } + + buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t size, bool readonly=false) + : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}, readonly) { } + + template + buffer_info(T *ptr, ssize_t size, bool readonly=false) + : buffer_info(ptr, sizeof(T), format_descriptor::format(), size, readonly) { } + + template + buffer_info(const T *ptr, ssize_t size, bool 
readonly=true) + : buffer_info(const_cast(ptr), sizeof(T), format_descriptor::format(), size, readonly) { } + + explicit buffer_info(Py_buffer *view, bool ownview = true) + : buffer_info(view->buf, view->itemsize, view->format, view->ndim, + {view->shape, view->shape + view->ndim}, {view->strides, view->strides + view->ndim}, view->readonly) { + this->m_view = view; + this->ownview = ownview; + } + + buffer_info(const buffer_info &) = delete; + buffer_info& operator=(const buffer_info &) = delete; + + buffer_info(buffer_info &&other) { + (*this) = std::move(other); + } + + buffer_info& operator=(buffer_info &&rhs) { + ptr = rhs.ptr; + itemsize = rhs.itemsize; + size = rhs.size; + format = std::move(rhs.format); + ndim = rhs.ndim; + shape = std::move(rhs.shape); + strides = std::move(rhs.strides); + std::swap(m_view, rhs.m_view); + std::swap(ownview, rhs.ownview); + readonly = rhs.readonly; + return *this; + } + + ~buffer_info() { + if (m_view && ownview) { PyBuffer_Release(m_view); delete m_view; } + } + + Py_buffer *view() const { return m_view; } + Py_buffer *&view() { return m_view; } +private: + struct private_ctr_tag { }; + + buffer_info(private_ctr_tag, void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim, + detail::any_container &&shape_in, detail::any_container &&strides_in, bool readonly) + : buffer_info(ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) { } + + Py_buffer *m_view = nullptr; + bool ownview = false; +}; + +PYBIND11_NAMESPACE_BEGIN(detail) + +template struct compare_buffer_info { + static bool compare(const buffer_info& b) { + return b.format == format_descriptor::format() && b.itemsize == (ssize_t) sizeof(T); + } +}; + +template struct compare_buffer_info::value>> { + static bool compare(const buffer_info& b) { + return (size_t) b.itemsize == sizeof(T) && (b.format == format_descriptor::value || + ((sizeof(T) == sizeof(long)) && b.format == (std::is_unsigned::value ? 
"L" : "l")) || + ((sizeof(T) == sizeof(size_t)) && b.format == (std::is_unsigned::value ? "N" : "n"))); + } +}; + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/cast.h b/diffvg/pybind11/include/pybind11/cast.h new file mode 100644 index 0000000000000000000000000000000000000000..5711004df9f575c66ec7ca389cbd995675ac69e0 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/cast.h @@ -0,0 +1,2210 @@ +/* + pybind11/cast.h: Partial template specializations to cast between + C++ and Python types + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "pytypes.h" +#include "detail/typeid.h" +#include "detail/descr.h" +#include "detail/internals.h" +#include +#include +#include +#include + +#if defined(PYBIND11_CPP17) +# if defined(__has_include) +# if __has_include() +# define PYBIND11_HAS_STRING_VIEW +# endif +# elif defined(_MSC_VER) +# define PYBIND11_HAS_STRING_VIEW +# endif +#endif +#ifdef PYBIND11_HAS_STRING_VIEW +#include +#endif + +#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L +# define PYBIND11_HAS_U8STRING +#endif + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +/// A life support system for temporary objects created by `type_caster::load()`. +/// Adding a patient will keep it alive up until the enclosing function returns. +class loader_life_support { +public: + /// A new patient frame is created when a function is entered + loader_life_support() { + get_internals().loader_patient_stack.push_back(nullptr); + } + + /// ... 
and destroyed after it returns + ~loader_life_support() { + auto &stack = get_internals().loader_patient_stack; + if (stack.empty()) + pybind11_fail("loader_life_support: internal error"); + + auto ptr = stack.back(); + stack.pop_back(); + Py_CLEAR(ptr); + + // A heuristic to reduce the stack's capacity (e.g. after long recursive calls) + if (stack.capacity() > 16 && stack.size() != 0 && stack.capacity() / stack.size() > 2) + stack.shrink_to_fit(); + } + + /// This can only be used inside a pybind11-bound function, either by `argument_loader` + /// at argument preparation time or by `py::cast()` at execution time. + PYBIND11_NOINLINE static void add_patient(handle h) { + auto &stack = get_internals().loader_patient_stack; + if (stack.empty()) + throw cast_error("When called outside a bound function, py::cast() cannot " + "do Python -> C++ conversions which require the creation " + "of temporary values"); + + auto &list_ptr = stack.back(); + if (list_ptr == nullptr) { + list_ptr = PyList_New(1); + if (!list_ptr) + pybind11_fail("loader_life_support: error allocating list"); + PyList_SET_ITEM(list_ptr, 0, h.inc_ref().ptr()); + } else { + auto result = PyList_Append(list_ptr, h.ptr()); + if (result == -1) + pybind11_fail("loader_life_support: error adding patient"); + } + } +}; + +// Gets the cache entry for the given type, creating it if necessary. The return value is the pair +// returned by emplace, i.e. an iterator for the entry and a bool set to `true` if the entry was +// just created. +inline std::pair all_type_info_get_cache(PyTypeObject *type); + +// Populates a just-created cache entry. 
+PYBIND11_NOINLINE inline void all_type_info_populate(PyTypeObject *t, std::vector &bases) { + std::vector check; + for (handle parent : reinterpret_borrow(t->tp_bases)) + check.push_back((PyTypeObject *) parent.ptr()); + + auto const &type_dict = get_internals().registered_types_py; + for (size_t i = 0; i < check.size(); i++) { + auto type = check[i]; + // Ignore Python2 old-style class super type: + if (!PyType_Check((PyObject *) type)) continue; + + // Check `type` in the current set of registered python types: + auto it = type_dict.find(type); + if (it != type_dict.end()) { + // We found a cache entry for it, so it's either pybind-registered or has pre-computed + // pybind bases, but we have to make sure we haven't already seen the type(s) before: we + // want to follow Python/virtual C++ rules that there should only be one instance of a + // common base. + for (auto *tinfo : it->second) { + // NB: Could use a second set here, rather than doing a linear search, but since + // having a large number of immediate pybind11-registered types seems fairly + // unlikely, that probably isn't worthwhile. + bool found = false; + for (auto *known : bases) { + if (known == tinfo) { found = true; break; } + } + if (!found) bases.push_back(tinfo); + } + } + else if (type->tp_bases) { + // It's some python type, so keep follow its bases classes to look for one or more + // registered types + if (i + 1 == check.size()) { + // When we're at the end, we can pop off the current element to avoid growing + // `check` when adding just one base (which is typical--i.e. when there is no + // multiple inheritance) + check.pop_back(); + i--; + } + for (handle parent : reinterpret_borrow(type->tp_bases)) + check.push_back((PyTypeObject *) parent.ptr()); + } + } +} + +/** + * Extracts vector of type_info pointers of pybind-registered roots of the given Python type. 
Will + * be just 1 pybind type for the Python type of a pybind-registered class, or for any Python-side + * derived class that uses single inheritance. Will contain as many types as required for a Python + * class that uses multiple inheritance to inherit (directly or indirectly) from multiple + * pybind-registered classes. Will be empty if neither the type nor any base classes are + * pybind-registered. + * + * The value is cached for the lifetime of the Python type. + */ +inline const std::vector &all_type_info(PyTypeObject *type) { + auto ins = all_type_info_get_cache(type); + if (ins.second) + // New cache entry: populate it + all_type_info_populate(type, ins.first->second); + + return ins.first->second; +} + +/** + * Gets a single pybind11 type info for a python type. Returns nullptr if neither the type nor any + * ancestors are pybind11-registered. Throws an exception if there are multiple bases--use + * `all_type_info` instead if you want to support multiple bases. + */ +PYBIND11_NOINLINE inline detail::type_info* get_type_info(PyTypeObject *type) { + auto &bases = all_type_info(type); + if (bases.size() == 0) + return nullptr; + if (bases.size() > 1) + pybind11_fail("pybind11::detail::get_type_info: type has multiple pybind11-registered bases"); + return bases.front(); +} + +inline detail::type_info *get_local_type_info(const std::type_index &tp) { + auto &locals = registered_local_types_cpp(); + auto it = locals.find(tp); + if (it != locals.end()) + return it->second; + return nullptr; +} + +inline detail::type_info *get_global_type_info(const std::type_index &tp) { + auto &types = get_internals().registered_types_cpp; + auto it = types.find(tp); + if (it != types.end()) + return it->second; + return nullptr; +} + +/// Return the type info for a given C++ type; on lookup failure can either throw or return nullptr. 
+PYBIND11_NOINLINE inline detail::type_info *get_type_info(const std::type_index &tp, + bool throw_if_missing = false) { + if (auto ltype = get_local_type_info(tp)) + return ltype; + if (auto gtype = get_global_type_info(tp)) + return gtype; + + if (throw_if_missing) { + std::string tname = tp.name(); + detail::clean_type_id(tname); + pybind11_fail("pybind11::detail::get_type_info: unable to find type info for \"" + tname + "\""); + } + return nullptr; +} + +PYBIND11_NOINLINE inline handle get_type_handle(const std::type_info &tp, bool throw_if_missing) { + detail::type_info *type_info = get_type_info(tp, throw_if_missing); + return handle(type_info ? ((PyObject *) type_info->type) : nullptr); +} + +struct value_and_holder { + instance *inst = nullptr; + size_t index = 0u; + const detail::type_info *type = nullptr; + void **vh = nullptr; + + // Main constructor for a found value/holder: + value_and_holder(instance *i, const detail::type_info *type, size_t vpos, size_t index) : + inst{i}, index{index}, type{type}, + vh{inst->simple_layout ? inst->simple_value_holder : &inst->nonsimple.values_and_holders[vpos]} + {} + + // Default constructor (used to signal a value-and-holder not found by get_value_and_holder()) + value_and_holder() {} + + // Used for past-the-end iterator + value_and_holder(size_t index) : index{index} {} + + template V *&value_ptr() const { + return reinterpret_cast(vh[0]); + } + // True if this `value_and_holder` has a non-null value pointer + explicit operator bool() const { return value_ptr(); } + + template H &holder() const { + return reinterpret_cast(vh[1]); + } + bool holder_constructed() const { + return inst->simple_layout + ? 
inst->simple_holder_constructed + : inst->nonsimple.status[index] & instance::status_holder_constructed; + } + void set_holder_constructed(bool v = true) { + if (inst->simple_layout) + inst->simple_holder_constructed = v; + else if (v) + inst->nonsimple.status[index] |= instance::status_holder_constructed; + else + inst->nonsimple.status[index] &= (uint8_t) ~instance::status_holder_constructed; + } + bool instance_registered() const { + return inst->simple_layout + ? inst->simple_instance_registered + : inst->nonsimple.status[index] & instance::status_instance_registered; + } + void set_instance_registered(bool v = true) { + if (inst->simple_layout) + inst->simple_instance_registered = v; + else if (v) + inst->nonsimple.status[index] |= instance::status_instance_registered; + else + inst->nonsimple.status[index] &= (uint8_t) ~instance::status_instance_registered; + } +}; + +// Container for accessing and iterating over an instance's values/holders +struct values_and_holders { +private: + instance *inst; + using type_vec = std::vector; + const type_vec &tinfo; + +public: + values_and_holders(instance *inst) : inst{inst}, tinfo(all_type_info(Py_TYPE(inst))) {} + + struct iterator { + private: + instance *inst = nullptr; + const type_vec *types = nullptr; + value_and_holder curr; + friend struct values_and_holders; + iterator(instance *inst, const type_vec *tinfo) + : inst{inst}, types{tinfo}, + curr(inst /* instance */, + types->empty() ? 
nullptr : (*types)[0] /* type info */, + 0, /* vpos: (non-simple types only): the first vptr comes first */ + 0 /* index */) + {} + // Past-the-end iterator: + iterator(size_t end) : curr(end) {} + public: + bool operator==(const iterator &other) const { return curr.index == other.curr.index; } + bool operator!=(const iterator &other) const { return curr.index != other.curr.index; } + iterator &operator++() { + if (!inst->simple_layout) + curr.vh += 1 + (*types)[curr.index]->holder_size_in_ptrs; + ++curr.index; + curr.type = curr.index < types->size() ? (*types)[curr.index] : nullptr; + return *this; + } + value_and_holder &operator*() { return curr; } + value_and_holder *operator->() { return &curr; } + }; + + iterator begin() { return iterator(inst, &tinfo); } + iterator end() { return iterator(tinfo.size()); } + + iterator find(const type_info *find_type) { + auto it = begin(), endit = end(); + while (it != endit && it->type != find_type) ++it; + return it; + } + + size_t size() { return tinfo.size(); } +}; + +/** + * Extracts C++ value and holder pointer references from an instance (which may contain multiple + * values/holders for python-side multiple inheritance) that match the given type. Throws an error + * if the given type (or ValueType, if omitted) is not a pybind11 base of the given instance. If + * `find_type` is omitted (or explicitly specified as nullptr) the first value/holder are returned, + * regardless of type (and the resulting .type will be nullptr). + * + * The returned object should be short-lived: in particular, it must not outlive the called-upon + * instance. 
+ */ +PYBIND11_NOINLINE inline value_and_holder instance::get_value_and_holder(const type_info *find_type /*= nullptr default in common.h*/, bool throw_if_missing /*= true in common.h*/) { + // Optimize common case: + if (!find_type || Py_TYPE(this) == find_type->type) + return value_and_holder(this, find_type, 0, 0); + + detail::values_and_holders vhs(this); + auto it = vhs.find(find_type); + if (it != vhs.end()) + return *it; + + if (!throw_if_missing) + return value_and_holder(); + +#if defined(NDEBUG) + pybind11_fail("pybind11::detail::instance::get_value_and_holder: " + "type is not a pybind11 base of the given instance " + "(compile in debug mode for type details)"); +#else + pybind11_fail("pybind11::detail::instance::get_value_and_holder: `" + + std::string(find_type->type->tp_name) + "' is not a pybind11 base of the given `" + + std::string(Py_TYPE(this)->tp_name) + "' instance"); +#endif +} + +PYBIND11_NOINLINE inline void instance::allocate_layout() { + auto &tinfo = all_type_info(Py_TYPE(this)); + + const size_t n_types = tinfo.size(); + + if (n_types == 0) + pybind11_fail("instance allocation failed: new instance has no pybind11-registered base types"); + + simple_layout = + n_types == 1 && tinfo.front()->holder_size_in_ptrs <= instance_simple_holder_in_ptrs(); + + // Simple path: no python-side multiple inheritance, and a small-enough holder + if (simple_layout) { + simple_value_holder[0] = nullptr; + simple_holder_constructed = false; + simple_instance_registered = false; + } + else { // multiple base types or a too-large holder + // Allocate space to hold: [v1*][h1][v2*][h2]...[bb...] where [vN*] is a value pointer, + // [hN] is the (uninitialized) holder instance for value N, and [bb...] is a set of bool + // values that tracks whether each associated holder has been initialized. Each [block] is + // padded, if necessary, to an integer multiple of sizeof(void *). 
+ size_t space = 0; + for (auto t : tinfo) { + space += 1; // value pointer + space += t->holder_size_in_ptrs; // holder instance + } + size_t flags_at = space; + space += size_in_ptrs(n_types); // status bytes (holder_constructed and instance_registered) + + // Allocate space for flags, values, and holders, and initialize it to 0 (flags and values, + // in particular, need to be 0). Use Python's memory allocation functions: in Python 3.6 + // they default to using pymalloc, which is designed to be efficient for small allocations + // like the one we're doing here; in earlier versions (and for larger allocations) they are + // just wrappers around malloc. +#if PY_VERSION_HEX >= 0x03050000 + nonsimple.values_and_holders = (void **) PyMem_Calloc(space, sizeof(void *)); + if (!nonsimple.values_and_holders) throw std::bad_alloc(); +#else + nonsimple.values_and_holders = (void **) PyMem_New(void *, space); + if (!nonsimple.values_and_holders) throw std::bad_alloc(); + std::memset(nonsimple.values_and_holders, 0, space * sizeof(void *)); +#endif + nonsimple.status = reinterpret_cast(&nonsimple.values_and_holders[flags_at]); + } + owned = true; +} + +PYBIND11_NOINLINE inline void instance::deallocate_layout() { + if (!simple_layout) + PyMem_Free(nonsimple.values_and_holders); +} + +PYBIND11_NOINLINE inline bool isinstance_generic(handle obj, const std::type_info &tp) { + handle type = detail::get_type_handle(tp, false); + if (!type) + return false; + return isinstance(obj, type); +} + +PYBIND11_NOINLINE inline std::string error_string() { + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_RuntimeError, "Unknown internal error occurred"); + return "Unknown internal error occurred"; + } + + error_scope scope; // Preserve error state + + std::string errorString; + if (scope.type) { + errorString += handle(scope.type).attr("__name__").cast(); + errorString += ": "; + } + if (scope.value) + errorString += (std::string) str(scope.value); + + 
PyErr_NormalizeException(&scope.type, &scope.value, &scope.trace); + +#if PY_MAJOR_VERSION >= 3 + if (scope.trace != nullptr) + PyException_SetTraceback(scope.value, scope.trace); +#endif + +#if !defined(PYPY_VERSION) + if (scope.trace) { + PyTracebackObject *trace = (PyTracebackObject *) scope.trace; + + /* Get the deepest trace possible */ + while (trace->tb_next) + trace = trace->tb_next; + + PyFrameObject *frame = trace->tb_frame; + errorString += "\n\nAt:\n"; + while (frame) { + int lineno = PyFrame_GetLineNumber(frame); + errorString += + " " + handle(frame->f_code->co_filename).cast() + + "(" + std::to_string(lineno) + "): " + + handle(frame->f_code->co_name).cast() + "\n"; + frame = frame->f_back; + } + } +#endif + + return errorString; +} + +PYBIND11_NOINLINE inline handle get_object_handle(const void *ptr, const detail::type_info *type ) { + auto &instances = get_internals().registered_instances; + auto range = instances.equal_range(ptr); + for (auto it = range.first; it != range.second; ++it) { + for (const auto &vh : values_and_holders(it->second)) { + if (vh.type == type) + return handle((PyObject *) it->second); + } + } + return handle(); +} + +inline PyThreadState *get_thread_state_unchecked() { +#if defined(PYPY_VERSION) + return PyThreadState_GET(); +#elif PY_VERSION_HEX < 0x03000000 + return _PyThreadState_Current; +#elif PY_VERSION_HEX < 0x03050000 + return (PyThreadState*) _Py_atomic_load_relaxed(&_PyThreadState_Current); +#elif PY_VERSION_HEX < 0x03050200 + return (PyThreadState*) _PyThreadState_Current.value; +#else + return _PyThreadState_UncheckedGet(); +#endif +} + +// Forward declarations +inline void keep_alive_impl(handle nurse, handle patient); +inline PyObject *make_new_instance(PyTypeObject *type); + +class type_caster_generic { +public: + PYBIND11_NOINLINE type_caster_generic(const std::type_info &type_info) + : typeinfo(get_type_info(type_info)), cpptype(&type_info) { } + + type_caster_generic(const type_info *typeinfo) + : 
typeinfo(typeinfo), cpptype(typeinfo ? typeinfo->cpptype : nullptr) { } + + bool load(handle src, bool convert) { + return load_impl(src, convert); + } + + PYBIND11_NOINLINE static handle cast(const void *_src, return_value_policy policy, handle parent, + const detail::type_info *tinfo, + void *(*copy_constructor)(const void *), + void *(*move_constructor)(const void *), + const void *existing_holder = nullptr) { + if (!tinfo) // no type info: error will be set already + return handle(); + + void *src = const_cast(_src); + if (src == nullptr) + return none().release(); + + auto it_instances = get_internals().registered_instances.equal_range(src); + for (auto it_i = it_instances.first; it_i != it_instances.second; ++it_i) { + for (auto instance_type : detail::all_type_info(Py_TYPE(it_i->second))) { + if (instance_type && same_type(*instance_type->cpptype, *tinfo->cpptype)) + return handle((PyObject *) it_i->second).inc_ref(); + } + } + + auto inst = reinterpret_steal(make_new_instance(tinfo->type)); + auto wrapper = reinterpret_cast(inst.ptr()); + wrapper->owned = false; + void *&valueptr = values_and_holders(wrapper).begin()->value_ptr(); + + switch (policy) { + case return_value_policy::automatic: + case return_value_policy::take_ownership: + valueptr = src; + wrapper->owned = true; + break; + + case return_value_policy::automatic_reference: + case return_value_policy::reference: + valueptr = src; + wrapper->owned = false; + break; + + case return_value_policy::copy: + if (copy_constructor) + valueptr = copy_constructor(src); + else { +#if defined(NDEBUG) + throw cast_error("return_value_policy = copy, but type is " + "non-copyable! 
(compile in debug mode for details)"); +#else + std::string type_name(tinfo->cpptype->name()); + detail::clean_type_id(type_name); + throw cast_error("return_value_policy = copy, but type " + + type_name + " is non-copyable!"); +#endif + } + wrapper->owned = true; + break; + + case return_value_policy::move: + if (move_constructor) + valueptr = move_constructor(src); + else if (copy_constructor) + valueptr = copy_constructor(src); + else { +#if defined(NDEBUG) + throw cast_error("return_value_policy = move, but type is neither " + "movable nor copyable! " + "(compile in debug mode for details)"); +#else + std::string type_name(tinfo->cpptype->name()); + detail::clean_type_id(type_name); + throw cast_error("return_value_policy = move, but type " + + type_name + " is neither movable nor copyable!"); +#endif + } + wrapper->owned = true; + break; + + case return_value_policy::reference_internal: + valueptr = src; + wrapper->owned = false; + keep_alive_impl(inst, parent); + break; + + default: + throw cast_error("unhandled return_value_policy: should not happen!"); + } + + tinfo->init_instance(wrapper, existing_holder); + + return inst.release(); + } + + // Base methods for generic caster; there are overridden in copyable_holder_caster + void load_value(value_and_holder &&v_h) { + auto *&vptr = v_h.value_ptr(); + // Lazy allocation for unallocated values: + if (vptr == nullptr) { + auto *type = v_h.type ? 
v_h.type : typeinfo; + if (type->operator_new) { + vptr = type->operator_new(type->type_size); + } else { + #if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912) + if (type->type_align > __STDCPP_DEFAULT_NEW_ALIGNMENT__) + vptr = ::operator new(type->type_size, + std::align_val_t(type->type_align)); + else + #endif + vptr = ::operator new(type->type_size); + } + } + value = vptr; + } + bool try_implicit_casts(handle src, bool convert) { + for (auto &cast : typeinfo->implicit_casts) { + type_caster_generic sub_caster(*cast.first); + if (sub_caster.load(src, convert)) { + value = cast.second(sub_caster.value); + return true; + } + } + return false; + } + bool try_direct_conversions(handle src) { + for (auto &converter : *typeinfo->direct_conversions) { + if (converter(src.ptr(), value)) + return true; + } + return false; + } + void check_holder_compat() {} + + PYBIND11_NOINLINE static void *local_load(PyObject *src, const type_info *ti) { + auto caster = type_caster_generic(ti); + if (caster.load(src, false)) + return caster.value; + return nullptr; + } + + /// Try to load with foreign typeinfo, if available. Used when there is no + /// native typeinfo, or when the native one wasn't able to produce a value. 
+ PYBIND11_NOINLINE bool try_load_foreign_module_local(handle src) { + constexpr auto *local_key = PYBIND11_MODULE_LOCAL_ID; + const auto pytype = src.get_type(); + if (!hasattr(pytype, local_key)) + return false; + + type_info *foreign_typeinfo = reinterpret_borrow(getattr(pytype, local_key)); + // Only consider this foreign loader if actually foreign and is a loader of the correct cpp type + if (foreign_typeinfo->module_local_load == &local_load + || (cpptype && !same_type(*cpptype, *foreign_typeinfo->cpptype))) + return false; + + if (auto result = foreign_typeinfo->module_local_load(src.ptr(), foreign_typeinfo)) { + value = result; + return true; + } + return false; + } + + // Implementation of `load`; this takes the type of `this` so that it can dispatch the relevant + // bits of code between here and copyable_holder_caster where the two classes need different + // logic (without having to resort to virtual inheritance). + template + PYBIND11_NOINLINE bool load_impl(handle src, bool convert) { + if (!src) return false; + if (!typeinfo) return try_load_foreign_module_local(src); + if (src.is_none()) { + // Defer accepting None to other overloads (if we aren't in convert mode): + if (!convert) return false; + value = nullptr; + return true; + } + + auto &this_ = static_cast(*this); + this_.check_holder_compat(); + + PyTypeObject *srctype = Py_TYPE(src.ptr()); + + // Case 1: If src is an exact type match for the target type then we can reinterpret_cast + // the instance's value pointer to the target type: + if (srctype == typeinfo->type) { + this_.load_value(reinterpret_cast(src.ptr())->get_value_and_holder()); + return true; + } + // Case 2: We have a derived class + else if (PyType_IsSubtype(srctype, typeinfo->type)) { + auto &bases = all_type_info(srctype); + bool no_cpp_mi = typeinfo->simple_type; + + // Case 2a: the python type is a Python-inherited derived class that inherits from just + // one simple (no MI) pybind11 class, or is an exact match, so the C++ 
instance is of + // the right type and we can use reinterpret_cast. + // (This is essentially the same as case 2b, but because not using multiple inheritance + // is extremely common, we handle it specially to avoid the loop iterator and type + // pointer lookup overhead) + if (bases.size() == 1 && (no_cpp_mi || bases.front()->type == typeinfo->type)) { + this_.load_value(reinterpret_cast(src.ptr())->get_value_and_holder()); + return true; + } + // Case 2b: the python type inherits from multiple C++ bases. Check the bases to see if + // we can find an exact match (or, for a simple C++ type, an inherited match); if so, we + // can safely reinterpret_cast to the relevant pointer. + else if (bases.size() > 1) { + for (auto base : bases) { + if (no_cpp_mi ? PyType_IsSubtype(base->type, typeinfo->type) : base->type == typeinfo->type) { + this_.load_value(reinterpret_cast(src.ptr())->get_value_and_holder(base)); + return true; + } + } + } + + // Case 2c: C++ multiple inheritance is involved and we couldn't find an exact type match + // in the registered bases, above, so try implicit casting (needed for proper C++ casting + // when MI is involved). + if (this_.try_implicit_casts(src, convert)) + return true; + } + + // Perform an implicit conversion + if (convert) { + for (auto &converter : typeinfo->implicit_conversions) { + auto temp = reinterpret_steal(converter(src.ptr(), typeinfo->type)); + if (load_impl(temp, false)) { + loader_life_support::add_patient(temp); + return true; + } + } + if (this_.try_direct_conversions(src)) + return true; + } + + // Failed to match local typeinfo. Try again with global. 
+ if (typeinfo->module_local) { + if (auto gtype = get_global_type_info(*typeinfo->cpptype)) { + typeinfo = gtype; + return load(src, false); + } + } + + // Global typeinfo has precedence over foreign module_local + return try_load_foreign_module_local(src); + } + + + // Called to do type lookup and wrap the pointer and type in a pair when a dynamic_cast + // isn't needed or can't be used. If the type is unknown, sets the error and returns a pair + // with .second = nullptr. (p.first = nullptr is not an error: it becomes None). + PYBIND11_NOINLINE static std::pair src_and_type( + const void *src, const std::type_info &cast_type, const std::type_info *rtti_type = nullptr) { + if (auto *tpi = get_type_info(cast_type)) + return {src, const_cast(tpi)}; + + // Not found, set error: + std::string tname = rtti_type ? rtti_type->name() : cast_type.name(); + detail::clean_type_id(tname); + std::string msg = "Unregistered type : " + tname; + PyErr_SetString(PyExc_TypeError, msg.c_str()); + return {nullptr, nullptr}; + } + + const type_info *typeinfo = nullptr; + const std::type_info *cpptype = nullptr; + void *value = nullptr; +}; + +/** + * Determine suitable casting operator for pointer-or-lvalue-casting type casters. The type caster + * needs to provide `operator T*()` and `operator T&()` operators. + * + * If the type supports moving the value away via an `operator T&&() &&` method, it should use + * `movable_cast_op_type` instead. + */ +template +using cast_op_type = + conditional_t>::value, + typename std::add_pointer>::type, + typename std::add_lvalue_reference>::type>; + +/** + * Determine suitable casting operator for a type caster with a movable value. Such a type caster + * needs to provide `operator T*()`, `operator T&()`, and `operator T&&() &&`. The latter will be + * called in appropriate contexts where the value can be moved rather than copied. + * + * These operator are automatically provided when using the PYBIND11_TYPE_CASTER macro. 
+ */ +template +using movable_cast_op_type = + conditional_t::type>::value, + typename std::add_pointer>::type, + conditional_t::value, + typename std::add_rvalue_reference>::type, + typename std::add_lvalue_reference>::type>>; + +// std::is_copy_constructible isn't quite enough: it lets std::vector (and similar) through when +// T is non-copyable, but code containing such a copy constructor fails to actually compile. +template struct is_copy_constructible : std::is_copy_constructible {}; + +// Specialization for types that appear to be copy constructible but also look like stl containers +// (we specifically check for: has `value_type` and `reference` with `reference = value_type&`): if +// so, copy constructability depends on whether the value_type is copy constructible. +template struct is_copy_constructible, + std::is_same, + // Avoid infinite recursion + negation> + >::value>> : is_copy_constructible {}; + +// Likewise for std::pair +// (after C++17 it is mandatory that the copy constructor not exist when the two types aren't themselves +// copy constructible, but this can not be relied upon when T1 or T2 are themselves containers). +template struct is_copy_constructible> + : all_of, is_copy_constructible> {}; + +// The same problems arise with std::is_copy_assignable, so we use the same workaround. +template struct is_copy_assignable : std::is_copy_assignable {}; +template struct is_copy_assignable, + std::is_same + >::value>> : is_copy_assignable {}; +template struct is_copy_assignable> + : all_of, is_copy_assignable> {}; + +PYBIND11_NAMESPACE_END(detail) + +// polymorphic_type_hook::get(src, tinfo) determines whether the object pointed +// to by `src` actually is an instance of some class derived from `itype`. 
+// If so, it sets `tinfo` to point to the std::type_info representing that derived +// type, and returns a pointer to the start of the most-derived object of that type +// (in which `src` is a subobject; this will be the same address as `src` in most +// single inheritance cases). If not, or if `src` is nullptr, it simply returns `src` +// and leaves `tinfo` at its default value of nullptr. +// +// The default polymorphic_type_hook just returns src. A specialization for polymorphic +// types determines the runtime type of the passed object and adjusts the this-pointer +// appropriately via dynamic_cast. This is what enables a C++ Animal* to appear +// to Python as a Dog (if Dog inherits from Animal, Animal is polymorphic, Dog is +// registered with pybind11, and this Animal is in fact a Dog). +// +// You may specialize polymorphic_type_hook yourself for types that want to appear +// polymorphic to Python but do not use C++ RTTI. (This is a not uncommon pattern +// in performance-sensitive applications, used most notably in LLVM.) +// +// polymorphic_type_hook_base allows users to specialize polymorphic_type_hook with +// std::enable_if. User provided specializations will always have higher priority than +// the default implementation and specialization provided in polymorphic_type_hook_base. +template +struct polymorphic_type_hook_base +{ + static const void *get(const itype *src, const std::type_info*&) { return src; } +}; +template +struct polymorphic_type_hook_base::value>> +{ + static const void *get(const itype *src, const std::type_info*& type) { + type = src ? 
&typeid(*src) : nullptr; + return dynamic_cast(src); + } +}; +template +struct polymorphic_type_hook : public polymorphic_type_hook_base {}; + +PYBIND11_NAMESPACE_BEGIN(detail) + +/// Generic type caster for objects stored on the heap +template class type_caster_base : public type_caster_generic { + using itype = intrinsic_t; + +public: + static constexpr auto name = _(); + + type_caster_base() : type_caster_base(typeid(type)) { } + explicit type_caster_base(const std::type_info &info) : type_caster_generic(info) { } + + static handle cast(const itype &src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference) + policy = return_value_policy::copy; + return cast(&src, policy, parent); + } + + static handle cast(itype &&src, return_value_policy, handle parent) { + return cast(&src, return_value_policy::move, parent); + } + + // Returns a (pointer, type_info) pair taking care of necessary type lookup for a + // polymorphic type (using RTTI by default, but can be overridden by specializing + // polymorphic_type_hook). If the instance isn't derived, returns the base version. + static std::pair src_and_type(const itype *src) { + auto &cast_type = typeid(itype); + const std::type_info *instance_type = nullptr; + const void *vsrc = polymorphic_type_hook::get(src, instance_type); + if (instance_type && !same_type(cast_type, *instance_type)) { + // This is a base pointer to a derived type. If the derived type is registered + // with pybind11, we want to make the full derived object available. + // In the typical case where itype is polymorphic, we get the correct + // derived pointer (which may be != base pointer) by a dynamic_cast to + // most derived type. 
If itype is not polymorphic, we won't get here + // except via a user-provided specialization of polymorphic_type_hook, + // and the user has promised that no this-pointer adjustment is + // required in that case, so it's OK to use static_cast. + if (const auto *tpi = get_type_info(*instance_type)) + return {vsrc, tpi}; + } + // Otherwise we have either a nullptr, an `itype` pointer, or an unknown derived pointer, so + // don't do a cast + return type_caster_generic::src_and_type(src, cast_type, instance_type); + } + + static handle cast(const itype *src, return_value_policy policy, handle parent) { + auto st = src_and_type(src); + return type_caster_generic::cast( + st.first, policy, parent, st.second, + make_copy_constructor(src), make_move_constructor(src)); + } + + static handle cast_holder(const itype *src, const void *holder) { + auto st = src_and_type(src); + return type_caster_generic::cast( + st.first, return_value_policy::take_ownership, {}, st.second, + nullptr, nullptr, holder); + } + + template using cast_op_type = detail::cast_op_type; + + operator itype*() { return (type *) value; } + operator itype&() { if (!value) throw reference_cast_error(); return *((itype *) value); } + +protected: + using Constructor = void *(*)(const void *); + + /* Only enabled when the types are {copy,move}-constructible *and* when the type + does not have a private operator new implementation. */ + template ::value>> + static auto make_copy_constructor(const T *x) -> decltype(new T(*x), Constructor{}) { + return [](const void *arg) -> void * { + return new T(*reinterpret_cast(arg)); + }; + } + + template ::value>> + static auto make_move_constructor(const T *x) -> decltype(new T(std::move(*const_cast(x))), Constructor{}) { + return [](const void *arg) -> void * { + return new T(std::move(*const_cast(reinterpret_cast(arg)))); + }; + } + + static Constructor make_copy_constructor(...) { return nullptr; } + static Constructor make_move_constructor(...) 
{ return nullptr; } +}; + +template class type_caster : public type_caster_base { }; +template using make_caster = type_caster>; + +// Shortcut for calling a caster's `cast_op_type` cast operator for casting a type_caster to a T +template typename make_caster::template cast_op_type cast_op(make_caster &caster) { + return caster.operator typename make_caster::template cast_op_type(); +} +template typename make_caster::template cast_op_type::type> +cast_op(make_caster &&caster) { + return std::move(caster).operator + typename make_caster::template cast_op_type::type>(); +} + +template class type_caster> { +private: + using caster_t = make_caster; + caster_t subcaster; + using subcaster_cast_op_type = typename caster_t::template cast_op_type; + static_assert(std::is_same::type &, subcaster_cast_op_type>::value, + "std::reference_wrapper caster requires T to have a caster with an `T &` operator"); +public: + bool load(handle src, bool convert) { return subcaster.load(src, convert); } + static constexpr auto name = caster_t::name; + static handle cast(const std::reference_wrapper &src, return_value_policy policy, handle parent) { + // It is definitely wrong to take ownership of this pointer, so mask that rvp + if (policy == return_value_policy::take_ownership || policy == return_value_policy::automatic) + policy = return_value_policy::automatic_reference; + return caster_t::cast(&src.get(), policy, parent); + } + template using cast_op_type = std::reference_wrapper; + operator std::reference_wrapper() { return subcaster.operator subcaster_cast_op_type&(); } +}; + +#define PYBIND11_TYPE_CASTER(type, py_name) \ + protected: \ + type value; \ + public: \ + static constexpr auto name = py_name; \ + template >::value, int> = 0> \ + static handle cast(T_ *src, return_value_policy policy, handle parent) { \ + if (!src) return none().release(); \ + if (policy == return_value_policy::take_ownership) { \ + auto h = cast(std::move(*src), policy, parent); delete src; return h; \ + 
} else { \ + return cast(*src, policy, parent); \ + } \ + } \ + operator type*() { return &value; } \ + operator type&() { return value; } \ + operator type&&() && { return std::move(value); } \ + template using cast_op_type = pybind11::detail::movable_cast_op_type + + +template using is_std_char_type = any_of< + std::is_same, /* std::string */ +#if defined(PYBIND11_HAS_U8STRING) + std::is_same, /* std::u8string */ +#endif + std::is_same, /* std::u16string */ + std::is_same, /* std::u32string */ + std::is_same /* std::wstring */ +>; + +template +struct type_caster::value && !is_std_char_type::value>> { + using _py_type_0 = conditional_t; + using _py_type_1 = conditional_t::value, _py_type_0, typename std::make_unsigned<_py_type_0>::type>; + using py_type = conditional_t::value, double, _py_type_1>; +public: + + bool load(handle src, bool convert) { + py_type py_value; + + if (!src) + return false; + + if (std::is_floating_point::value) { + if (convert || PyFloat_Check(src.ptr())) + py_value = (py_type) PyFloat_AsDouble(src.ptr()); + else + return false; + } else if (PyFloat_Check(src.ptr())) { + return false; + } else if (std::is_unsigned::value) { + py_value = as_unsigned(src.ptr()); + } else { // signed integer: + py_value = sizeof(T) <= sizeof(long) + ? (py_type) PyLong_AsLong(src.ptr()) + : (py_type) PYBIND11_LONG_AS_LONGLONG(src.ptr()); + } + + bool py_err = py_value == (py_type) -1 && PyErr_Occurred(); + + // Protect std::numeric_limits::min/max with parentheses + if (py_err || (std::is_integral::value && sizeof(py_type) != sizeof(T) && + (py_value < (py_type) (std::numeric_limits::min)() || + py_value > (py_type) (std::numeric_limits::max)()))) { + bool type_error = py_err && PyErr_ExceptionMatches( +#if PY_VERSION_HEX < 0x03000000 && !defined(PYPY_VERSION) + PyExc_SystemError +#else + PyExc_TypeError +#endif + ); + PyErr_Clear(); + if (type_error && convert && PyNumber_Check(src.ptr())) { + auto tmp = reinterpret_steal(std::is_floating_point::value + ? 
PyNumber_Float(src.ptr()) + : PyNumber_Long(src.ptr())); + PyErr_Clear(); + return load(tmp, false); + } + return false; + } + + value = (T) py_value; + return true; + } + + template + static typename std::enable_if::value, handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PyFloat_FromDouble((double) src); + } + + template + static typename std::enable_if::value && std::is_signed::value && (sizeof(U) <= sizeof(long)), handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PYBIND11_LONG_FROM_SIGNED((long) src); + } + + template + static typename std::enable_if::value && std::is_unsigned::value && (sizeof(U) <= sizeof(unsigned long)), handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PYBIND11_LONG_FROM_UNSIGNED((unsigned long) src); + } + + template + static typename std::enable_if::value && std::is_signed::value && (sizeof(U) > sizeof(long)), handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PyLong_FromLongLong((long long) src); + } + + template + static typename std::enable_if::value && std::is_unsigned::value && (sizeof(U) > sizeof(unsigned long)), handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PyLong_FromUnsignedLongLong((unsigned long long) src); + } + + PYBIND11_TYPE_CASTER(T, _::value>("int", "float")); +}; + +template struct void_caster { +public: + bool load(handle src, bool) { + if (src && src.is_none()) + return true; + return false; + } + static handle cast(T, return_value_policy /* policy */, handle /* parent */) { + return none().inc_ref(); + } + PYBIND11_TYPE_CASTER(T, _("None")); +}; + +template <> class type_caster : public void_caster {}; + +template <> class type_caster : public type_caster { +public: + using type_caster::cast; + + bool load(handle h, bool) { + if (!h) { + return false; + } else if (h.is_none()) { + value = nullptr; + 
return true; + } + + /* Check if this is a capsule */ + if (isinstance(h)) { + value = reinterpret_borrow(h); + return true; + } + + /* Check if this is a C++ type */ + auto &bases = all_type_info((PyTypeObject *) h.get_type().ptr()); + if (bases.size() == 1) { // Only allowing loading from a single-value type + value = values_and_holders(reinterpret_cast(h.ptr())).begin()->value_ptr(); + return true; + } + + /* Fail */ + return false; + } + + static handle cast(const void *ptr, return_value_policy /* policy */, handle /* parent */) { + if (ptr) + return capsule(ptr).release(); + else + return none().inc_ref(); + } + + template using cast_op_type = void*&; + operator void *&() { return value; } + static constexpr auto name = _("capsule"); +private: + void *value = nullptr; +}; + +template <> class type_caster : public void_caster { }; + +template <> class type_caster { +public: + bool load(handle src, bool convert) { + if (!src) return false; + else if (src.ptr() == Py_True) { value = true; return true; } + else if (src.ptr() == Py_False) { value = false; return true; } + else if (convert || !strcmp("numpy.bool_", Py_TYPE(src.ptr())->tp_name)) { + // (allow non-implicit conversion for numpy booleans) + + Py_ssize_t res = -1; + if (src.is_none()) { + res = 0; // None is implicitly converted to False + } + #if defined(PYPY_VERSION) + // On PyPy, check that "__bool__" (or "__nonzero__" on Python 2.7) attr exists + else if (hasattr(src, PYBIND11_BOOL_ATTR)) { + res = PyObject_IsTrue(src.ptr()); + } + #else + // Alternate approach for CPython: this does the same as the above, but optimized + // using the CPython API so as to avoid an unneeded attribute lookup. 
+ else if (auto tp_as_number = src.ptr()->ob_type->tp_as_number) { + if (PYBIND11_NB_BOOL(tp_as_number)) { + res = (*PYBIND11_NB_BOOL(tp_as_number))(src.ptr()); + } + } + #endif + if (res == 0 || res == 1) { + value = (bool) res; + return true; + } else { + PyErr_Clear(); + } + } + return false; + } + static handle cast(bool src, return_value_policy /* policy */, handle /* parent */) { + return handle(src ? Py_True : Py_False).inc_ref(); + } + PYBIND11_TYPE_CASTER(bool, _("bool")); +}; + +// Helper class for UTF-{8,16,32} C++ stl strings: +template struct string_caster { + using CharT = typename StringType::value_type; + + // Simplify life by being able to assume standard char sizes (the standard only guarantees + // minimums, but Python requires exact sizes) + static_assert(!std::is_same::value || sizeof(CharT) == 1, "Unsupported char size != 1"); +#if defined(PYBIND11_HAS_U8STRING) + static_assert(!std::is_same::value || sizeof(CharT) == 1, "Unsupported char8_t size != 1"); +#endif + static_assert(!std::is_same::value || sizeof(CharT) == 2, "Unsupported char16_t size != 2"); + static_assert(!std::is_same::value || sizeof(CharT) == 4, "Unsupported char32_t size != 4"); + // wchar_t can be either 16 bits (Windows) or 32 (everywhere else) + static_assert(!std::is_same::value || sizeof(CharT) == 2 || sizeof(CharT) == 4, + "Unsupported wchar_t size != 2/4"); + static constexpr size_t UTF_N = 8 * sizeof(CharT); + + bool load(handle src, bool) { +#if PY_MAJOR_VERSION < 3 + object temp; +#endif + handle load_src = src; + if (!src) { + return false; + } else if (!PyUnicode_Check(load_src.ptr())) { +#if PY_MAJOR_VERSION >= 3 + return load_bytes(load_src); +#else + if (std::is_same::value) { + return load_bytes(load_src); + } + + // The below is a guaranteed failure in Python 3 when PyUnicode_Check returns false + if (!PYBIND11_BYTES_CHECK(load_src.ptr())) + return false; + + temp = reinterpret_steal(PyUnicode_FromObject(load_src.ptr())); + if (!temp) { PyErr_Clear(); 
return false; } + load_src = temp; +#endif + } + + object utfNbytes = reinterpret_steal(PyUnicode_AsEncodedString( + load_src.ptr(), UTF_N == 8 ? "utf-8" : UTF_N == 16 ? "utf-16" : "utf-32", nullptr)); + if (!utfNbytes) { PyErr_Clear(); return false; } + + const CharT *buffer = reinterpret_cast(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr())); + size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT); + if (UTF_N > 8) { buffer++; length--; } // Skip BOM for UTF-16/32 + value = StringType(buffer, length); + + // If we're loading a string_view we need to keep the encoded Python object alive: + if (IsView) + loader_life_support::add_patient(utfNbytes); + + return true; + } + + static handle cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) { + const char *buffer = reinterpret_cast(src.data()); + ssize_t nbytes = ssize_t(src.size() * sizeof(CharT)); + handle s = decode_utfN(buffer, nbytes); + if (!s) throw error_already_set(); + return s; + } + + PYBIND11_TYPE_CASTER(StringType, _(PYBIND11_STRING_NAME)); + +private: + static handle decode_utfN(const char *buffer, ssize_t nbytes) { +#if !defined(PYPY_VERSION) + return + UTF_N == 8 ? PyUnicode_DecodeUTF8(buffer, nbytes, nullptr) : + UTF_N == 16 ? PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr) : + PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr); +#else + // PyPy seems to have multiple problems related to PyUnicode_UTF*: the UTF8 version + // sometimes segfaults for unknown reasons, while the UTF16 and 32 versions require a + // non-const char * arguments, which is also a nuisance, so bypass the whole thing by just + // passing the encoding as a string value, which works properly: + return PyUnicode_Decode(buffer, nbytes, UTF_N == 8 ? "utf-8" : UTF_N == 16 ? "utf-16" : "utf-32", nullptr); +#endif + } + + // When loading into a std::string or char*, accept a bytes object as-is (i.e. + // without any encoding/decoding attempt). 
For other C++ char sizes this is a no-op. + // which supports loading a unicode from a str, doesn't take this path. + template + bool load_bytes(enable_if_t::value, handle> src) { + if (PYBIND11_BYTES_CHECK(src.ptr())) { + // We were passed a Python 3 raw bytes; accept it into a std::string or char* + // without any encoding attempt. + const char *bytes = PYBIND11_BYTES_AS_STRING(src.ptr()); + if (bytes) { + value = StringType(bytes, (size_t) PYBIND11_BYTES_SIZE(src.ptr())); + return true; + } + } + + return false; + } + + template + bool load_bytes(enable_if_t::value, handle>) { return false; } +}; + +template +struct type_caster, enable_if_t::value>> + : string_caster> {}; + +#ifdef PYBIND11_HAS_STRING_VIEW +template +struct type_caster, enable_if_t::value>> + : string_caster, true> {}; +#endif + +// Type caster for C-style strings. We basically use a std::string type caster, but also add the +// ability to use None as a nullptr char* (which the string caster doesn't allow). +template struct type_caster::value>> { + using StringType = std::basic_string; + using StringCaster = type_caster; + StringCaster str_caster; + bool none = false; + CharT one_char = 0; +public: + bool load(handle src, bool convert) { + if (!src) return false; + if (src.is_none()) { + // Defer accepting None to other overloads (if we aren't in convert mode): + if (!convert) return false; + none = true; + return true; + } + return str_caster.load(src, convert); + } + + static handle cast(const CharT *src, return_value_policy policy, handle parent) { + if (src == nullptr) return pybind11::none().inc_ref(); + return StringCaster::cast(StringType(src), policy, parent); + } + + static handle cast(CharT src, return_value_policy policy, handle parent) { + if (std::is_same::value) { + handle s = PyUnicode_DecodeLatin1((const char *) &src, 1, nullptr); + if (!s) throw error_already_set(); + return s; + } + return StringCaster::cast(StringType(1, src), policy, parent); + } + + operator CharT*() { 
return none ? nullptr : const_cast(static_cast(str_caster).c_str()); } + operator CharT&() { + if (none) + throw value_error("Cannot convert None to a character"); + + auto &value = static_cast(str_caster); + size_t str_len = value.size(); + if (str_len == 0) + throw value_error("Cannot convert empty string to a character"); + + // If we're in UTF-8 mode, we have two possible failures: one for a unicode character that + // is too high, and one for multiple unicode characters (caught later), so we need to figure + // out how long the first encoded character is in bytes to distinguish between these two + // errors. We also allow want to allow unicode characters U+0080 through U+00FF, as those + // can fit into a single char value. + if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) { + unsigned char v0 = static_cast(value[0]); + size_t char0_bytes = !(v0 & 0x80) ? 1 : // low bits only: 0-127 + (v0 & 0xE0) == 0xC0 ? 2 : // 0b110xxxxx - start of 2-byte sequence + (v0 & 0xF0) == 0xE0 ? 3 : // 0b1110xxxx - start of 3-byte sequence + 4; // 0b11110xxx - start of 4-byte sequence + + if (char0_bytes == str_len) { + // If we have a 128-255 value, we can decode it into a single char: + if (char0_bytes == 2 && (v0 & 0xFC) == 0xC0) { // 0x110000xx 0x10xxxxxx + one_char = static_cast(((v0 & 3) << 6) + (static_cast(value[1]) & 0x3F)); + return one_char; + } + // Otherwise we have a single character, but it's > U+00FF + throw value_error("Character code point not in range(0x100)"); + } + } + + // UTF-16 is much easier: we can only have a surrogate pair for values above U+FFFF, thus a + // surrogate pair with total length 2 instantly indicates a range error (but not a "your + // string was too long" error). 
+ else if (StringCaster::UTF_N == 16 && str_len == 2) { + one_char = static_cast(value[0]); + if (one_char >= 0xD800 && one_char < 0xE000) + throw value_error("Character code point not in range(0x10000)"); + } + + if (str_len != 1) + throw value_error("Expected a character, but multi-character string found"); + + one_char = value[0]; + return one_char; + } + + static constexpr auto name = _(PYBIND11_STRING_NAME); + template using cast_op_type = pybind11::detail::cast_op_type<_T>; +}; + +// Base implementation for std::tuple and std::pair +template class Tuple, typename... Ts> class tuple_caster { + using type = Tuple; + static constexpr auto size = sizeof...(Ts); + using indices = make_index_sequence; +public: + + bool load(handle src, bool convert) { + if (!isinstance(src)) + return false; + const auto seq = reinterpret_borrow(src); + if (seq.size() != size) + return false; + return load_impl(seq, convert, indices{}); + } + + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + return cast_impl(std::forward(src), policy, parent, indices{}); + } + + // copied from the PYBIND11_TYPE_CASTER macro + template + static handle cast(T *src, return_value_policy policy, handle parent) { + if (!src) return none().release(); + if (policy == return_value_policy::take_ownership) { + auto h = cast(std::move(*src), policy, parent); delete src; return h; + } else { + return cast(*src, policy, parent); + } + } + + static constexpr auto name = _("Tuple[") + concat(make_caster::name...) 
+ _("]"); + + template using cast_op_type = type; + + operator type() & { return implicit_cast(indices{}); } + operator type() && { return std::move(*this).implicit_cast(indices{}); } + +protected: + template + type implicit_cast(index_sequence) & { return type(cast_op(std::get(subcasters))...); } + template + type implicit_cast(index_sequence) && { return type(cast_op(std::move(std::get(subcasters)))...); } + + static constexpr bool load_impl(const sequence &, bool, index_sequence<>) { return true; } + + template + bool load_impl(const sequence &seq, bool convert, index_sequence) { +#ifdef __cpp_fold_expressions + if ((... || !std::get(subcasters).load(seq[Is], convert))) + return false; +#else + for (bool r : {std::get(subcasters).load(seq[Is], convert)...}) + if (!r) + return false; +#endif + return true; + } + + /* Implementation: Convert a C++ tuple into a Python tuple */ + template + static handle cast_impl(T &&src, return_value_policy policy, handle parent, index_sequence) { + std::array entries{{ + reinterpret_steal(make_caster::cast(std::get(std::forward(src)), policy, parent))... + }}; + for (const auto &entry: entries) + if (!entry) + return handle(); + tuple result(size); + int counter = 0; + for (auto & entry: entries) + PyTuple_SET_ITEM(result.ptr(), counter++, entry.release().ptr()); + return result.release(); + } + + Tuple...> subcasters; +}; + +template class type_caster> + : public tuple_caster {}; + +template class type_caster> + : public tuple_caster {}; + +/// Helper class which abstracts away certain actions. Users can provide specializations for +/// custom holders, but it's only necessary if the type has a non-standard interface. +template +struct holder_helper { + static auto get(const T &p) -> decltype(p.get()) { return p.get(); } +}; + +/// Type caster for holder types like std::shared_ptr, etc. 
+template +struct copyable_holder_caster : public type_caster_base { +public: + using base = type_caster_base; + static_assert(std::is_base_of>::value, + "Holder classes are only supported for custom types"); + using base::base; + using base::cast; + using base::typeinfo; + using base::value; + + bool load(handle src, bool convert) { + return base::template load_impl>(src, convert); + } + + explicit operator type*() { return this->value; } + // static_cast works around compiler error with MSVC 17 and CUDA 10.2 + // see issue #2180 + explicit operator type&() { return *(static_cast(this->value)); } + explicit operator holder_type*() { return std::addressof(holder); } + + // Workaround for Intel compiler bug + // see pybind11 issue 94 + #if defined(__ICC) || defined(__INTEL_COMPILER) + operator holder_type&() { return holder; } + #else + explicit operator holder_type&() { return holder; } + #endif + + static handle cast(const holder_type &src, return_value_policy, handle) { + const auto *ptr = holder_helper::get(src); + return type_caster_base::cast_holder(ptr, &src); + } + +protected: + friend class type_caster_generic; + void check_holder_compat() { + if (typeinfo->default_holder) + throw cast_error("Unable to load a custom holder type from a default-holder instance"); + } + + bool load_value(value_and_holder &&v_h) { + if (v_h.holder_constructed()) { + value = v_h.value_ptr(); + holder = v_h.template holder(); + return true; + } else { + throw cast_error("Unable to cast from non-held to held instance (T& to Holder) " +#if defined(NDEBUG) + "(compile in debug mode for type information)"); +#else + "of type '" + type_id() + "''"); +#endif + } + } + + template ::value, int> = 0> + bool try_implicit_casts(handle, bool) { return false; } + + template ::value, int> = 0> + bool try_implicit_casts(handle src, bool convert) { + for (auto &cast : typeinfo->implicit_casts) { + copyable_holder_caster sub_caster(*cast.first); + if (sub_caster.load(src, convert)) { + value = 
cast.second(sub_caster.value); + holder = holder_type(sub_caster.holder, (type *) value); + return true; + } + } + return false; + } + + static bool try_direct_conversions(handle) { return false; } + + + holder_type holder; +}; + +/// Specialize for the common std::shared_ptr, so users don't need to +template +class type_caster> : public copyable_holder_caster> { }; + +template +struct move_only_holder_caster { + static_assert(std::is_base_of, type_caster>::value, + "Holder classes are only supported for custom types"); + + static handle cast(holder_type &&src, return_value_policy, handle) { + auto *ptr = holder_helper::get(src); + return type_caster_base::cast_holder(ptr, std::addressof(src)); + } + static constexpr auto name = type_caster_base::name; +}; + +template +class type_caster> + : public move_only_holder_caster> { }; + +template +using type_caster_holder = conditional_t::value, + copyable_holder_caster, + move_only_holder_caster>; + +template struct always_construct_holder { static constexpr bool value = Value; }; + +/// Create a specialization for custom holder types (silently ignores std::shared_ptr) +#define PYBIND11_DECLARE_HOLDER_TYPE(type, holder_type, ...) 
\ + namespace pybind11 { namespace detail { \ + template \ + struct always_construct_holder : always_construct_holder { }; \ + template \ + class type_caster::value>> \ + : public type_caster_holder { }; \ + }} + +// PYBIND11_DECLARE_HOLDER_TYPE holder types: +template struct is_holder_type : + std::is_base_of, detail::type_caster> {}; +// Specialization for always-supported unique_ptr holders: +template struct is_holder_type> : + std::true_type {}; + +template struct handle_type_name { static constexpr auto name = _(); }; +template <> struct handle_type_name { static constexpr auto name = _(PYBIND11_BYTES_NAME); }; +template <> struct handle_type_name { static constexpr auto name = _("int"); }; +template <> struct handle_type_name { static constexpr auto name = _("Iterable"); }; +template <> struct handle_type_name { static constexpr auto name = _("Iterator"); }; +template <> struct handle_type_name { static constexpr auto name = _("None"); }; +template <> struct handle_type_name { static constexpr auto name = _("*args"); }; +template <> struct handle_type_name { static constexpr auto name = _("**kwargs"); }; + +template +struct pyobject_caster { + template ::value, int> = 0> + bool load(handle src, bool /* convert */) { value = src; return static_cast(value); } + + template ::value, int> = 0> + bool load(handle src, bool /* convert */) { + if (!isinstance(src)) + return false; + value = reinterpret_borrow(src); + return true; + } + + static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) { + return src.inc_ref(); + } + PYBIND11_TYPE_CASTER(type, handle_type_name::name); +}; + +template +class type_caster::value>> : public pyobject_caster { }; + +// Our conditions for enabling moving are quite restrictive: +// At compile time: +// - T needs to be a non-const, non-pointer, non-reference type +// - type_caster::operator T&() must exist +// - the type must be move constructible (obviously) +// At run-time: +// - if the type is 
non-copy-constructible, the object must be the sole owner of the type (i.e. it +// must have ref_count() == 1)h +// If any of the above are not satisfied, we fall back to copying. +template using move_is_plain_type = satisfies_none_of; +template struct move_always : std::false_type {}; +template struct move_always, + negation>, + std::is_move_constructible, + std::is_same>().operator T&()), T&> +>::value>> : std::true_type {}; +template struct move_if_unreferenced : std::false_type {}; +template struct move_if_unreferenced, + negation>, + std::is_move_constructible, + std::is_same>().operator T&()), T&> +>::value>> : std::true_type {}; +template using move_never = none_of, move_if_unreferenced>; + +// Detect whether returning a `type` from a cast on type's type_caster is going to result in a +// reference or pointer to a local variable of the type_caster. Basically, only +// non-reference/pointer `type`s and reference/pointers from a type_caster_generic are safe; +// everything else returns a reference/pointer to a local variable. +template using cast_is_temporary_value_reference = bool_constant< + (std::is_reference::value || std::is_pointer::value) && + !std::is_base_of>::value && + !std::is_same, void>::value +>; + +// When a value returned from a C++ function is being cast back to Python, we almost always want to +// force `policy = move`, regardless of the return value policy the function/method was declared +// with. +template struct return_value_policy_override { + static return_value_policy policy(return_value_policy p) { return p; } +}; + +template struct return_value_policy_override>::value, void>> { + static return_value_policy policy(return_value_policy p) { + return !std::is_lvalue_reference::value && + !std::is_pointer::value + ? 
return_value_policy::move : p; + } +}; + +// Basic python -> C++ casting; throws if casting fails +template type_caster &load_type(type_caster &conv, const handle &handle) { + if (!conv.load(handle, true)) { +#if defined(NDEBUG) + throw cast_error("Unable to cast Python instance to C++ type (compile in debug mode for details)"); +#else + throw cast_error("Unable to cast Python instance of type " + + (std::string) str(handle.get_type()) + " to C++ type '" + type_id() + "'"); +#endif + } + return conv; +} +// Wrapper around the above that also constructs and returns a type_caster +template make_caster load_type(const handle &handle) { + make_caster conv; + load_type(conv, handle); + return conv; +} + +PYBIND11_NAMESPACE_END(detail) + +// pytype -> C++ type +template ::value, int> = 0> +T cast(const handle &handle) { + using namespace detail; + static_assert(!cast_is_temporary_value_reference::value, + "Unable to cast type to reference: value is local to type caster"); + return cast_op(load_type(handle)); +} + +// pytype -> pytype (calls converting constructor) +template ::value, int> = 0> +T cast(const handle &handle) { return T(reinterpret_borrow(handle)); } + +// C++ type -> py::object +template ::value, int> = 0> +object cast(T &&value, return_value_policy policy = return_value_policy::automatic_reference, + handle parent = handle()) { + using no_ref_T = typename std::remove_reference::type; + if (policy == return_value_policy::automatic) + policy = std::is_pointer::value ? return_value_policy::take_ownership : + std::is_lvalue_reference::value ? return_value_policy::copy : return_value_policy::move; + else if (policy == return_value_policy::automatic_reference) + policy = std::is_pointer::value ? return_value_policy::reference : + std::is_lvalue_reference::value ? 
return_value_policy::copy : return_value_policy::move; + return reinterpret_steal(detail::make_caster::cast(std::forward(value), policy, parent)); +} + +template T handle::cast() const { return pybind11::cast(*this); } +template <> inline void handle::cast() const { return; } + +template +detail::enable_if_t::value, T> move(object &&obj) { + if (obj.ref_count() > 1) +#if defined(NDEBUG) + throw cast_error("Unable to cast Python instance to C++ rvalue: instance has multiple references" + " (compile in debug mode for details)"); +#else + throw cast_error("Unable to move from Python " + (std::string) str(obj.get_type()) + + " instance to C++ " + type_id() + " instance: instance has multiple references"); +#endif + + // Move into a temporary and return that, because the reference may be a local value of `conv` + T ret = std::move(detail::load_type(obj).operator T&()); + return ret; +} + +// Calling cast() on an rvalue calls pybind11::cast with the object rvalue, which does: +// - If we have to move (because T has no copy constructor), do it. This will fail if the moved +// object has multiple references, but trying to copy will fail to compile. +// - If both movable and copyable, check ref count: if 1, move; otherwise copy +// - Otherwise (not movable), copy. 
+template detail::enable_if_t::value, T> cast(object &&object) { + return move(std::move(object)); +} +template detail::enable_if_t::value, T> cast(object &&object) { + if (object.ref_count() > 1) + return cast(object); + else + return move(std::move(object)); +} +template detail::enable_if_t::value, T> cast(object &&object) { + return cast(object); +} + +template T object::cast() const & { return pybind11::cast(*this); } +template T object::cast() && { return pybind11::cast(std::move(*this)); } +template <> inline void object::cast() const & { return; } +template <> inline void object::cast() && { return; } + +PYBIND11_NAMESPACE_BEGIN(detail) + +// Declared in pytypes.h: +template ::value, int>> +object object_or_cast(T &&o) { return pybind11::cast(std::forward(o)); } + +struct overload_unused {}; // Placeholder type for the unneeded (and dead code) static variable in the OVERLOAD_INT macro +template using overload_caster_t = conditional_t< + cast_is_temporary_value_reference::value, make_caster, overload_unused>; + +// Trampoline use: for reference/pointer types to value-converted values, we do a value cast, then +// store the result in the given variable. For other types, this is a no-op. +template enable_if_t::value, T> cast_ref(object &&o, make_caster &caster) { + return cast_op(load_type(caster, o)); +} +template enable_if_t::value, T> cast_ref(object &&, overload_unused &) { + pybind11_fail("Internal error: cast_ref fallback invoked"); } + +// Trampoline use: Having a pybind11::cast with an invalid reference type is going to static_assert, even +// though if it's in dead code, so we provide a "trampoline" to pybind11::cast that only does anything in +// cases where pybind11::cast is valid. 
+template enable_if_t::value, T> cast_safe(object &&o) { + return pybind11::cast(std::move(o)); } +template enable_if_t::value, T> cast_safe(object &&) { + pybind11_fail("Internal error: cast_safe fallback invoked"); } +template <> inline void cast_safe(object &&) {} + +PYBIND11_NAMESPACE_END(detail) + +template +tuple make_tuple() { return tuple(0); } + +template tuple make_tuple(Args&&... args_) { + constexpr size_t size = sizeof...(Args); + std::array args { + { reinterpret_steal(detail::make_caster::cast( + std::forward(args_), policy, nullptr))... } + }; + for (size_t i = 0; i < args.size(); i++) { + if (!args[i]) { +#if defined(NDEBUG) + throw cast_error("make_tuple(): unable to convert arguments to Python object (compile in debug mode for details)"); +#else + std::array argtypes { {type_id()...} }; + throw cast_error("make_tuple(): unable to convert argument of type '" + + argtypes[i] + "' to Python object"); +#endif + } + } + tuple result(size); + int counter = 0; + for (auto &arg_value : args) + PyTuple_SET_ITEM(result.ptr(), counter++, arg_value.release().ptr()); + return result; +} + +/// \ingroup annotations +/// Annotation for arguments +struct arg { + /// Constructs an argument with the name of the argument; if null or omitted, this is a positional argument. + constexpr explicit arg(const char *name = nullptr) : name(name), flag_noconvert(false), flag_none(true) { } + /// Assign a value to this argument + template arg_v operator=(T &&value) const; + /// Indicate that the type should not be converted in the type caster + arg &noconvert(bool flag = true) { flag_noconvert = flag; return *this; } + /// Indicates that the argument should/shouldn't allow None (e.g. for nullable pointer args) + arg &none(bool flag = true) { flag_none = flag; return *this; } + + const char *name; ///< If non-null, this is a named kwargs argument + bool flag_noconvert : 1; ///< If set, do not allow conversion (requires a supporting type caster!) 
+ bool flag_none : 1; ///< If set (the default), allow None to be passed to this argument +}; + +/// \ingroup annotations +/// Annotation for arguments with values +struct arg_v : arg { +private: + template + arg_v(arg &&base, T &&x, const char *descr = nullptr) + : arg(base), + value(reinterpret_steal( + detail::make_caster::cast(x, return_value_policy::automatic, {}) + )), + descr(descr) +#if !defined(NDEBUG) + , type(type_id()) +#endif + { } + +public: + /// Direct construction with name, default, and description + template + arg_v(const char *name, T &&x, const char *descr = nullptr) + : arg_v(arg(name), std::forward(x), descr) { } + + /// Called internally when invoking `py::arg("a") = value` + template + arg_v(const arg &base, T &&x, const char *descr = nullptr) + : arg_v(arg(base), std::forward(x), descr) { } + + /// Same as `arg::noconvert()`, but returns *this as arg_v&, not arg& + arg_v &noconvert(bool flag = true) { arg::noconvert(flag); return *this; } + + /// Same as `arg::nonone()`, but returns *this as arg_v&, not arg& + arg_v &none(bool flag = true) { arg::none(flag); return *this; } + + /// The default value + object value; + /// The (optional) description of the default value + const char *descr; +#if !defined(NDEBUG) + /// The C++ type name of the default value (only available when compiled in debug mode) + std::string type; +#endif +}; + +/// \ingroup annotations +/// Annotation indicating that all following arguments are keyword-only; the is the equivalent of an +/// unnamed '*' argument (in Python 3) +struct kwonly {}; + +template +arg_v arg::operator=(T &&value) const { return {std::move(*this), std::forward(value)}; } + +/// Alias for backward compatibility -- to be removed in version 2.0 +template using arg_t = arg_v; + +inline namespace literals { +/** \rst + String literal version of `arg` + \endrst */ +constexpr arg operator"" _a(const char *name, size_t) { return arg(name); } +} + +PYBIND11_NAMESPACE_BEGIN(detail) + +// forward 
declaration (definition in attr.h) +struct function_record; + +/// Internal data associated with a single function call +struct function_call { + function_call(const function_record &f, handle p); // Implementation in attr.h + + /// The function data: + const function_record &func; + + /// Arguments passed to the function: + std::vector args; + + /// The `convert` value the arguments should be loaded with + std::vector args_convert; + + /// Extra references for the optional `py::args` and/or `py::kwargs` arguments (which, if + /// present, are also in `args` but without a reference). + object args_ref, kwargs_ref; + + /// The parent, if any + handle parent; + + /// If this is a call to an initializer, this argument contains `self` + handle init_self; +}; + + +/// Helper class which loads arguments for C++ functions called from Python +template +class argument_loader { + using indices = make_index_sequence; + + template using argument_is_args = std::is_same, args>; + template using argument_is_kwargs = std::is_same, kwargs>; + // Get args/kwargs argument positions relative to the end of the argument list: + static constexpr auto args_pos = constexpr_first() - (int) sizeof...(Args), + kwargs_pos = constexpr_first() - (int) sizeof...(Args); + + static constexpr bool args_kwargs_are_last = kwargs_pos >= - 1 && args_pos >= kwargs_pos - 1; + + static_assert(args_kwargs_are_last, "py::args/py::kwargs are only permitted as the last argument(s) of a function"); + +public: + static constexpr bool has_kwargs = kwargs_pos < 0; + static constexpr bool has_args = args_pos < 0; + + static constexpr auto arg_names = concat(type_descr(make_caster::name)...); + + bool load_args(function_call &call) { + return load_impl_sequence(call, indices{}); + } + + template + enable_if_t::value, Return> call(Func &&f) && { + return std::move(*this).template call_impl(std::forward(f), indices{}, Guard{}); + } + + template + enable_if_t::value, void_type> call(Func &&f) && { + 
std::move(*this).template call_impl(std::forward(f), indices{}, Guard{}); + return void_type(); + } + +private: + + static bool load_impl_sequence(function_call &, index_sequence<>) { return true; } + + template + bool load_impl_sequence(function_call &call, index_sequence) { +#ifdef __cpp_fold_expressions + if ((... || !std::get(argcasters).load(call.args[Is], call.args_convert[Is]))) + return false; +#else + for (bool r : {std::get(argcasters).load(call.args[Is], call.args_convert[Is])...}) + if (!r) + return false; +#endif + return true; + } + + template + Return call_impl(Func &&f, index_sequence, Guard &&) && { + return std::forward(f)(cast_op(std::move(std::get(argcasters)))...); + } + + std::tuple...> argcasters; +}; + +/// Helper class which collects only positional arguments for a Python function call. +/// A fancier version below can collect any argument, but this one is optimal for simple calls. +template +class simple_collector { +public: + template + explicit simple_collector(Ts &&...values) + : m_args(pybind11::make_tuple(std::forward(values)...)) { } + + const tuple &args() const & { return m_args; } + dict kwargs() const { return {}; } + + tuple args() && { return std::move(m_args); } + + /// Call a Python function and pass the collected arguments + object call(PyObject *ptr) const { + PyObject *result = PyObject_CallObject(ptr, m_args.ptr()); + if (!result) + throw error_already_set(); + return reinterpret_steal(result); + } + +private: + tuple m_args; +}; + +/// Helper class which collects positional, keyword, * and ** arguments for a Python function call +template +class unpacking_collector { +public: + template + explicit unpacking_collector(Ts &&...values) { + // Tuples aren't (easily) resizable so a list is needed for collection, + // but the actual function call strictly requires a tuple. + auto args_list = list(); + int _[] = { 0, (process(args_list, std::forward(values)), 0)... 
}; + ignore_unused(_); + + m_args = std::move(args_list); + } + + const tuple &args() const & { return m_args; } + const dict &kwargs() const & { return m_kwargs; } + + tuple args() && { return std::move(m_args); } + dict kwargs() && { return std::move(m_kwargs); } + + /// Call a Python function and pass the collected arguments + object call(PyObject *ptr) const { + PyObject *result = PyObject_Call(ptr, m_args.ptr(), m_kwargs.ptr()); + if (!result) + throw error_already_set(); + return reinterpret_steal(result); + } + +private: + template + void process(list &args_list, T &&x) { + auto o = reinterpret_steal(detail::make_caster::cast(std::forward(x), policy, {})); + if (!o) { +#if defined(NDEBUG) + argument_cast_error(); +#else + argument_cast_error(std::to_string(args_list.size()), type_id()); +#endif + } + args_list.append(o); + } + + void process(list &args_list, detail::args_proxy ap) { + for (const auto &a : ap) + args_list.append(a); + } + + void process(list &/*args_list*/, arg_v a) { + if (!a.name) +#if defined(NDEBUG) + nameless_argument_error(); +#else + nameless_argument_error(a.type); +#endif + + if (m_kwargs.contains(a.name)) { +#if defined(NDEBUG) + multiple_values_error(); +#else + multiple_values_error(a.name); +#endif + } + if (!a.value) { +#if defined(NDEBUG) + argument_cast_error(); +#else + argument_cast_error(a.name, a.type); +#endif + } + m_kwargs[a.name] = a.value; + } + + void process(list &/*args_list*/, detail::kwargs_proxy kp) { + if (!kp) + return; + for (const auto &k : reinterpret_borrow(kp)) { + if (m_kwargs.contains(k.first)) { +#if defined(NDEBUG) + multiple_values_error(); +#else + multiple_values_error(str(k.first)); +#endif + } + m_kwargs[k.first] = k.second; + } + } + + [[noreturn]] static void nameless_argument_error() { + throw type_error("Got kwargs without a name; only named arguments " + "may be passed via py::arg() to a python function call. 
" + "(compile in debug mode for details)"); + } + [[noreturn]] static void nameless_argument_error(std::string type) { + throw type_error("Got kwargs without a name of type '" + type + "'; only named " + "arguments may be passed via py::arg() to a python function call. "); + } + [[noreturn]] static void multiple_values_error() { + throw type_error("Got multiple values for keyword argument " + "(compile in debug mode for details)"); + } + + [[noreturn]] static void multiple_values_error(std::string name) { + throw type_error("Got multiple values for keyword argument '" + name + "'"); + } + + [[noreturn]] static void argument_cast_error() { + throw cast_error("Unable to convert call argument to Python object " + "(compile in debug mode for details)"); + } + + [[noreturn]] static void argument_cast_error(std::string name, std::string type) { + throw cast_error("Unable to convert call argument '" + name + + "' of type '" + type + "' to Python object"); + } + +private: + tuple m_args; + dict m_kwargs; +}; + +/// Collect only positional arguments for a Python function call +template ...>::value>> +simple_collector collect_arguments(Args &&...args) { + return simple_collector(std::forward(args)...); +} + +/// Collect all arguments, including keywords and unpacking (only instantiated when needed) +template ...>::value>> +unpacking_collector collect_arguments(Args &&...args) { + // Following argument order rules for generalized unpacking according to PEP 448 + static_assert( + constexpr_last() < constexpr_first() + && constexpr_last() < constexpr_first(), + "Invalid function call: positional args must precede keywords and ** unpacking; " + "* unpacking must precede ** unpacking" + ); + return unpacking_collector(std::forward(args)...); +} + +template +template +object object_api::operator()(Args &&...args) const { + return detail::collect_arguments(std::forward(args)...).call(derived().ptr()); +} + +template +template +object object_api::call(Args &&...args) const { + 
return operator()(std::forward(args)...); +} + +PYBIND11_NAMESPACE_END(detail) + +#define PYBIND11_MAKE_OPAQUE(...) \ + namespace pybind11 { namespace detail { \ + template<> class type_caster<__VA_ARGS__> : public type_caster_base<__VA_ARGS__> { }; \ + }} + +/// Lets you pass a type containing a `,` through a macro parameter without needing a separate +/// typedef, e.g.: `PYBIND11_OVERLOAD(PYBIND11_TYPE(ReturnType), PYBIND11_TYPE(Parent), f, arg)` +#define PYBIND11_TYPE(...) __VA_ARGS__ + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/chrono.h b/diffvg/pybind11/include/pybind11/chrono.h new file mode 100644 index 0000000000000000000000000000000000000000..6127c659bdcef2da89d9fb80568f1c570bbb6534 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/chrono.h @@ -0,0 +1,191 @@ +/* + pybind11/chrono.h: Transparent conversion between std::chrono and python's datetime + + Copyright (c) 2016 Trent Houliston and + Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "pybind11.h" +#include +#include +#include +#include + +// Backport the PyDateTime_DELTA functions from Python3.3 if required +#ifndef PyDateTime_DELTA_GET_DAYS +#define PyDateTime_DELTA_GET_DAYS(o) (((PyDateTime_Delta*)o)->days) +#endif +#ifndef PyDateTime_DELTA_GET_SECONDS +#define PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta*)o)->seconds) +#endif +#ifndef PyDateTime_DELTA_GET_MICROSECONDS +#define PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds) +#endif + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +template class duration_caster { +public: + typedef typename type::rep rep; + typedef typename type::period period; + + typedef std::chrono::duration> days; + + bool load(handle src, bool) { + using namespace std::chrono; + + // Lazy initialise the PyDateTime import + if (!PyDateTimeAPI) { PyDateTime_IMPORT; } + + if (!src) return false; + // If invoked with datetime.delta object + if (PyDelta_Check(src.ptr())) { + value = type(duration_cast>( + days(PyDateTime_DELTA_GET_DAYS(src.ptr())) + + seconds(PyDateTime_DELTA_GET_SECONDS(src.ptr())) + + microseconds(PyDateTime_DELTA_GET_MICROSECONDS(src.ptr())))); + return true; + } + // If invoked with a float we assume it is seconds and convert + else if (PyFloat_Check(src.ptr())) { + value = type(duration_cast>(duration(PyFloat_AsDouble(src.ptr())))); + return true; + } + else return false; + } + + // If this is a duration just return it back + static const std::chrono::duration& get_duration(const std::chrono::duration &src) { + return src; + } + + // If this is a time_point get the time_since_epoch + template static std::chrono::duration get_duration(const std::chrono::time_point> &src) { + return src.time_since_epoch(); + } + + static handle cast(const type &src, return_value_policy /* policy */, handle /* parent */) { + using namespace std::chrono; + + // Use overloaded function to get our duration from our source + 
// Works out if it is a duration or time_point and get the duration + auto d = get_duration(src); + + // Lazy initialise the PyDateTime import + if (!PyDateTimeAPI) { PyDateTime_IMPORT; } + + // Declare these special duration types so the conversions happen with the correct primitive types (int) + using dd_t = duration>; + using ss_t = duration>; + using us_t = duration; + + auto dd = duration_cast(d); + auto subd = d - dd; + auto ss = duration_cast(subd); + auto us = duration_cast(subd - ss); + return PyDelta_FromDSU(dd.count(), ss.count(), us.count()); + } + + PYBIND11_TYPE_CASTER(type, _("datetime.timedelta")); +}; + +// This is for casting times on the system clock into datetime.datetime instances +template class type_caster> { +public: + typedef std::chrono::time_point type; + bool load(handle src, bool) { + using namespace std::chrono; + + // Lazy initialise the PyDateTime import + if (!PyDateTimeAPI) { PyDateTime_IMPORT; } + + if (!src) return false; + + std::tm cal; + microseconds msecs; + + if (PyDateTime_Check(src.ptr())) { + cal.tm_sec = PyDateTime_DATE_GET_SECOND(src.ptr()); + cal.tm_min = PyDateTime_DATE_GET_MINUTE(src.ptr()); + cal.tm_hour = PyDateTime_DATE_GET_HOUR(src.ptr()); + cal.tm_mday = PyDateTime_GET_DAY(src.ptr()); + cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1; + cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900; + cal.tm_isdst = -1; + msecs = microseconds(PyDateTime_DATE_GET_MICROSECOND(src.ptr())); + } else if (PyDate_Check(src.ptr())) { + cal.tm_sec = 0; + cal.tm_min = 0; + cal.tm_hour = 0; + cal.tm_mday = PyDateTime_GET_DAY(src.ptr()); + cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1; + cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900; + cal.tm_isdst = -1; + msecs = microseconds(0); + } else if (PyTime_Check(src.ptr())) { + cal.tm_sec = PyDateTime_TIME_GET_SECOND(src.ptr()); + cal.tm_min = PyDateTime_TIME_GET_MINUTE(src.ptr()); + cal.tm_hour = PyDateTime_TIME_GET_HOUR(src.ptr()); + cal.tm_mday = 1; // This date (day, month, 
year) = (1, 0, 70) + cal.tm_mon = 0; // represents 1-Jan-1970, which is the first + cal.tm_year = 70; // earliest available date for Python's datetime + cal.tm_isdst = -1; + msecs = microseconds(PyDateTime_TIME_GET_MICROSECOND(src.ptr())); + } + else return false; + + value = system_clock::from_time_t(std::mktime(&cal)) + msecs; + return true; + } + + static handle cast(const std::chrono::time_point &src, return_value_policy /* policy */, handle /* parent */) { + using namespace std::chrono; + + // Lazy initialise the PyDateTime import + if (!PyDateTimeAPI) { PyDateTime_IMPORT; } + + // Get out microseconds, and make sure they are positive, to avoid bug in eastern hemisphere time zones + // (cfr. https://github.com/pybind/pybind11/issues/2417) + using us_t = duration; + auto us = duration_cast(src.time_since_epoch() % seconds(1)); + if (us.count() < 0) + us += seconds(1); + + // Subtract microseconds BEFORE `system_clock::to_time_t`, because: + // > If std::time_t has lower precision, it is implementation-defined whether the value is rounded or truncated. + // (https://en.cppreference.com/w/cpp/chrono/system_clock/to_time_t) + std::time_t tt = system_clock::to_time_t(time_point_cast(src - us)); + // this function uses static memory so it's best to copy it out asap just in case + // otherwise other code that is using localtime may break this (not just python code) + std::tm localtime = *std::localtime(&tt); + + return PyDateTime_FromDateAndTime(localtime.tm_year + 1900, + localtime.tm_mon + 1, + localtime.tm_mday, + localtime.tm_hour, + localtime.tm_min, + localtime.tm_sec, + us.count()); + } + PYBIND11_TYPE_CASTER(type, _("datetime.datetime")); +}; + +// Other clocks that are not the system clock are not measured as datetime.datetime objects +// since they are not measured on calendar time. 
So instead we just make them timedeltas +// Or if they have passed us a time as a float we convert that +template class type_caster> +: public duration_caster> { +}; + +template class type_caster> +: public duration_caster> { +}; + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/common.h b/diffvg/pybind11/include/pybind11/common.h new file mode 100644 index 0000000000000000000000000000000000000000..6c8a4f1e88e493ee08d24e668639c8d495fd49b1 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/common.h @@ -0,0 +1,2 @@ +#include "detail/common.h" +#warning "Including 'common.h' is deprecated. It will be removed in v3.0. Use 'pybind11.h'." diff --git a/diffvg/pybind11/include/pybind11/complex.h b/diffvg/pybind11/include/pybind11/complex.h new file mode 100644 index 0000000000000000000000000000000000000000..f8327eb37307490b658becf3d151132ddb5df531 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/complex.h @@ -0,0 +1,65 @@ +/* + pybind11/complex.h: Complex number support + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "pybind11.h" +#include + +/// glibc defines I as a macro which breaks things, e.g., boost template names +#ifdef I +# undef I +#endif + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +template struct format_descriptor, detail::enable_if_t::value>> { + static constexpr const char c = format_descriptor::c; + static constexpr const char value[3] = { 'Z', c, '\0' }; + static std::string format() { return std::string(value); } +}; + +#ifndef PYBIND11_CPP17 + +template constexpr const char format_descriptor< + std::complex, detail::enable_if_t::value>>::value[3]; + +#endif + +PYBIND11_NAMESPACE_BEGIN(detail) + +template struct is_fmt_numeric, detail::enable_if_t::value>> { + static constexpr bool value = true; + static constexpr int index = is_fmt_numeric::index + 3; +}; + +template class type_caster> { +public: + bool load(handle src, bool convert) { + if (!src) + return false; + if (!convert && !PyComplex_Check(src.ptr())) + return false; + Py_complex result = PyComplex_AsCComplex(src.ptr()); + if (result.real == -1.0 && PyErr_Occurred()) { + PyErr_Clear(); + return false; + } + value = std::complex((T) result.real, (T) result.imag); + return true; + } + + static handle cast(const std::complex &src, return_value_policy /* policy */, handle /* parent */) { + return PyComplex_FromDoubles((double) src.real(), (double) src.imag()); + } + + PYBIND11_TYPE_CASTER(std::complex, _("complex")); +}; +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/detail/class.h b/diffvg/pybind11/include/pybind11/detail/class.h new file mode 100644 index 0000000000000000000000000000000000000000..8d36744f2736d79c6fb9c6d93a1ce44f89e3b60e --- /dev/null +++ b/diffvg/pybind11/include/pybind11/detail/class.h @@ -0,0 +1,668 @@ +/* + pybind11/detail/class.h: Python C API implementation details for py::class_ + + Copyright (c) 2017 Wenzel Jakob + + All rights reserved. 
Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "../attr.h" +#include "../options.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +#if PY_VERSION_HEX >= 0x03030000 && !defined(PYPY_VERSION) +# define PYBIND11_BUILTIN_QUALNAME +# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) +#else +// In pre-3.3 Python, we still set __qualname__ so that we can produce reliable function type +// signatures; in 3.3+ this macro expands to nothing: +# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) setattr((PyObject *) obj, "__qualname__", nameobj) +#endif + +inline PyTypeObject *type_incref(PyTypeObject *type) { + Py_INCREF(type); + return type; +} + +#if !defined(PYPY_VERSION) + +/// `pybind11_static_property.__get__()`: Always pass the class instead of the instance. +extern "C" inline PyObject *pybind11_static_get(PyObject *self, PyObject * /*ob*/, PyObject *cls) { + return PyProperty_Type.tp_descr_get(self, cls, cls); +} + +/// `pybind11_static_property.__set__()`: Just like the above `__get__()`. +extern "C" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObject *value) { + PyObject *cls = PyType_Check(obj) ? obj : (PyObject *) Py_TYPE(obj); + return PyProperty_Type.tp_descr_set(self, cls, value); +} + +/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()` + methods are modified to always use the object type instead of a concrete instance. + Return value: New reference. 
*/ +inline PyTypeObject *make_static_property_type() { + constexpr auto *name = "pybind11_static_property"; + auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); + + /* Danger zone: from now (and until PyType_Ready), make sure to + issue no Python C API calls which could potentially invoke the + garbage collector (the GC will call type_traverse(), which will in + turn find the newly constructed type in an invalid state) */ + auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); + if (!heap_type) + pybind11_fail("make_static_property_type(): error allocating type!"); + + heap_type->ht_name = name_obj.inc_ref().ptr(); +#ifdef PYBIND11_BUILTIN_QUALNAME + heap_type->ht_qualname = name_obj.inc_ref().ptr(); +#endif + + auto type = &heap_type->ht_type; + type->tp_name = name; + type->tp_base = type_incref(&PyProperty_Type); + type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + type->tp_descr_get = pybind11_static_get; + type->tp_descr_set = pybind11_static_set; + + if (PyType_Ready(type) < 0) + pybind11_fail("make_static_property_type(): failure in PyType_Ready()!"); + + setattr((PyObject *) type, "__module__", str("pybind11_builtins")); + PYBIND11_SET_OLDPY_QUALNAME(type, name_obj); + + return type; +} + +#else // PYPY + +/** PyPy has some issues with the above C API, so we evaluate Python code instead. + This function will only be called once so performance isn't really a concern. + Return value: New reference. 
*/ +inline PyTypeObject *make_static_property_type() { + auto d = dict(); + PyObject *result = PyRun_String(R"(\ + class pybind11_static_property(property): + def __get__(self, obj, cls): + return property.__get__(self, cls, cls) + + def __set__(self, obj, value): + cls = obj if isinstance(obj, type) else type(obj) + property.__set__(self, cls, value) + )", Py_file_input, d.ptr(), d.ptr() + ); + if (result == nullptr) + throw error_already_set(); + Py_DECREF(result); + return (PyTypeObject *) d["pybind11_static_property"].cast().release().ptr(); +} + +#endif // PYPY + +/** Types with static properties need to handle `Type.static_prop = x` in a specific way. + By default, Python replaces the `static_property` itself, but for wrapped C++ types + we need to call `static_property.__set__()` in order to propagate the new value to + the underlying C++ data structure. */ +extern "C" inline int pybind11_meta_setattro(PyObject* obj, PyObject* name, PyObject* value) { + // Use `_PyType_Lookup()` instead of `PyObject_GetAttr()` in order to get the raw + // descriptor (`property`) instead of calling `tp_descr_get` (`property.__get__()`). + PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name); + + // The following assignment combinations are possible: + // 1. `Type.static_prop = value` --> descr_set: `Type.static_prop.__set__(value)` + // 2. `Type.static_prop = other_static_prop` --> setattro: replace existing `static_prop` + // 3. `Type.regular_attribute = value` --> setattro: regular attribute assignment + const auto static_prop = (PyObject *) get_internals().static_property_type; + const auto call_descr_set = descr && PyObject_IsInstance(descr, static_prop) + && !PyObject_IsInstance(value, static_prop); + if (call_descr_set) { + // Call `static_property.__set__()` instead of replacing the `static_property`. 
+#if !defined(PYPY_VERSION) + return Py_TYPE(descr)->tp_descr_set(descr, obj, value); +#else + if (PyObject *result = PyObject_CallMethod(descr, "__set__", "OO", obj, value)) { + Py_DECREF(result); + return 0; + } else { + return -1; + } +#endif + } else { + // Replace existing attribute. + return PyType_Type.tp_setattro(obj, name, value); + } +} + +#if PY_MAJOR_VERSION >= 3 +/** + * Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing + * methods via cls.attr("m2") = cls.attr("m1"): instead the tp_descr_get returns a plain function, + * when called on a class, or a PyMethod, when called on an instance. Override that behaviour here + * to do a special case bypass for PyInstanceMethod_Types. + */ +extern "C" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name) { + PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name); + if (descr && PyInstanceMethod_Check(descr)) { + Py_INCREF(descr); + return descr; + } + else { + return PyType_Type.tp_getattro(obj, name); + } +} +#endif + +/// metaclass `__call__` function that is used to create all pybind11 objects. +extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, PyObject *kwargs) { + + // use the default metaclass call to create/initialize the object + PyObject *self = PyType_Type.tp_call(type, args, kwargs); + if (self == nullptr) { + return nullptr; + } + + // This must be a pybind11 instance + auto instance = reinterpret_cast(self); + + // Ensure that the base __init__ function(s) were called + for (const auto &vh : values_and_holders(instance)) { + if (!vh.holder_constructed()) { + PyErr_Format(PyExc_TypeError, "%.200s.__init__() must be called when overriding __init__", + vh.type->type->tp_name); + Py_DECREF(self); + return nullptr; + } + } + + return self; +} + +/** This metaclass is assigned by default to all pybind11 types and is required in order + for static properties to function correctly. 
Users may override this using `py::metaclass`. + Return value: New reference. */ +inline PyTypeObject* make_default_metaclass() { + constexpr auto *name = "pybind11_type"; + auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); + + /* Danger zone: from now (and until PyType_Ready), make sure to + issue no Python C API calls which could potentially invoke the + garbage collector (the GC will call type_traverse(), which will in + turn find the newly constructed type in an invalid state) */ + auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); + if (!heap_type) + pybind11_fail("make_default_metaclass(): error allocating metaclass!"); + + heap_type->ht_name = name_obj.inc_ref().ptr(); +#ifdef PYBIND11_BUILTIN_QUALNAME + heap_type->ht_qualname = name_obj.inc_ref().ptr(); +#endif + + auto type = &heap_type->ht_type; + type->tp_name = name; + type->tp_base = type_incref(&PyType_Type); + type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + + type->tp_call = pybind11_meta_call; + + type->tp_setattro = pybind11_meta_setattro; +#if PY_MAJOR_VERSION >= 3 + type->tp_getattro = pybind11_meta_getattro; +#endif + + if (PyType_Ready(type) < 0) + pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!"); + + setattr((PyObject *) type, "__module__", str("pybind11_builtins")); + PYBIND11_SET_OLDPY_QUALNAME(type, name_obj); + + return type; +} + +/// For multiple inheritance types we need to recursively register/deregister base pointers for any +/// base classes with pointers that are difference from the instance value pointer so that we can +/// correctly recognize an offset base class pointer. This calls a function with any offset base ptrs. 
+inline void traverse_offset_bases(void *valueptr, const detail::type_info *tinfo, instance *self, + bool (*f)(void * /*parentptr*/, instance * /*self*/)) { + for (handle h : reinterpret_borrow(tinfo->type->tp_bases)) { + if (auto parent_tinfo = get_type_info((PyTypeObject *) h.ptr())) { + for (auto &c : parent_tinfo->implicit_casts) { + if (c.first == tinfo->cpptype) { + auto *parentptr = c.second(valueptr); + if (parentptr != valueptr) + f(parentptr, self); + traverse_offset_bases(parentptr, parent_tinfo, self, f); + break; + } + } + } + } +} + +inline bool register_instance_impl(void *ptr, instance *self) { + get_internals().registered_instances.emplace(ptr, self); + return true; // unused, but gives the same signature as the deregister func +} +inline bool deregister_instance_impl(void *ptr, instance *self) { + auto ®istered_instances = get_internals().registered_instances; + auto range = registered_instances.equal_range(ptr); + for (auto it = range.first; it != range.second; ++it) { + if (Py_TYPE(self) == Py_TYPE(it->second)) { + registered_instances.erase(it); + return true; + } + } + return false; +} + +inline void register_instance(instance *self, void *valptr, const type_info *tinfo) { + register_instance_impl(valptr, self); + if (!tinfo->simple_ancestors) + traverse_offset_bases(valptr, tinfo, self, register_instance_impl); +} + +inline bool deregister_instance(instance *self, void *valptr, const type_info *tinfo) { + bool ret = deregister_instance_impl(valptr, self); + if (!tinfo->simple_ancestors) + traverse_offset_bases(valptr, tinfo, self, deregister_instance_impl); + return ret; +} + +/// Instance creation function for all pybind11 types. It allocates the internal instance layout for +/// holding C++ objects and holders. Allocation is done lazily (the first time the instance is cast +/// to a reference or pointer), and initialization is done by an `__init__` function. 
+inline PyObject *make_new_instance(PyTypeObject *type) { +#if defined(PYPY_VERSION) + // PyPy gets tp_basicsize wrong (issue 2482) under multiple inheritance when the first inherited + // object is a a plain Python type (i.e. not derived from an extension type). Fix it. + ssize_t instance_size = static_cast(sizeof(instance)); + if (type->tp_basicsize < instance_size) { + type->tp_basicsize = instance_size; + } +#endif + PyObject *self = type->tp_alloc(type, 0); + auto inst = reinterpret_cast(self); + // Allocate the value/holder internals: + inst->allocate_layout(); + + inst->owned = true; + + return self; +} + +/// Instance creation function for all pybind11 types. It only allocates space for the +/// C++ object, but doesn't call the constructor -- an `__init__` function must do that. +extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *) { + return make_new_instance(type); +} + +/// An `__init__` function constructs the C++ object. Users should provide at least one +/// of these using `py::init` or directly with `.def(__init__, ...)`. Otherwise, the +/// following default function will be used which simply throws an exception. 
+extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) { + PyTypeObject *type = Py_TYPE(self); + std::string msg; +#if defined(PYPY_VERSION) + msg += handle((PyObject *) type).attr("__module__").cast() + "."; +#endif + msg += type->tp_name; + msg += ": No constructor defined!"; + PyErr_SetString(PyExc_TypeError, msg.c_str()); + return -1; +} + +inline void add_patient(PyObject *nurse, PyObject *patient) { + auto &internals = get_internals(); + auto instance = reinterpret_cast(nurse); + instance->has_patients = true; + Py_INCREF(patient); + internals.patients[nurse].push_back(patient); +} + +inline void clear_patients(PyObject *self) { + auto instance = reinterpret_cast(self); + auto &internals = get_internals(); + auto pos = internals.patients.find(self); + assert(pos != internals.patients.end()); + // Clearing the patients can cause more Python code to run, which + // can invalidate the iterator. Extract the vector of patients + // from the unordered_map first. + auto patients = std::move(pos->second); + internals.patients.erase(pos); + instance->has_patients = false; + for (PyObject *&patient : patients) + Py_CLEAR(patient); +} + +/// Clears all internal data from the instance and removes it from registered instances in +/// preparation for deallocation. +inline void clear_instance(PyObject *self) { + auto instance = reinterpret_cast(self); + + // Deallocate any values/holders, if present: + for (auto &v_h : values_and_holders(instance)) { + if (v_h) { + + // We have to deregister before we call dealloc because, for virtual MI types, we still + // need to be able to get the parent pointers. 
+ if (v_h.instance_registered() && !deregister_instance(instance, v_h.value_ptr(), v_h.type)) + pybind11_fail("pybind11_object_dealloc(): Tried to deallocate unregistered instance!"); + + if (instance->owned || v_h.holder_constructed()) + v_h.type->dealloc(v_h); + } + } + // Deallocate the value/holder layout internals: + instance->deallocate_layout(); + + if (instance->weakrefs) + PyObject_ClearWeakRefs(self); + + PyObject **dict_ptr = _PyObject_GetDictPtr(self); + if (dict_ptr) + Py_CLEAR(*dict_ptr); + + if (instance->has_patients) + clear_patients(self); +} + +/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc` +/// to destroy the C++ object itself, while the rest is Python bookkeeping. +extern "C" inline void pybind11_object_dealloc(PyObject *self) { + clear_instance(self); + + auto type = Py_TYPE(self); + type->tp_free(self); + +#if PY_VERSION_HEX < 0x03080000 + // `type->tp_dealloc != pybind11_object_dealloc` means that we're being called + // as part of a derived type's dealloc, in which case we're not allowed to decref + // the type here. For cross-module compatibility, we shouldn't compare directly + // with `pybind11_object_dealloc`, but with the common one stashed in internals. + auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base; + if (type->tp_dealloc == pybind11_object_type->tp_dealloc) + Py_DECREF(type); +#else + // This was not needed before Python 3.8 (Python issue 35810) + // https://github.com/pybind/pybind11/issues/1946 + Py_DECREF(type); +#endif +} + +/** Create the type which can be used as a common base for all classes. This is + needed in order to satisfy Python's requirements for multiple inheritance. + Return value: New reference. 
*/ +inline PyObject *make_object_base_type(PyTypeObject *metaclass) { + constexpr auto *name = "pybind11_object"; + auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); + + /* Danger zone: from now (and until PyType_Ready), make sure to + issue no Python C API calls which could potentially invoke the + garbage collector (the GC will call type_traverse(), which will in + turn find the newly constructed type in an invalid state) */ + auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0); + if (!heap_type) + pybind11_fail("make_object_base_type(): error allocating type!"); + + heap_type->ht_name = name_obj.inc_ref().ptr(); +#ifdef PYBIND11_BUILTIN_QUALNAME + heap_type->ht_qualname = name_obj.inc_ref().ptr(); +#endif + + auto type = &heap_type->ht_type; + type->tp_name = name; + type->tp_base = type_incref(&PyBaseObject_Type); + type->tp_basicsize = static_cast(sizeof(instance)); + type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + + type->tp_new = pybind11_object_new; + type->tp_init = pybind11_object_init; + type->tp_dealloc = pybind11_object_dealloc; + + /* Support weak references (needed for the keep_alive feature) */ + type->tp_weaklistoffset = offsetof(instance, weakrefs); + + if (PyType_Ready(type) < 0) + pybind11_fail("PyType_Ready failed in make_object_base_type():" + error_string()); + + setattr((PyObject *) type, "__module__", str("pybind11_builtins")); + PYBIND11_SET_OLDPY_QUALNAME(type, name_obj); + + assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)); + return (PyObject *) heap_type; +} + +/// dynamic_attr: Support for `d = instance.__dict__`. +extern "C" inline PyObject *pybind11_get_dict(PyObject *self, void *) { + PyObject *&dict = *_PyObject_GetDictPtr(self); + if (!dict) + dict = PyDict_New(); + Py_XINCREF(dict); + return dict; +} + +/// dynamic_attr: Support for `instance.__dict__ = dict()`. 
+extern "C" inline int pybind11_set_dict(PyObject *self, PyObject *new_dict, void *) { + if (!PyDict_Check(new_dict)) { + PyErr_Format(PyExc_TypeError, "__dict__ must be set to a dictionary, not a '%.200s'", + Py_TYPE(new_dict)->tp_name); + return -1; + } + PyObject *&dict = *_PyObject_GetDictPtr(self); + Py_INCREF(new_dict); + Py_CLEAR(dict); + dict = new_dict; + return 0; +} + +/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`. +extern "C" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) { + PyObject *&dict = *_PyObject_GetDictPtr(self); + Py_VISIT(dict); + return 0; +} + +/// dynamic_attr: Allow the GC to clear the dictionary. +extern "C" inline int pybind11_clear(PyObject *self) { + PyObject *&dict = *_PyObject_GetDictPtr(self); + Py_CLEAR(dict); + return 0; +} + +/// Give instances of this type a `__dict__` and opt into garbage collection. +inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) { + auto type = &heap_type->ht_type; +#if defined(PYPY_VERSION) && (PYPY_VERSION_NUM < 0x06000000) + pybind11_fail(std::string(type->tp_name) + ": dynamic attributes are " + "currently not supported in " + "conjunction with PyPy!"); +#endif + type->tp_flags |= Py_TPFLAGS_HAVE_GC; + type->tp_dictoffset = type->tp_basicsize; // place dict at the end + type->tp_basicsize += (ssize_t)sizeof(PyObject *); // and allocate enough space for it + type->tp_traverse = pybind11_traverse; + type->tp_clear = pybind11_clear; + + static PyGetSetDef getset[] = { + {const_cast("__dict__"), pybind11_get_dict, pybind11_set_dict, nullptr, nullptr}, + {nullptr, nullptr, nullptr, nullptr, nullptr} + }; + type->tp_getset = getset; +} + +/// buffer_protocol: Fill in the view as specified by flags. +extern "C" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int flags) { + // Look for a `get_buffer` implementation in this type's info or any bases (following MRO). 
+ type_info *tinfo = nullptr; + for (auto type : reinterpret_borrow(Py_TYPE(obj)->tp_mro)) { + tinfo = get_type_info((PyTypeObject *) type.ptr()); + if (tinfo && tinfo->get_buffer) + break; + } + if (view == nullptr || !tinfo || !tinfo->get_buffer) { + if (view) + view->obj = nullptr; + PyErr_SetString(PyExc_BufferError, "pybind11_getbuffer(): Internal error"); + return -1; + } + std::memset(view, 0, sizeof(Py_buffer)); + buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data); + view->obj = obj; + view->ndim = 1; + view->internal = info; + view->buf = info->ptr; + view->itemsize = info->itemsize; + view->len = view->itemsize; + for (auto s : info->shape) + view->len *= s; + view->readonly = info->readonly; + if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) { + if (view) + view->obj = nullptr; + PyErr_SetString(PyExc_BufferError, "Writable buffer requested for readonly storage"); + return -1; + } + if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) + view->format = const_cast(info->format.c_str()); + if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) { + view->ndim = (int) info->ndim; + view->strides = &info->strides[0]; + view->shape = &info->shape[0]; + } + Py_INCREF(view->obj); + return 0; +} + +/// buffer_protocol: Release the resources of the buffer. +extern "C" inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) { + delete (buffer_info *) view->internal; +} + +/// Give this type a buffer interface. +inline void enable_buffer_protocol(PyHeapTypeObject *heap_type) { + heap_type->ht_type.tp_as_buffer = &heap_type->as_buffer; +#if PY_MAJOR_VERSION < 3 + heap_type->ht_type.tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER; +#endif + + heap_type->as_buffer.bf_getbuffer = pybind11_getbuffer; + heap_type->as_buffer.bf_releasebuffer = pybind11_releasebuffer; +} + +/** Create a brand new Python type according to the `type_record` specification. + Return value: New reference. 
*/ +inline PyObject* make_new_python_type(const type_record &rec) { + auto name = reinterpret_steal(PYBIND11_FROM_STRING(rec.name)); + + auto qualname = name; + if (rec.scope && !PyModule_Check(rec.scope.ptr()) && hasattr(rec.scope, "__qualname__")) { +#if PY_MAJOR_VERSION >= 3 + qualname = reinterpret_steal( + PyUnicode_FromFormat("%U.%U", rec.scope.attr("__qualname__").ptr(), name.ptr())); +#else + qualname = str(rec.scope.attr("__qualname__").cast() + "." + rec.name); +#endif + } + + object module; + if (rec.scope) { + if (hasattr(rec.scope, "__module__")) + module = rec.scope.attr("__module__"); + else if (hasattr(rec.scope, "__name__")) + module = rec.scope.attr("__name__"); + } + + auto full_name = c_str( +#if !defined(PYPY_VERSION) + module ? str(module).cast() + "." + rec.name : +#endif + rec.name); + + char *tp_doc = nullptr; + if (rec.doc && options::show_user_defined_docstrings()) { + /* Allocate memory for docstring (using PyObject_MALLOC, since + Python will free this later on) */ + size_t size = strlen(rec.doc) + 1; + tp_doc = (char *) PyObject_MALLOC(size); + memcpy((void *) tp_doc, rec.doc, size); + } + + auto &internals = get_internals(); + auto bases = tuple(rec.bases); + auto base = (bases.size() == 0) ? internals.instance_base + : bases[0].ptr(); + + /* Danger zone: from now (and until PyType_Ready), make sure to + issue no Python C API calls which could potentially invoke the + garbage collector (the GC will call type_traverse(), which will in + turn find the newly constructed type in an invalid state) */ + auto metaclass = rec.metaclass.ptr() ? 
(PyTypeObject *) rec.metaclass.ptr() + : internals.default_metaclass; + + auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0); + if (!heap_type) + pybind11_fail(std::string(rec.name) + ": Unable to create type object!"); + + heap_type->ht_name = name.release().ptr(); +#ifdef PYBIND11_BUILTIN_QUALNAME + heap_type->ht_qualname = qualname.inc_ref().ptr(); +#endif + + auto type = &heap_type->ht_type; + type->tp_name = full_name; + type->tp_doc = tp_doc; + type->tp_base = type_incref((PyTypeObject *)base); + type->tp_basicsize = static_cast(sizeof(instance)); + if (bases.size() > 0) + type->tp_bases = bases.release().ptr(); + + /* Don't inherit base __init__ */ + type->tp_init = pybind11_object_init; + + /* Supported protocols */ + type->tp_as_number = &heap_type->as_number; + type->tp_as_sequence = &heap_type->as_sequence; + type->tp_as_mapping = &heap_type->as_mapping; +#if PY_VERSION_HEX >= 0x03050000 + type->tp_as_async = &heap_type->as_async; +#endif + + /* Flags */ + type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE; +#if PY_MAJOR_VERSION < 3 + type->tp_flags |= Py_TPFLAGS_CHECKTYPES; +#endif + if (!rec.is_final) + type->tp_flags |= Py_TPFLAGS_BASETYPE; + + if (rec.dynamic_attr) + enable_dynamic_attributes(heap_type); + + if (rec.buffer_protocol) + enable_buffer_protocol(heap_type); + + if (PyType_Ready(type) < 0) + pybind11_fail(std::string(rec.name) + ": PyType_Ready failed (" + error_string() + ")!"); + + assert(rec.dynamic_attr ? 
PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC) + : !PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)); + + /* Register type with the parent scope */ + if (rec.scope) + setattr(rec.scope, rec.name, (PyObject *) type); + else + Py_INCREF(type); // Keep it alive forever (reference leak) + + if (module) // Needed by pydoc + setattr((PyObject *) type, "__module__", module); + + PYBIND11_SET_OLDPY_QUALNAME(type, qualname); + + return (PyObject *) type; +} + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/detail/common.h b/diffvg/pybind11/include/pybind11/detail/common.h new file mode 100644 index 0000000000000000000000000000000000000000..8923faef76eb4e06613273a0c71307814085bcac --- /dev/null +++ b/diffvg/pybind11/include/pybind11/detail/common.h @@ -0,0 +1,837 @@ +/* + pybind11/detail/common.h -- Basic macros + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#define PYBIND11_VERSION_MAJOR 2 +#define PYBIND11_VERSION_MINOR 6 +#define PYBIND11_VERSION_PATCH dev0 + +#define PYBIND11_NAMESPACE_BEGIN(name) namespace name { +#define PYBIND11_NAMESPACE_END(name) } + +// Robust support for some features and loading modules compiled against different pybind versions +// requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute on +// the main `pybind11` namespace. 
+#if !defined(PYBIND11_NAMESPACE) +# ifdef __GNUG__ +# define PYBIND11_NAMESPACE pybind11 __attribute__((visibility("hidden"))) +# else +# define PYBIND11_NAMESPACE pybind11 +# endif +#endif + +#if !(defined(_MSC_VER) && __cplusplus == 199711L) && !defined(__INTEL_COMPILER) +# if __cplusplus >= 201402L +# define PYBIND11_CPP14 +# if __cplusplus >= 201703L +# define PYBIND11_CPP17 +# endif +# endif +#elif defined(_MSC_VER) && __cplusplus == 199711L +// MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully implemented) +// Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3 or newer +# if _MSVC_LANG >= 201402L +# define PYBIND11_CPP14 +# if _MSVC_LANG > 201402L && _MSC_VER >= 1910 +# define PYBIND11_CPP17 +# endif +# endif +#endif + +// Compiler version assertions +#if defined(__INTEL_COMPILER) +# if __INTEL_COMPILER < 1700 +# error pybind11 requires Intel C++ compiler v17 or newer +# endif +#elif defined(__clang__) && !defined(__apple_build_version__) +# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3) +# error pybind11 requires clang 3.3 or newer +# endif +#elif defined(__clang__) +// Apple changes clang version macros to its Xcode version; the first Xcode release based on +// (upstream) clang 3.3 was Xcode 5: +# if __clang_major__ < 5 +# error pybind11 requires Xcode/clang 5.0 or newer +# endif +#elif defined(__GNUG__) +# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8) +# error pybind11 requires gcc 4.8 or newer +# endif +#elif defined(_MSC_VER) +// Pybind hits various compiler bugs in 2015u2 and earlier, and also makes use of some stl features +// (e.g. 
std::negation) added in 2015u3: +# if _MSC_FULL_VER < 190024210 +# error pybind11 requires MSVC 2015 update 3 or newer +# endif +#endif + +#if !defined(PYBIND11_EXPORT) +# if defined(WIN32) || defined(_WIN32) +# define PYBIND11_EXPORT __declspec(dllexport) +# else +# define PYBIND11_EXPORT __attribute__ ((visibility("default"))) +# endif +#endif + +#if defined(_MSC_VER) +# define PYBIND11_NOINLINE __declspec(noinline) +#else +# define PYBIND11_NOINLINE __attribute__ ((noinline)) +#endif + +#if defined(PYBIND11_CPP14) +# define PYBIND11_DEPRECATED(reason) [[deprecated(reason)]] +#else +# define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason))) +#endif + +#if defined(PYBIND11_CPP17) +# define PYBIND11_MAYBE_UNUSED [[maybe_unused]] +#elif defined(_MSC_VER) && !defined(__clang__) +# define PYBIND11_MAYBE_UNUSED +#else +# define PYBIND11_MAYBE_UNUSED __attribute__ ((__unused__)) +#endif + +/* Don't let Python.h #define (v)snprintf as macro because they are implemented + properly in Visual Studio since 2015. 
*/ +#if defined(_MSC_VER) && _MSC_VER >= 1900 +# define HAVE_SNPRINTF 1 +#endif + +/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode +#if defined(_MSC_VER) +# if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 4) +# define HAVE_ROUND 1 +# endif +# pragma warning(push) +# pragma warning(disable: 4510 4610 4512 4005) +# if defined(_DEBUG) && !defined(Py_DEBUG) +# define PYBIND11_DEBUG_MARKER +# undef _DEBUG +# endif +#endif + +#include +#include +#include + +/* Python #defines overrides on all sorts of core functions, which + tends to weak havok in C++ codebases that expect these to work + like regular functions (potentially with several overloads) */ +#if defined(isalnum) +# undef isalnum +# undef isalpha +# undef islower +# undef isspace +# undef isupper +# undef tolower +# undef toupper +#endif + +#if defined(copysign) +# undef copysign +#endif + +#if defined(_MSC_VER) +# if defined(PYBIND11_DEBUG_MARKER) +# define _DEBUG +# undef PYBIND11_DEBUG_MARKER +# endif +# pragma warning(pop) +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if PY_MAJOR_VERSION >= 3 /// Compatibility macros for various Python versions +#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr) +#define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check +#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION +#define PYBIND11_BYTES_CHECK PyBytes_Check +#define PYBIND11_BYTES_FROM_STRING PyBytes_FromString +#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize +#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize +#define PYBIND11_BYTES_AS_STRING PyBytes_AsString +#define PYBIND11_BYTES_SIZE PyBytes_Size +#define PYBIND11_LONG_CHECK(o) PyLong_Check(o) +#define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o) +#define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) o) +#define PYBIND11_LONG_FROM_UNSIGNED(o) 
PyLong_FromSize_t((size_t) o) +#define PYBIND11_BYTES_NAME "bytes" +#define PYBIND11_STRING_NAME "str" +#define PYBIND11_SLICE_OBJECT PyObject +#define PYBIND11_FROM_STRING PyUnicode_FromString +#define PYBIND11_STR_TYPE ::pybind11::str +#define PYBIND11_BOOL_ATTR "__bool__" +#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool) +// Providing a separate declaration to make Clang's -Wmissing-prototypes happy. +// See comment for PYBIND11_MODULE below for why this is marked "maybe unused". +#define PYBIND11_PLUGIN_IMPL(name) \ + extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name(); \ + extern "C" PYBIND11_EXPORT PyObject *PyInit_##name() + +#else +#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyMethod_New(ptr, nullptr, class_) +#define PYBIND11_INSTANCE_METHOD_CHECK PyMethod_Check +#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyMethod_GET_FUNCTION +#define PYBIND11_BYTES_CHECK PyString_Check +#define PYBIND11_BYTES_FROM_STRING PyString_FromString +#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyString_FromStringAndSize +#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyString_AsStringAndSize +#define PYBIND11_BYTES_AS_STRING PyString_AsString +#define PYBIND11_BYTES_SIZE PyString_Size +#define PYBIND11_LONG_CHECK(o) (PyInt_Check(o) || PyLong_Check(o)) +#define PYBIND11_LONG_AS_LONGLONG(o) (PyInt_Check(o) ? (long long) PyLong_AsLong(o) : PyLong_AsLongLong(o)) +#define PYBIND11_LONG_FROM_SIGNED(o) PyInt_FromSsize_t((ssize_t) o) // Returns long if needed. +#define PYBIND11_LONG_FROM_UNSIGNED(o) PyInt_FromSize_t((size_t) o) // Returns long if needed. +#define PYBIND11_BYTES_NAME "str" +#define PYBIND11_STRING_NAME "unicode" +#define PYBIND11_SLICE_OBJECT PySliceObject +#define PYBIND11_FROM_STRING PyString_FromString +#define PYBIND11_STR_TYPE ::pybind11::bytes +#define PYBIND11_BOOL_ATTR "__nonzero__" +#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_nonzero) +// Providing a separate PyInit decl to make Clang's -Wmissing-prototypes happy. 
+// See comment for PYBIND11_MODULE below for why this is marked "maybe unused". +#define PYBIND11_PLUGIN_IMPL(name) \ + static PyObject *pybind11_init_wrapper(); \ + extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT void init##name(); \ + extern "C" PYBIND11_EXPORT void init##name() { \ + (void)pybind11_init_wrapper(); \ + } \ + PyObject *pybind11_init_wrapper() +#endif + +#if PY_VERSION_HEX >= 0x03050000 && PY_VERSION_HEX < 0x03050200 +extern "C" { + struct _Py_atomic_address { void *value; }; + PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current; +} +#endif + +#define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code +#define PYBIND11_STRINGIFY(x) #x +#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x) +#define PYBIND11_CONCAT(first, second) first##second +#define PYBIND11_ENSURE_INTERNALS_READY \ + pybind11::detail::get_internals(); + +#define PYBIND11_CHECK_PYTHON_VERSION \ + { \ + const char *compiled_ver = PYBIND11_TOSTRING(PY_MAJOR_VERSION) \ + "." PYBIND11_TOSTRING(PY_MINOR_VERSION); \ + const char *runtime_ver = Py_GetVersion(); \ + size_t len = std::strlen(compiled_ver); \ + if (std::strncmp(runtime_ver, compiled_ver, len) != 0 \ + || (runtime_ver[len] >= '0' && runtime_ver[len] <= '9')) { \ + PyErr_Format(PyExc_ImportError, \ + "Python version mismatch: module was compiled for Python %s, " \ + "but the interpreter version is incompatible: %s.", \ + compiled_ver, runtime_ver); \ + return nullptr; \ + } \ + } + +#define PYBIND11_CATCH_INIT_EXCEPTIONS \ + catch (pybind11::error_already_set &e) { \ + PyErr_SetString(PyExc_ImportError, e.what()); \ + return nullptr; \ + } catch (const std::exception &e) { \ + PyErr_SetString(PyExc_ImportError, e.what()); \ + return nullptr; \ + } \ + +/** \rst + ***Deprecated in favor of PYBIND11_MODULE*** + + This macro creates the entry point that will be invoked when the Python interpreter + imports a plugin library. 
Please create a `module` in the function body and return + the pointer to its underlying Python object at the end. + + .. code-block:: cpp + + PYBIND11_PLUGIN(example) { + pybind11::module m("example", "pybind11 example plugin"); + /// Set up bindings here + return m.ptr(); + } +\endrst */ +#define PYBIND11_PLUGIN(name) \ + PYBIND11_DEPRECATED("PYBIND11_PLUGIN is deprecated, use PYBIND11_MODULE") \ + static PyObject *pybind11_init(); \ + PYBIND11_PLUGIN_IMPL(name) { \ + PYBIND11_CHECK_PYTHON_VERSION \ + PYBIND11_ENSURE_INTERNALS_READY \ + try { \ + return pybind11_init(); \ + } PYBIND11_CATCH_INIT_EXCEPTIONS \ + } \ + PyObject *pybind11_init() + +/** \rst + This macro creates the entry point that will be invoked when the Python interpreter + imports an extension module. The module name is given as the fist argument and it + should not be in quotes. The second macro argument defines a variable of type + `py::module` which can be used to initialize the module. + + The entry point is marked as "maybe unused" to aid dead-code detection analysis: + since the entry point is typically only looked up at runtime and not referenced + during translation, it would otherwise appear as unused ("dead") code. + + .. 
code-block:: cpp + + PYBIND11_MODULE(example, m) { + m.doc() = "pybind11 example module"; + + // Add bindings here + m.def("foo", []() { + return "Hello, World!"; + }); + } +\endrst */ +#define PYBIND11_MODULE(name, variable) \ + PYBIND11_MAYBE_UNUSED \ + static void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &); \ + PYBIND11_PLUGIN_IMPL(name) { \ + PYBIND11_CHECK_PYTHON_VERSION \ + PYBIND11_ENSURE_INTERNALS_READY \ + auto m = pybind11::module(PYBIND11_TOSTRING(name)); \ + try { \ + PYBIND11_CONCAT(pybind11_init_, name)(m); \ + return m.ptr(); \ + } PYBIND11_CATCH_INIT_EXCEPTIONS \ + } \ + void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &variable) + + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +using ssize_t = Py_ssize_t; +using size_t = std::size_t; + +/// Approach used to cast a previously unknown C++ instance into a Python object +enum class return_value_policy : uint8_t { + /** This is the default return value policy, which falls back to the policy + return_value_policy::take_ownership when the return value is a pointer. + Otherwise, it uses return_value::move or return_value::copy for rvalue + and lvalue references, respectively. See below for a description of what + all of these different policies do. */ + automatic = 0, + + /** As above, but use policy return_value_policy::reference when the return + value is a pointer. This is the default conversion policy for function + arguments when calling Python functions manually from C++ code (i.e. via + handle::operator()). You probably won't need to use this. */ + automatic_reference, + + /** Reference an existing object (i.e. do not create a new copy) and take + ownership. Python will call the destructor and delete operator when the + object’s reference count reaches zero. Undefined behavior ensues when + the C++ side does the same.. */ + take_ownership, + + /** Create a new copy of the returned object, which will be owned by + Python. 
This policy is comparably safe because the lifetimes of the two + instances are decoupled. */ + copy, + + /** Use std::move to move the return value contents into a new instance + that will be owned by Python. This policy is comparably safe because the + lifetimes of the two instances (move source and destination) are + decoupled. */ + move, + + /** Reference an existing object, but do not take ownership. The C++ side + is responsible for managing the object’s lifetime and deallocating it + when it is no longer used. Warning: undefined behavior will ensue when + the C++ side deletes an object that is still referenced and used by + Python. */ + reference, + + /** This policy only applies to methods and properties. It references the + object without taking ownership similar to the above + return_value_policy::reference policy. In contrast to that policy, the + function or property’s implicit this argument (called the parent) is + considered to be the the owner of the return value (the child). + pybind11 then couples the lifetime of the parent to the child via a + reference relationship that ensures that the parent cannot be garbage + collected while Python is still using the child. More advanced + variations of this scheme are also possible using combinations of + return_value_policy::reference and the keep_alive call policy */ + reference_internal +}; + +PYBIND11_NAMESPACE_BEGIN(detail) + +inline static constexpr int log2(size_t n, int k = 0) { return (n <= 1) ? k : log2(n >> 1, k + 1); } + +// Returns the size as a multiple of sizeof(void *), rounded up. +inline static constexpr size_t size_in_ptrs(size_t s) { return 1 + ((s - 1) >> log2(sizeof(void *))); } + +/** + * The space to allocate for simple layout instance holders (see below) in multiple of the size of + * a pointer (e.g. 2 means 16 bytes on 64-bit architectures). 
The default is the minimum required + * to holder either a std::unique_ptr or std::shared_ptr (which is almost always + * sizeof(std::shared_ptr)). + */ +constexpr size_t instance_simple_holder_in_ptrs() { + static_assert(sizeof(std::shared_ptr) >= sizeof(std::unique_ptr), + "pybind assumes std::shared_ptrs are at least as big as std::unique_ptrs"); + return size_in_ptrs(sizeof(std::shared_ptr)); +} + +// Forward declarations +struct type_info; +struct value_and_holder; + +struct nonsimple_values_and_holders { + void **values_and_holders; + uint8_t *status; +}; + +/// The 'instance' type which needs to be standard layout (need to be able to use 'offsetof') +struct instance { + PyObject_HEAD + /// Storage for pointers and holder; see simple_layout, below, for a description + union { + void *simple_value_holder[1 + instance_simple_holder_in_ptrs()]; + nonsimple_values_and_holders nonsimple; + }; + /// Weak references + PyObject *weakrefs; + /// If true, the pointer is owned which means we're free to manage it with a holder. + bool owned : 1; + /** + * An instance has two possible value/holder layouts. + * + * Simple layout (when this flag is true), means the `simple_value_holder` is set with a pointer + * and the holder object governing that pointer, i.e. [val1*][holder]. This layout is applied + * whenever there is no python-side multiple inheritance of bound C++ types *and* the type's + * holder will fit in the default space (which is large enough to hold either a std::unique_ptr + * or std::shared_ptr). + * + * Non-simple layout applies when using custom holders that require more space than `shared_ptr` + * (which is typically the size of two pointers), or when multiple inheritance is used on the + * python side. Non-simple layout allocates the required amount of memory to have multiple + * bound C++ classes as parents. 
Under this layout, `nonsimple.values_and_holders` is set to a + * pointer to allocated space of the required space to hold a sequence of value pointers and + * holders followed `status`, a set of bit flags (1 byte each), i.e. + * [val1*][holder1][val2*][holder2]...[bb...] where each [block] is rounded up to a multiple of + * `sizeof(void *)`. `nonsimple.status` is, for convenience, a pointer to the + * beginning of the [bb...] block (but not independently allocated). + * + * Status bits indicate whether the associated holder is constructed (& + * status_holder_constructed) and whether the value pointer is registered (& + * status_instance_registered) in `registered_instances`. + */ + bool simple_layout : 1; + /// For simple layout, tracks whether the holder has been constructed + bool simple_holder_constructed : 1; + /// For simple layout, tracks whether the instance is registered in `registered_instances` + bool simple_instance_registered : 1; + /// If true, get_internals().patients has an entry for this object + bool has_patients : 1; + + /// Initializes all of the above type/values/holders data (but not the instance values themselves) + void allocate_layout(); + + /// Destroys/deallocates all of the above + void deallocate_layout(); + + /// Returns the value_and_holder wrapper for the given type (or the first, if `find_type` + /// omitted). Returns a default-constructed (with `.inst = nullptr`) object on failure if + /// `throw_if_missing` is false. 
+ value_and_holder get_value_and_holder(const type_info *find_type = nullptr, bool throw_if_missing = true); + + /// Bit values for the non-simple status flags + static constexpr uint8_t status_holder_constructed = 1; + static constexpr uint8_t status_instance_registered = 2; +}; + +static_assert(std::is_standard_layout::value, "Internal error: `pybind11::detail::instance` is not standard layout!"); + +/// from __cpp_future__ import (convenient aliases from C++14/17) +#if defined(PYBIND11_CPP14) && (!defined(_MSC_VER) || _MSC_VER >= 1910) +using std::enable_if_t; +using std::conditional_t; +using std::remove_cv_t; +using std::remove_reference_t; +#else +template using enable_if_t = typename std::enable_if::type; +template using conditional_t = typename std::conditional::type; +template using remove_cv_t = typename std::remove_cv::type; +template using remove_reference_t = typename std::remove_reference::type; +#endif + +/// Index sequences +#if defined(PYBIND11_CPP14) +using std::index_sequence; +using std::make_index_sequence; +#else +template struct index_sequence { }; +template struct make_index_sequence_impl : make_index_sequence_impl { }; +template struct make_index_sequence_impl <0, S...> { typedef index_sequence type; }; +template using make_index_sequence = typename make_index_sequence_impl::type; +#endif + +/// Make an index sequence of the indices of true arguments +template struct select_indices_impl { using type = ISeq; }; +template struct select_indices_impl, I, B, Bs...> + : select_indices_impl, index_sequence>, I + 1, Bs...> {}; +template using select_indices = typename select_indices_impl, 0, Bs...>::type; + +/// Backports of std::bool_constant and std::negation to accommodate older compilers +template using bool_constant = std::integral_constant; +template struct negation : bool_constant { }; + +template struct void_t_impl { using type = void; }; +template using void_t = typename void_t_impl::type; + +/// Compile-time all/any/none of that check the 
boolean value of all template types +#if defined(__cpp_fold_expressions) && !(defined(_MSC_VER) && (_MSC_VER < 1916)) +template using all_of = bool_constant<(Ts::value && ...)>; +template using any_of = bool_constant<(Ts::value || ...)>; +#elif !defined(_MSC_VER) +template struct bools {}; +template using all_of = std::is_same< + bools, + bools>; +template using any_of = negation...>>; +#else +// MSVC has trouble with the above, but supports std::conjunction, which we can use instead (albeit +// at a slight loss of compilation efficiency). +template using all_of = std::conjunction; +template using any_of = std::disjunction; +#endif +template using none_of = negation>; + +template class... Predicates> using satisfies_all_of = all_of...>; +template class... Predicates> using satisfies_any_of = any_of...>; +template class... Predicates> using satisfies_none_of = none_of...>; + +/// Strip the class from a method type +template struct remove_class { }; +template struct remove_class { typedef R type(A...); }; +template struct remove_class { typedef R type(A...); }; + +/// Helper template to strip away type modifiers +template struct intrinsic_type { typedef T type; }; +template struct intrinsic_type { typedef typename intrinsic_type::type type; }; +template struct intrinsic_type { typedef typename intrinsic_type::type type; }; +template struct intrinsic_type { typedef typename intrinsic_type::type type; }; +template struct intrinsic_type { typedef typename intrinsic_type::type type; }; +template struct intrinsic_type { typedef typename intrinsic_type::type type; }; +template struct intrinsic_type { typedef typename intrinsic_type::type type; }; +template using intrinsic_t = typename intrinsic_type::type; + +/// Helper type to replace 'void' in some expressions +struct void_type { }; + +/// Helper template which holds a list of types +template struct type_list { }; + +/// Compile-time integer sum +#ifdef __cpp_fold_expressions +template constexpr size_t 
constexpr_sum(Ts... ns) { return (0 + ... + size_t{ns}); } +#else +constexpr size_t constexpr_sum() { return 0; } +template +constexpr size_t constexpr_sum(T n, Ts... ns) { return size_t{n} + constexpr_sum(ns...); } +#endif + +PYBIND11_NAMESPACE_BEGIN(constexpr_impl) +/// Implementation details for constexpr functions +constexpr int first(int i) { return i; } +template +constexpr int first(int i, T v, Ts... vs) { return v ? i : first(i + 1, vs...); } + +constexpr int last(int /*i*/, int result) { return result; } +template +constexpr int last(int i, int result, T v, Ts... vs) { return last(i + 1, v ? i : result, vs...); } +PYBIND11_NAMESPACE_END(constexpr_impl) + +/// Return the index of the first type in Ts which satisfies Predicate. Returns sizeof...(Ts) if +/// none match. +template class Predicate, typename... Ts> +constexpr int constexpr_first() { return constexpr_impl::first(0, Predicate::value...); } + +/// Return the index of the last type in Ts which satisfies Predicate, or -1 if none match. +template class Predicate, typename... Ts> +constexpr int constexpr_last() { return constexpr_impl::last(0, -1, Predicate::value...); } + +/// Return the Nth element from the parameter pack +template +struct pack_element { using type = typename pack_element::type; }; +template +struct pack_element<0, T, Ts...> { using type = T; }; + +/// Return the one and only type which matches the predicate, or Default if none match. +/// If more than one type matches the predicate, fail at compile-time. +template class Predicate, typename Default, typename... Ts> +struct exactly_one { + static constexpr auto found = constexpr_sum(Predicate::value...); + static_assert(found <= 1, "Found more than one type matching the predicate"); + + static constexpr auto index = found ? 
constexpr_first() : 0; + using type = conditional_t::type, Default>; +}; +template class P, typename Default> +struct exactly_one { using type = Default; }; + +template class Predicate, typename Default, typename... Ts> +using exactly_one_t = typename exactly_one::type; + +/// Defer the evaluation of type T until types Us are instantiated +template struct deferred_type { using type = T; }; +template using deferred_t = typename deferred_type::type; + +/// Like is_base_of, but requires a strict base (i.e. `is_strict_base_of::value == false`, +/// unlike `std::is_base_of`) +template using is_strict_base_of = bool_constant< + std::is_base_of::value && !std::is_same::value>; + +/// Like is_base_of, but also requires that the base type is accessible (i.e. that a Derived pointer +/// can be converted to a Base pointer) +template using is_accessible_base_of = bool_constant< + std::is_base_of::value && std::is_convertible::value>; + +template class Base> +struct is_template_base_of_impl { + template static std::true_type check(Base *); + static std::false_type check(...); +}; + +/// Check if a template is the base of a type. For example: +/// `is_template_base_of` is true if `struct T : Base {}` where U can be anything +template class Base, typename T> +#if !defined(_MSC_VER) +using is_template_base_of = decltype(is_template_base_of_impl::check((intrinsic_t*)nullptr)); +#else // MSVC2015 has trouble with decltype in template aliases +struct is_template_base_of : decltype(is_template_base_of_impl::check((intrinsic_t*)nullptr)) { }; +#endif + +/// Check if T is an instantiation of the template `Class`. For example: +/// `is_instantiation` is true if `T == shared_ptr` where U can be anything. +template class Class, typename T> +struct is_instantiation : std::false_type { }; +template class Class, typename... 
Us> +struct is_instantiation> : std::true_type { }; + +/// Check if T is std::shared_ptr where U can be anything +template using is_shared_ptr = is_instantiation; + +/// Check if T looks like an input iterator +template struct is_input_iterator : std::false_type {}; +template +struct is_input_iterator()), decltype(++std::declval())>> + : std::true_type {}; + +template using is_function_pointer = bool_constant< + std::is_pointer::value && std::is_function::type>::value>; + +template struct strip_function_object { + using type = typename remove_class::type; +}; + +// Extracts the function signature from a function, function pointer or lambda. +template > +using function_signature_t = conditional_t< + std::is_function::value, + F, + typename conditional_t< + std::is_pointer::value || std::is_member_pointer::value, + std::remove_pointer, + strip_function_object + >::type +>; + +/// Returns true if the type looks like a lambda: that is, isn't a function, pointer or member +/// pointer. Note that this can catch all sorts of other things, too; this is intended to be used +/// in a place where passing a lambda makes sense. +template using is_lambda = satisfies_none_of, + std::is_function, std::is_pointer, std::is_member_pointer>; + +/// Ignore that a variable is unused in compiler warnings +inline void ignore_unused(const int *) { } + +/// Apply a function over each element of a parameter pack +#ifdef __cpp_fold_expressions +#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (((PATTERN), void()), ...) 
+#else +using expand_side_effects = bool[]; +#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (void)pybind11::detail::expand_side_effects{ ((PATTERN), void(), false)..., false } +#endif + +PYBIND11_NAMESPACE_END(detail) + +/// C++ bindings of builtin Python exceptions +class builtin_exception : public std::runtime_error { +public: + using std::runtime_error::runtime_error; + /// Set the error using the Python C API + virtual void set_error() const = 0; +}; + +#define PYBIND11_RUNTIME_EXCEPTION(name, type) \ + class name : public builtin_exception { public: \ + using builtin_exception::builtin_exception; \ + name() : name("") { } \ + void set_error() const override { PyErr_SetString(type, what()); } \ + }; + +PYBIND11_RUNTIME_EXCEPTION(stop_iteration, PyExc_StopIteration) +PYBIND11_RUNTIME_EXCEPTION(index_error, PyExc_IndexError) +PYBIND11_RUNTIME_EXCEPTION(key_error, PyExc_KeyError) +PYBIND11_RUNTIME_EXCEPTION(value_error, PyExc_ValueError) +PYBIND11_RUNTIME_EXCEPTION(type_error, PyExc_TypeError) +PYBIND11_RUNTIME_EXCEPTION(buffer_error, PyExc_BufferError) +PYBIND11_RUNTIME_EXCEPTION(import_error, PyExc_ImportError) +PYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybind11::cast or handle::call fail due to a type casting error +PYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally + +[[noreturn]] PYBIND11_NOINLINE inline void pybind11_fail(const char *reason) { throw std::runtime_error(reason); } +[[noreturn]] PYBIND11_NOINLINE inline void pybind11_fail(const std::string &reason) { throw std::runtime_error(reason); } + +template struct format_descriptor { }; + +PYBIND11_NAMESPACE_BEGIN(detail) +// Returns the index of the given type in the type char array below, and in the list in numpy.h +// The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double; +// complex float,double,long double. 
Note that the long double types only participate when long +// double is actually longer than double (it isn't under MSVC). +// NB: not only the string below but also complex.h and numpy.h rely on this order. +template struct is_fmt_numeric { static constexpr bool value = false; }; +template struct is_fmt_numeric::value>> { + static constexpr bool value = true; + static constexpr int index = std::is_same::value ? 0 : 1 + ( + std::is_integral::value ? detail::log2(sizeof(T))*2 + std::is_unsigned::value : 8 + ( + std::is_same::value ? 1 : std::is_same::value ? 2 : 0)); +}; +PYBIND11_NAMESPACE_END(detail) + +template struct format_descriptor::value>> { + static constexpr const char c = "?bBhHiIqQfdg"[detail::is_fmt_numeric::index]; + static constexpr const char value[2] = { c, '\0' }; + static std::string format() { return std::string(1, c); } +}; + +#if !defined(PYBIND11_CPP17) + +template constexpr const char format_descriptor< + T, detail::enable_if_t::value>>::value[2]; + +#endif + +/// RAII wrapper that temporarily clears any Python error state +struct error_scope { + PyObject *type, *value, *trace; + error_scope() { PyErr_Fetch(&type, &value, &trace); } + ~error_scope() { PyErr_Restore(type, value, trace); } +}; + +/// Dummy destructor wrapper that can be used to expose classes with a private destructor +struct nodelete { template void operator()(T*) { } }; + +PYBIND11_NAMESPACE_BEGIN(detail) +template +struct overload_cast_impl { + constexpr overload_cast_impl() {} // MSVC 2015 needs this + + template + constexpr auto operator()(Return (*pf)(Args...)) const noexcept + -> decltype(pf) { return pf; } + + template + constexpr auto operator()(Return (Class::*pmf)(Args...), std::false_type = {}) const noexcept + -> decltype(pmf) { return pmf; } + + template + constexpr auto operator()(Return (Class::*pmf)(Args...) 
const, std::true_type) const noexcept + -> decltype(pmf) { return pmf; } +}; +PYBIND11_NAMESPACE_END(detail) + +// overload_cast requires variable templates: C++14 +#if defined(PYBIND11_CPP14) +#define PYBIND11_OVERLOAD_CAST 1 +/// Syntax sugar for resolving overloaded function pointers: +/// - regular: static_cast(&Class::func) +/// - sweet: overload_cast(&Class::func) +template +static constexpr detail::overload_cast_impl overload_cast = {}; +// MSVC 2015 only accepts this particular initialization syntax for this variable template. +#endif + +/// Const member function selector for overload_cast +/// - regular: static_cast(&Class::func) +/// - sweet: overload_cast(&Class::func, const_) +static constexpr auto const_ = std::true_type{}; + +#if !defined(PYBIND11_CPP14) // no overload_cast: providing something that static_assert-fails: +template struct overload_cast { + static_assert(detail::deferred_t::value, + "pybind11::overload_cast<...> requires compiling in C++14 mode"); +}; +#endif // overload_cast + +PYBIND11_NAMESPACE_BEGIN(detail) + +// Adaptor for converting arbitrary container arguments into a vector; implicitly convertible from +// any standard container (or C-style array) supporting std::begin/std::end, any singleton +// arithmetic type (if T is arithmetic), or explicitly constructible from an iterator pair. 
+template +class any_container { + std::vector v; +public: + any_container() = default; + + // Can construct from a pair of iterators + template ::value>> + any_container(It first, It last) : v(first, last) { } + + // Implicit conversion constructor from any arbitrary container type with values convertible to T + template ())), T>::value>> + any_container(const Container &c) : any_container(std::begin(c), std::end(c)) { } + + // initializer_list's aren't deducible, so don't get matched by the above template; we need this + // to explicitly allow implicit conversion from one: + template ::value>> + any_container(const std::initializer_list &c) : any_container(c.begin(), c.end()) { } + + // Avoid copying if given an rvalue vector of the correct type. + any_container(std::vector &&v) : v(std::move(v)) { } + + // Moves the vector out of an rvalue any_container + operator std::vector &&() && { return std::move(v); } + + // Dereferencing obtains a reference to the underlying vector + std::vector &operator*() { return v; } + const std::vector &operator*() const { return v; } + + // -> lets you call methods on the underlying vector + std::vector *operator->() { return &v; } + const std::vector *operator->() const { return &v; } +}; + +PYBIND11_NAMESPACE_END(detail) + + + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/detail/descr.h b/diffvg/pybind11/include/pybind11/detail/descr.h new file mode 100644 index 0000000000000000000000000000000000000000..92720cd56277e73a27da3bac85c3c2ae6a3589ac --- /dev/null +++ b/diffvg/pybind11/include/pybind11/detail/descr.h @@ -0,0 +1,100 @@ +/* + pybind11/detail/descr.h: Helper type for concatenating type signatures at compile time + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "common.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +#if !defined(_MSC_VER) +# define PYBIND11_DESCR_CONSTEXPR static constexpr +#else +# define PYBIND11_DESCR_CONSTEXPR const +#endif + +/* Concatenate type signatures at compile time */ +template +struct descr { + char text[N + 1]; + + constexpr descr() : text{'\0'} { } + constexpr descr(char const (&s)[N+1]) : descr(s, make_index_sequence()) { } + + template + constexpr descr(char const (&s)[N+1], index_sequence) : text{s[Is]..., '\0'} { } + + template + constexpr descr(char c, Chars... cs) : text{c, static_cast(cs)..., '\0'} { } + + static constexpr std::array types() { + return {{&typeid(Ts)..., nullptr}}; + } +}; + +template +constexpr descr plus_impl(const descr &a, const descr &b, + index_sequence, index_sequence) { + return {a.text[Is1]..., b.text[Is2]...}; +} + +template +constexpr descr operator+(const descr &a, const descr &b) { + return plus_impl(a, b, make_index_sequence(), make_index_sequence()); +} + +template +constexpr descr _(char const(&text)[N]) { return descr(text); } +constexpr descr<0> _(char const(&)[1]) { return {}; } + +template struct int_to_str : int_to_str { }; +template struct int_to_str<0, Digits...> { + static constexpr auto digits = descr(('0' + Digits)...); +}; + +// Ternary description (like std::conditional) +template +constexpr enable_if_t> _(char const(&text1)[N1], char const(&)[N2]) { + return _(text1); +} +template +constexpr enable_if_t> _(char const(&)[N1], char const(&text2)[N2]) { + return _(text2); +} + +template +constexpr enable_if_t _(const T1 &d, const T2 &) { return d; } +template +constexpr enable_if_t _(const T1 &, const T2 &d) { return d; } + +template auto constexpr _() -> decltype(int_to_str::digits) { + return int_to_str::digits; +} + +template constexpr descr<1, Type> _() { return {'%'}; } + +constexpr descr<0> concat() { return {}; } + +template +constexpr descr concat(const descr 
&descr) { return descr; } + +template +constexpr auto concat(const descr &d, const Args &...args) + -> decltype(std::declval>() + concat(args...)) { + return d + _(", ") + concat(args...); +} + +template +constexpr descr type_descr(const descr &descr) { + return _("{") + descr + _("}"); +} + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/detail/init.h b/diffvg/pybind11/include/pybind11/detail/init.h new file mode 100644 index 0000000000000000000000000000000000000000..3ef78c1179f5b533c3ba3f637420c8125d632a7f --- /dev/null +++ b/diffvg/pybind11/include/pybind11/detail/init.h @@ -0,0 +1,336 @@ +/* + pybind11/detail/init.h: init factory function implementation and support code. + + Copyright (c) 2017 Jason Rhinelander + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "class.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +template <> +class type_caster { +public: + bool load(handle h, bool) { + value = reinterpret_cast(h.ptr()); + return true; + } + + template using cast_op_type = value_and_holder &; + operator value_and_holder &() { return *value; } + static constexpr auto name = _(); + +private: + value_and_holder *value = nullptr; +}; + +PYBIND11_NAMESPACE_BEGIN(initimpl) + +inline void no_nullptr(void *ptr) { + if (!ptr) throw type_error("pybind11::init(): factory function returned nullptr"); +} + +// Implementing functions for all forms of py::init<...> and py::init(...) +template using Cpp = typename Class::type; +template using Alias = typename Class::type_alias; +template using Holder = typename Class::holder_type; + +template using is_alias_constructible = std::is_constructible, Cpp &&>; + +// Takes a Cpp pointer and returns true if it actually is a polymorphic Alias instance. 
+template = 0> +bool is_alias(Cpp *ptr) { + return dynamic_cast *>(ptr) != nullptr; +} +// Failing fallback version of the above for a no-alias class (always returns false) +template +constexpr bool is_alias(void *) { return false; } + +// Constructs and returns a new object; if the given arguments don't map to a constructor, we fall +// back to brace aggregate initiailization so that for aggregate initialization can be used with +// py::init, e.g. `py::init` to initialize a `struct T { int a; int b; }`. For +// non-aggregate types, we need to use an ordinary T(...) constructor (invoking as `T{...}` usually +// works, but will not do the expected thing when `T` has an `initializer_list` constructor). +template ::value, int> = 0> +inline Class *construct_or_initialize(Args &&...args) { return new Class(std::forward(args)...); } +template ::value, int> = 0> +inline Class *construct_or_initialize(Args &&...args) { return new Class{std::forward(args)...}; } + +// Attempts to constructs an alias using a `Alias(Cpp &&)` constructor. This allows types with +// an alias to provide only a single Cpp factory function as long as the Alias can be +// constructed from an rvalue reference of the base Cpp type. This means that Alias classes +// can, when appropriate, simply define a `Alias(Cpp &&)` constructor rather than needing to +// inherit all the base class constructors. +template +void construct_alias_from_cpp(std::true_type /*is_alias_constructible*/, + value_and_holder &v_h, Cpp &&base) { + v_h.value_ptr() = new Alias(std::move(base)); +} +template +[[noreturn]] void construct_alias_from_cpp(std::false_type /*!is_alias_constructible*/, + value_and_holder &, Cpp &&) { + throw type_error("pybind11::init(): unable to convert returned instance to required " + "alias class: no `Alias(Class &&)` constructor available"); +} + +// Error-generating fallback for factories that don't match one of the below construction +// mechanisms. +template +void construct(...) 
{ + static_assert(!std::is_same::value /* always false */, + "pybind11::init(): init function must return a compatible pointer, " + "holder, or value"); +} + +// Pointer return v1: the factory function returns a class pointer for a registered class. +// If we don't need an alias (because this class doesn't have one, or because the final type is +// inherited on the Python side) we can simply take over ownership. Otherwise we need to try to +// construct an Alias from the returned base instance. +template +void construct(value_and_holder &v_h, Cpp *ptr, bool need_alias) { + no_nullptr(ptr); + if (Class::has_alias && need_alias && !is_alias(ptr)) { + // We're going to try to construct an alias by moving the cpp type. Whether or not + // that succeeds, we still need to destroy the original cpp pointer (either the + // moved away leftover, if the alias construction works, or the value itself if we + // throw an error), but we can't just call `delete ptr`: it might have a special + // deleter, or might be shared_from_this. So we construct a holder around it as if + // it was a normal instance, then steal the holder away into a local variable; thus + // the holder and destruction happens when we leave the C++ scope, and the holder + // class gets to handle the destruction however it likes. + v_h.value_ptr() = ptr; + v_h.set_instance_registered(true); // To prevent init_instance from registering it + v_h.type->init_instance(v_h.inst, nullptr); // Set up the holder + Holder temp_holder(std::move(v_h.holder>())); // Steal the holder + v_h.type->dealloc(v_h); // Destroys the moved-out holder remains, resets value ptr to null + v_h.set_instance_registered(false); + + construct_alias_from_cpp(is_alias_constructible{}, v_h, std::move(*ptr)); + } else { + // Otherwise the type isn't inherited, so we don't need an Alias + v_h.value_ptr() = ptr; + } +} + +// Pointer return v2: a factory that always returns an alias instance ptr. We simply take over +// ownership of the pointer. 
+template = 0> +void construct(value_and_holder &v_h, Alias *alias_ptr, bool) { + no_nullptr(alias_ptr); + v_h.value_ptr() = static_cast *>(alias_ptr); +} + +// Holder return: copy its pointer, and move or copy the returned holder into the new instance's +// holder. This also handles types like std::shared_ptr and std::unique_ptr where T is a +// derived type (through those holder's implicit conversion from derived class holder constructors). +template +void construct(value_and_holder &v_h, Holder holder, bool need_alias) { + auto *ptr = holder_helper>::get(holder); + no_nullptr(ptr); + // If we need an alias, check that the held pointer is actually an alias instance + if (Class::has_alias && need_alias && !is_alias(ptr)) + throw type_error("pybind11::init(): construction failed: returned holder-wrapped instance " + "is not an alias instance"); + + v_h.value_ptr() = ptr; + v_h.type->init_instance(v_h.inst, &holder); +} + +// return-by-value version 1: returning a cpp class by value. If the class has an alias and an +// alias is required the alias must have an `Alias(Cpp &&)` constructor so that we can construct +// the alias from the base when needed (i.e. because of Python-side inheritance). When we don't +// need it, we simply move-construct the cpp value into a new instance. +template +void construct(value_and_holder &v_h, Cpp &&result, bool need_alias) { + static_assert(std::is_move_constructible>::value, + "pybind11::init() return-by-value factory function requires a movable class"); + if (Class::has_alias && need_alias) + construct_alias_from_cpp(is_alias_constructible{}, v_h, std::move(result)); + else + v_h.value_ptr() = new Cpp(std::move(result)); +} + +// return-by-value version 2: returning a value of the alias type itself. We move-construct an +// Alias instance (even if no the python-side inheritance is involved). The is intended for +// cases where Alias initialization is always desired. 
+template +void construct(value_and_holder &v_h, Alias &&result, bool) { + static_assert(std::is_move_constructible>::value, + "pybind11::init() return-by-alias-value factory function requires a movable alias class"); + v_h.value_ptr() = new Alias(std::move(result)); +} + +// Implementing class for py::init<...>() +template +struct constructor { + template = 0> + static void execute(Class &cl, const Extra&... extra) { + cl.def("__init__", [](value_and_holder &v_h, Args... args) { + v_h.value_ptr() = construct_or_initialize>(std::forward(args)...); + }, is_new_style_constructor(), extra...); + } + + template , Args...>::value, int> = 0> + static void execute(Class &cl, const Extra&... extra) { + cl.def("__init__", [](value_and_holder &v_h, Args... args) { + if (Py_TYPE(v_h.inst) == v_h.type->type) + v_h.value_ptr() = construct_or_initialize>(std::forward(args)...); + else + v_h.value_ptr() = construct_or_initialize>(std::forward(args)...); + }, is_new_style_constructor(), extra...); + } + + template , Args...>::value, int> = 0> + static void execute(Class &cl, const Extra&... extra) { + cl.def("__init__", [](value_and_holder &v_h, Args... args) { + v_h.value_ptr() = construct_or_initialize>(std::forward(args)...); + }, is_new_style_constructor(), extra...); + } +}; + +// Implementing class for py::init_alias<...>() +template struct alias_constructor { + template , Args...>::value, int> = 0> + static void execute(Class &cl, const Extra&... extra) { + cl.def("__init__", [](value_and_holder &v_h, Args... 
args) { + v_h.value_ptr() = construct_or_initialize>(std::forward(args)...); + }, is_new_style_constructor(), extra...); + } +}; + +// Implementation class for py::init(Func) and py::init(Func, AliasFunc) +template , typename = function_signature_t> +struct factory; + +// Specialization for py::init(Func) +template +struct factory { + remove_reference_t class_factory; + + factory(Func &&f) : class_factory(std::forward(f)) { } + + // The given class either has no alias or has no separate alias factory; + // this always constructs the class itself. If the class is registered with an alias + // type and an alias instance is needed (i.e. because the final type is a Python class + // inheriting from the C++ type) the returned value needs to either already be an alias + // instance, or the alias needs to be constructible from a `Class &&` argument. + template + void execute(Class &cl, const Extra &...extra) && { + #if defined(PYBIND11_CPP14) + cl.def("__init__", [func = std::move(class_factory)] + #else + auto &func = class_factory; + cl.def("__init__", [func] + #endif + (value_and_holder &v_h, Args... args) { + construct(v_h, func(std::forward(args)...), + Py_TYPE(v_h.inst) != v_h.type->type); + }, is_new_style_constructor(), extra...); + } +}; + +// Specialization for py::init(Func, AliasFunc) +template +struct factory { + static_assert(sizeof...(CArgs) == sizeof...(AArgs), + "pybind11::init(class_factory, alias_factory): class and alias factories " + "must have identical argument signatures"); + static_assert(all_of...>::value, + "pybind11::init(class_factory, alias_factory): class and alias factories " + "must have identical argument signatures"); + + remove_reference_t class_factory; + remove_reference_t alias_factory; + + factory(CFunc &&c, AFunc &&a) + : class_factory(std::forward(c)), alias_factory(std::forward(a)) { } + + // The class factory is called when the `self` type passed to `__init__` is the direct + // class (i.e. 
not inherited), the alias factory when `self` is a Python-side subtype. + template + void execute(Class &cl, const Extra&... extra) && { + static_assert(Class::has_alias, "The two-argument version of `py::init()` can " + "only be used if the class has an alias"); + #if defined(PYBIND11_CPP14) + cl.def("__init__", [class_func = std::move(class_factory), alias_func = std::move(alias_factory)] + #else + auto &class_func = class_factory; + auto &alias_func = alias_factory; + cl.def("__init__", [class_func, alias_func] + #endif + (value_and_holder &v_h, CArgs... args) { + if (Py_TYPE(v_h.inst) == v_h.type->type) + // If the instance type equals the registered type we don't have inheritance, so + // don't need the alias and can construct using the class function: + construct(v_h, class_func(std::forward(args)...), false); + else + construct(v_h, alias_func(std::forward(args)...), true); + }, is_new_style_constructor(), extra...); + } +}; + +/// Set just the C++ state. Same as `__init__`. +template +void setstate(value_and_holder &v_h, T &&result, bool need_alias) { + construct(v_h, std::forward(result), need_alias); +} + +/// Set both the C++ and Python states +template ::value, int> = 0> +void setstate(value_and_holder &v_h, std::pair &&result, bool need_alias) { + construct(v_h, std::move(result.first), need_alias); + setattr((PyObject *) v_h.inst, "__dict__", result.second); +} + +/// Implementation for py::pickle(GetState, SetState) +template , typename = function_signature_t> +struct pickle_factory; + +template +struct pickle_factory { + static_assert(std::is_same, intrinsic_t>::value, + "The type returned by `__getstate__` must be the same " + "as the argument accepted by `__setstate__`"); + + remove_reference_t get; + remove_reference_t set; + + pickle_factory(Get get, Set set) + : get(std::forward(get)), set(std::forward(set)) { } + + template + void execute(Class &cl, const Extra &...extra) && { + cl.def("__getstate__", std::move(get)); + +#if 
defined(PYBIND11_CPP14) + cl.def("__setstate__", [func = std::move(set)] +#else + auto &func = set; + cl.def("__setstate__", [func] +#endif + (value_and_holder &v_h, ArgState state) { + setstate(v_h, func(std::forward(state)), + Py_TYPE(v_h.inst) != v_h.type->type); + }, is_new_style_constructor(), extra...); + } +}; + +PYBIND11_NAMESPACE_END(initimpl) +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(pybind11) diff --git a/diffvg/pybind11/include/pybind11/detail/internals.h b/diffvg/pybind11/include/pybind11/detail/internals.h new file mode 100644 index 0000000000000000000000000000000000000000..cf40e9fe995cd952e0dec8378b44b3ac8477f235 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/detail/internals.h @@ -0,0 +1,352 @@ +/* + pybind11/detail/internals.h: Internal data structure and related functions + + Copyright (c) 2017 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "../pytypes.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) +// Forward declarations +inline PyTypeObject *make_static_property_type(); +inline PyTypeObject *make_default_metaclass(); +inline PyObject *make_object_base_type(PyTypeObject *metaclass); + +// The old Python Thread Local Storage (TLS) API is deprecated in Python 3.7 in favor of the new +// Thread Specific Storage (TSS) API. 
+#if PY_VERSION_HEX >= 0x03070000 +# define PYBIND11_TLS_KEY_INIT(var) Py_tss_t *var = nullptr +# define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get((key)) +# define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set((key), (value)) +# define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set((key), nullptr) +# define PYBIND11_TLS_FREE(key) PyThread_tss_free(key) +#else + // Usually an int but a long on Cygwin64 with Python 3.x +# define PYBIND11_TLS_KEY_INIT(var) decltype(PyThread_create_key()) var = 0 +# define PYBIND11_TLS_GET_VALUE(key) PyThread_get_key_value((key)) +# if PY_MAJOR_VERSION < 3 +# define PYBIND11_TLS_DELETE_VALUE(key) \ + PyThread_delete_key_value(key) +# define PYBIND11_TLS_REPLACE_VALUE(key, value) \ + do { \ + PyThread_delete_key_value((key)); \ + PyThread_set_key_value((key), (value)); \ + } while (false) +# else +# define PYBIND11_TLS_DELETE_VALUE(key) \ + PyThread_set_key_value((key), nullptr) +# define PYBIND11_TLS_REPLACE_VALUE(key, value) \ + PyThread_set_key_value((key), (value)) +# endif +# define PYBIND11_TLS_FREE(key) (void)key +#endif + +// Python loads modules by default with dlopen with the RTLD_LOCAL flag; under libc++ and possibly +// other STLs, this means `typeid(A)` from one module won't equal `typeid(A)` from another module +// even when `A` is the same, non-hidden-visibility type (e.g. from a common include). Under +// libstdc++, this doesn't happen: equality and the type_index hash are based on the type name, +// which works. If not under a known-good stl, provide our own name-based hash and equality +// functions that use the type name. 
+#if defined(__GLIBCXX__) +inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) { return lhs == rhs; } +using type_hash = std::hash; +using type_equal_to = std::equal_to; +#else +inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) { + return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0; +} + +struct type_hash { + size_t operator()(const std::type_index &t) const { + size_t hash = 5381; + const char *ptr = t.name(); + while (auto c = static_cast(*ptr++)) + hash = (hash * 33) ^ c; + return hash; + } +}; + +struct type_equal_to { + bool operator()(const std::type_index &lhs, const std::type_index &rhs) const { + return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0; + } +}; +#endif + +template +using type_map = std::unordered_map; + +struct overload_hash { + inline size_t operator()(const std::pair& v) const { + size_t value = std::hash()(v.first); + value ^= std::hash()(v.second) + 0x9e3779b9 + (value<<6) + (value>>2); + return value; + } +}; + +/// Internal data structure used to track registered instances and types. +/// Whenever binary incompatible changes are made to this structure, +/// `PYBIND11_INTERNALS_VERSION` must be incremented. 
+struct internals { + type_map registered_types_cpp; // std::type_index -> pybind11's type information + std::unordered_map> registered_types_py; // PyTypeObject* -> base type_info(s) + std::unordered_multimap registered_instances; // void * -> instance* + std::unordered_set, overload_hash> inactive_overload_cache; + type_map> direct_conversions; + std::unordered_map> patients; + std::forward_list registered_exception_translators; + std::unordered_map shared_data; // Custom data to be shared across extensions + std::vector loader_patient_stack; // Used by `loader_life_support` + std::forward_list static_strings; // Stores the std::strings backing detail::c_str() + PyTypeObject *static_property_type; + PyTypeObject *default_metaclass; + PyObject *instance_base; +#if defined(WITH_THREAD) + PYBIND11_TLS_KEY_INIT(tstate); + PyInterpreterState *istate = nullptr; + ~internals() { + // This destructor is called *after* Py_Finalize() in finalize_interpreter(). + // That *SHOULD BE* fine. The following details what happens whe PyThread_tss_free is called. + // PYBIND11_TLS_FREE is PyThread_tss_free on python 3.7+. On older python, it does nothing. + // PyThread_tss_free calls PyThread_tss_delete and PyMem_RawFree. + // PyThread_tss_delete just calls TlsFree (on Windows) or pthread_key_delete (on *NIX). Neither + // of those have anything to do with CPython internals. + // PyMem_RawFree *requires* that the `tstate` be allocated with the CPython allocator. + PYBIND11_TLS_FREE(tstate); + } +#endif +}; + +/// Additional type information which does not fit into the PyTypeObject. +/// Changes to this struct also require bumping `PYBIND11_INTERNALS_VERSION`. 
+struct type_info { + PyTypeObject *type; + const std::type_info *cpptype; + size_t type_size, type_align, holder_size_in_ptrs; + void *(*operator_new)(size_t); + void (*init_instance)(instance *, const void *); + void (*dealloc)(value_and_holder &v_h); + std::vector implicit_conversions; + std::vector> implicit_casts; + std::vector *direct_conversions; + buffer_info *(*get_buffer)(PyObject *, void *) = nullptr; + void *get_buffer_data = nullptr; + void *(*module_local_load)(PyObject *, const type_info *) = nullptr; + /* A simple type never occurs as a (direct or indirect) parent + * of a class that makes use of multiple inheritance */ + bool simple_type : 1; + /* True if there is no multiple inheritance in this type's inheritance tree */ + bool simple_ancestors : 1; + /* for base vs derived holder_type checks */ + bool default_holder : 1; + /* true if this is a type registered with py::module_local */ + bool module_local : 1; +}; + +/// Tracks the `internals` and `type_info` ABI version independent of the main library version +#define PYBIND11_INTERNALS_VERSION 4 + +/// On MSVC, debug and release builds are not ABI-compatible! +#if defined(_MSC_VER) && defined(_DEBUG) +# define PYBIND11_BUILD_TYPE "_debug" +#else +# define PYBIND11_BUILD_TYPE "" +#endif + +/// Let's assume that different compilers are ABI-incompatible. 
+#if defined(_MSC_VER) +# define PYBIND11_COMPILER_TYPE "_msvc" +#elif defined(__INTEL_COMPILER) +# define PYBIND11_COMPILER_TYPE "_icc" +#elif defined(__clang__) +# define PYBIND11_COMPILER_TYPE "_clang" +#elif defined(__PGI) +# define PYBIND11_COMPILER_TYPE "_pgi" +#elif defined(__MINGW32__) +# define PYBIND11_COMPILER_TYPE "_mingw" +#elif defined(__CYGWIN__) +# define PYBIND11_COMPILER_TYPE "_gcc_cygwin" +#elif defined(__GNUC__) +# define PYBIND11_COMPILER_TYPE "_gcc" +#else +# define PYBIND11_COMPILER_TYPE "_unknown" +#endif + +#if defined(_LIBCPP_VERSION) +# define PYBIND11_STDLIB "_libcpp" +#elif defined(__GLIBCXX__) || defined(__GLIBCPP__) +# define PYBIND11_STDLIB "_libstdcpp" +#else +# define PYBIND11_STDLIB "" +#endif + +/// On Linux/OSX, changes in __GXX_ABI_VERSION__ indicate ABI incompatibility. +#if defined(__GXX_ABI_VERSION) +# define PYBIND11_BUILD_ABI "_cxxabi" PYBIND11_TOSTRING(__GXX_ABI_VERSION) +#else +# define PYBIND11_BUILD_ABI "" +#endif + +#if defined(WITH_THREAD) +# define PYBIND11_INTERNALS_KIND "" +#else +# define PYBIND11_INTERNALS_KIND "_without_thread" +#endif + +#define PYBIND11_INTERNALS_ID "__pybind11_internals_v" \ + PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) PYBIND11_INTERNALS_KIND PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI PYBIND11_BUILD_TYPE "__" + +#define PYBIND11_MODULE_LOCAL_ID "__pybind11_module_local_v" \ + PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) PYBIND11_INTERNALS_KIND PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI PYBIND11_BUILD_TYPE "__" + +/// Each module locally stores a pointer to the `internals` data. The data +/// itself is shared among modules with the same `PYBIND11_INTERNALS_ID`. 
+inline internals **&get_internals_pp() { + static internals **internals_pp = nullptr; + return internals_pp; +} + +inline void translate_exception(std::exception_ptr p) { + try { + if (p) std::rethrow_exception(p); + } catch (error_already_set &e) { e.restore(); return; + } catch (const builtin_exception &e) { e.set_error(); return; + } catch (const std::bad_alloc &e) { PyErr_SetString(PyExc_MemoryError, e.what()); return; + } catch (const std::domain_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return; + } catch (const std::invalid_argument &e) { PyErr_SetString(PyExc_ValueError, e.what()); return; + } catch (const std::length_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return; + } catch (const std::out_of_range &e) { PyErr_SetString(PyExc_IndexError, e.what()); return; + } catch (const std::range_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return; + } catch (const std::overflow_error &e) { PyErr_SetString(PyExc_OverflowError, e.what()); return; + } catch (const std::exception &e) { PyErr_SetString(PyExc_RuntimeError, e.what()); return; + } catch (...) { + PyErr_SetString(PyExc_RuntimeError, "Caught an unknown exception!"); + return; + } +} + +#if !defined(__GLIBCXX__) +inline void translate_local_exception(std::exception_ptr p) { + try { + if (p) std::rethrow_exception(p); + } catch (error_already_set &e) { e.restore(); return; + } catch (const builtin_exception &e) { e.set_error(); return; + } +} +#endif + +/// Return a reference to the current `internals` data +PYBIND11_NOINLINE inline internals &get_internals() { + auto **&internals_pp = get_internals_pp(); + if (internals_pp && *internals_pp) + return **internals_pp; + + // Ensure that the GIL is held since we will need to make Python calls. + // Cannot use py::gil_scoped_acquire here since that constructor calls get_internals. 
+ struct gil_scoped_acquire_local { + gil_scoped_acquire_local() : state (PyGILState_Ensure()) {} + ~gil_scoped_acquire_local() { PyGILState_Release(state); } + const PyGILState_STATE state; + } gil; + + constexpr auto *id = PYBIND11_INTERNALS_ID; + auto builtins = handle(PyEval_GetBuiltins()); + if (builtins.contains(id) && isinstance(builtins[id])) { + internals_pp = static_cast(capsule(builtins[id])); + + // We loaded builtins through python's builtins, which means that our `error_already_set` + // and `builtin_exception` may be different local classes than the ones set up in the + // initial exception translator, below, so add another for our local exception classes. + // + // libstdc++ doesn't require this (types there are identified only by name) +#if !defined(__GLIBCXX__) + (*internals_pp)->registered_exception_translators.push_front(&translate_local_exception); +#endif + } else { + if (!internals_pp) internals_pp = new internals*(); + auto *&internals_ptr = *internals_pp; + internals_ptr = new internals(); +#if defined(WITH_THREAD) + + #if PY_VERSION_HEX < 0x03090000 + PyEval_InitThreads(); + #endif + PyThreadState *tstate = PyThreadState_Get(); + #if PY_VERSION_HEX >= 0x03070000 + internals_ptr->tstate = PyThread_tss_alloc(); + if (!internals_ptr->tstate || PyThread_tss_create(internals_ptr->tstate)) + pybind11_fail("get_internals: could not successfully initialize the TSS key!"); + PyThread_tss_set(internals_ptr->tstate, tstate); + #else + internals_ptr->tstate = PyThread_create_key(); + if (internals_ptr->tstate == -1) + pybind11_fail("get_internals: could not successfully initialize the TLS key!"); + PyThread_set_key_value(internals_ptr->tstate, tstate); + #endif + internals_ptr->istate = tstate->interp; +#endif + builtins[id] = capsule(internals_pp); + internals_ptr->registered_exception_translators.push_front(&translate_exception); + internals_ptr->static_property_type = make_static_property_type(); + internals_ptr->default_metaclass = 
make_default_metaclass(); + internals_ptr->instance_base = make_object_base_type(internals_ptr->default_metaclass); + } + return **internals_pp; +} + +/// Works like `internals.registered_types_cpp`, but for module-local registered types: +inline type_map ®istered_local_types_cpp() { + static type_map locals{}; + return locals; +} + +/// Constructs a std::string with the given arguments, stores it in `internals`, and returns its +/// `c_str()`. Such strings objects have a long storage duration -- the internal strings are only +/// cleared when the program exits or after interpreter shutdown (when embedding), and so are +/// suitable for c-style strings needed by Python internals (such as PyTypeObject's tp_name). +template +const char *c_str(Args &&...args) { + auto &strings = get_internals().static_strings; + strings.emplace_front(std::forward(args)...); + return strings.front().c_str(); +} + +PYBIND11_NAMESPACE_END(detail) + +/// Returns a named pointer that is shared among all extension modules (using the same +/// pybind11 version) running in the current interpreter. Names starting with underscores +/// are reserved for internal usage. Returns `nullptr` if no matching entry was found. +inline PYBIND11_NOINLINE void *get_shared_data(const std::string &name) { + auto &internals = detail::get_internals(); + auto it = internals.shared_data.find(name); + return it != internals.shared_data.end() ? it->second : nullptr; +} + +/// Set the shared data that can be later recovered by `get_shared_data()`. +inline PYBIND11_NOINLINE void *set_shared_data(const std::string &name, void *data) { + detail::get_internals().shared_data[name] = data; + return data; +} + +/// Returns a typed reference to a shared data entry (by using `get_shared_data()`) if +/// such entry exists. Otherwise, a new object of default-constructible type `T` is +/// added to the shared data under the given name and a reference to it is returned. 
+template +T &get_or_create_shared_data(const std::string &name) { + auto &internals = detail::get_internals(); + auto it = internals.shared_data.find(name); + T *ptr = (T *) (it != internals.shared_data.end() ? it->second : nullptr); + if (!ptr) { + ptr = new T(); + internals.shared_data[name] = ptr; + } + return *ptr; +} + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/detail/typeid.h b/diffvg/pybind11/include/pybind11/detail/typeid.h new file mode 100644 index 0000000000000000000000000000000000000000..148889ffefdcc4de303d99f82af43aa9302c0a7c --- /dev/null +++ b/diffvg/pybind11/include/pybind11/detail/typeid.h @@ -0,0 +1,55 @@ +/* + pybind11/detail/typeid.h: Compiler-independent access to type identifiers + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include +#include + +#if defined(__GNUG__) +#include +#endif + +#include "common.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) +/// Erase all occurrences of a substring +inline void erase_all(std::string &string, const std::string &search) { + for (size_t pos = 0;;) { + pos = string.find(search, pos); + if (pos == std::string::npos) break; + string.erase(pos, search.length()); + } +} + +PYBIND11_NOINLINE inline void clean_type_id(std::string &name) { +#if defined(__GNUG__) + int status = 0; + std::unique_ptr res { + abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), std::free }; + if (status == 0) + name = res.get(); +#else + detail::erase_all(name, "class "); + detail::erase_all(name, "struct "); + detail::erase_all(name, "enum "); +#endif + detail::erase_all(name, "pybind11::"); +} +PYBIND11_NAMESPACE_END(detail) + +/// Return a string representation of a C++ type +template static std::string type_id() { + std::string name(typeid(T).name()); + detail::clean_type_id(name); + return name; 
+} + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/eigen.h b/diffvg/pybind11/include/pybind11/eigen.h new file mode 100644 index 0000000000000000000000000000000000000000..22139def6013b47005df22be778bd6984e05ea1d --- /dev/null +++ b/diffvg/pybind11/include/pybind11/eigen.h @@ -0,0 +1,607 @@ +/* + pybind11/eigen.h: Transparent conversion for dense and sparse Eigen matrices + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "numpy.h" + +#if defined(__INTEL_COMPILER) +# pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem) +#elif defined(__GNUG__) || defined(__clang__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wconversion" +# pragma GCC diagnostic ignored "-Wdeprecated-declarations" +# ifdef __clang__ +// Eigen generates a bunch of implicit-copy-constructor-is-deprecated warnings with -Wdeprecated +// under Clang, so disable that warning here: +# pragma GCC diagnostic ignored "-Wdeprecated" +# endif +# if __GNUC__ >= 7 +# pragma GCC diagnostic ignored "-Wint-in-bool-context" +# endif +#endif + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant +# pragma warning(disable: 4996) // warning C4996: std::unary_negate is deprecated in C++17 +#endif + +#include +#include + +// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit +// move constructors that break things. We could detect this an explicitly copy, but an extra copy +// of matrices seems highly undesirable. 
+static_assert(EIGEN_VERSION_AT_LEAST(3,2,7), "Eigen support in pybind11 requires Eigen >= 3.2.7"); + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides: +using EigenDStride = Eigen::Stride; +template using EigenDRef = Eigen::Ref; +template using EigenDMap = Eigen::Map; + +PYBIND11_NAMESPACE_BEGIN(detail) + +#if EIGEN_VERSION_AT_LEAST(3,3,0) +using EigenIndex = Eigen::Index; +#else +using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE; +#endif + +// Matches Eigen::Map, Eigen::Ref, blocks, etc: +template using is_eigen_dense_map = all_of, std::is_base_of, T>>; +template using is_eigen_mutable_map = std::is_base_of, T>; +template using is_eigen_dense_plain = all_of>, is_template_base_of>; +template using is_eigen_sparse = is_template_base_of; +// Test for objects inheriting from EigenBase that aren't captured by the above. This +// basically covers anything that can be assigned to a dense matrix but that don't have a typical +// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and +// SelfAdjointView fall into this category. +template using is_eigen_other = all_of< + is_template_base_of, + negation, is_eigen_dense_plain, is_eigen_sparse>> +>; + +// Captures numpy/eigen conformability status (returned by EigenProps::conformable()): +template struct EigenConformable { + bool conformable = false; + EigenIndex rows = 0, cols = 0; + EigenDStride stride{0, 0}; // Only valid if negativestrides is false! + bool negativestrides = false; // If true, do not use stride! + + EigenConformable(bool fits = false) : conformable{fits} {} + // Matrix type: + EigenConformable(EigenIndex r, EigenIndex c, + EigenIndex rstride, EigenIndex cstride) : + conformable{true}, rows{r}, cols{c} { + // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity. 
http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747 + if (rstride < 0 || cstride < 0) { + negativestrides = true; + } else { + stride = {EigenRowMajor ? rstride : cstride /* outer stride */, + EigenRowMajor ? cstride : rstride /* inner stride */ }; + } + } + // Vector type: + EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride) + : EigenConformable(r, c, r == 1 ? c*stride : stride, c == 1 ? r : r*stride) {} + + template bool stride_compatible() const { + // To have compatible strides, we need (on both dimensions) one of fully dynamic strides, + // matching strides, or a dimension size of 1 (in which case the stride value is irrelevant) + return + !negativestrides && + (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner() || + (EigenRowMajor ? cols : rows) == 1) && + (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer() || + (EigenRowMajor ? rows : cols) == 1); + } + operator bool() const { return conformable; } +}; + +template struct eigen_extract_stride { using type = Type; }; +template +struct eigen_extract_stride> { using type = StrideType; }; +template +struct eigen_extract_stride> { using type = StrideType; }; + +// Helper struct for extracting information from an Eigen type +template struct EigenProps { + using Type = Type_; + using Scalar = typename Type::Scalar; + using StrideType = typename eigen_extract_stride::type; + static constexpr EigenIndex + rows = Type::RowsAtCompileTime, + cols = Type::ColsAtCompileTime, + size = Type::SizeAtCompileTime; + static constexpr bool + row_major = Type::IsRowMajor, + vector = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1 + fixed_rows = rows != Eigen::Dynamic, + fixed_cols = cols != Eigen::Dynamic, + fixed = size != Eigen::Dynamic, // Fully-fixed size + dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size + + template using if_zero = std::integral_constant; + static constexpr EigenIndex inner_stride = if_zero::value, + 
outer_stride = if_zero::value; + static constexpr bool dynamic_stride = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic; + static constexpr bool requires_row_major = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1; + static constexpr bool requires_col_major = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1; + + // Takes an input array and determines whether we can make it fit into the Eigen type. If + // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector + // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type). + static EigenConformable conformable(const array &a) { + const auto dims = a.ndim(); + if (dims < 1 || dims > 2) + return false; + + if (dims == 2) { // Matrix type: require exact match (or dynamic) + + EigenIndex + np_rows = a.shape(0), + np_cols = a.shape(1), + np_rstride = a.strides(0) / static_cast(sizeof(Scalar)), + np_cstride = a.strides(1) / static_cast(sizeof(Scalar)); + if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols)) + return false; + + return {np_rows, np_cols, np_rstride, np_cstride}; + } + + // Otherwise we're storing an n-vector. Only one of the strides will be used, but whichever + // is used, we want the (single) numpy stride value. + const EigenIndex n = a.shape(0), + stride = a.strides(0) / static_cast(sizeof(Scalar)); + + if (vector) { // Eigen type is a compile-time vector + if (fixed && size != n) + return false; // Vector size mismatch + return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride}; + } + else if (fixed) { + // The type has a fixed size, but is not a vector: abort + return false; + } + else if (fixed_cols) { + // Since this isn't a vector, cols must be != 1. We allow this only if it exactly + // equals the number of elements (rows is Dynamic, and so 1 row is allowed). 
+ if (cols != n) return false; + return {1, n, stride}; + } + else { + // Otherwise it's either fully dynamic, or column dynamic; both become a column vector + if (fixed_rows && rows != n) return false; + return {n, 1, stride}; + } + } + + static constexpr bool show_writeable = is_eigen_dense_map::value && is_eigen_mutable_map::value; + static constexpr bool show_order = is_eigen_dense_map::value; + static constexpr bool show_c_contiguous = show_order && requires_row_major; + static constexpr bool show_f_contiguous = !show_c_contiguous && show_order && requires_col_major; + + static constexpr auto descriptor = + _("numpy.ndarray[") + npy_format_descriptor::name + + _("[") + _(_<(size_t) rows>(), _("m")) + + _(", ") + _(_<(size_t) cols>(), _("n")) + + _("]") + + // For a reference type (e.g. Ref) we have other constraints that might need to be + // satisfied: writeable=True (for a mutable reference), and, depending on the map's stride + // options, possibly f_contiguous or c_contiguous. We include them in the descriptor output + // to provide some hint as to why a TypeError is occurring (otherwise it can be confusing to + // see that a function accepts a 'numpy.ndarray[float64[3,2]]' and an error message that you + // *gave* a numpy.ndarray of the right type and dimensions. + _(", flags.writeable", "") + + _(", flags.c_contiguous", "") + + _(", flags.f_contiguous", "") + + _("]"); +}; + +// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data, +// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array. 
+template handle eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) { + constexpr ssize_t elem_size = sizeof(typename props::Scalar); + array a; + if (props::vector) + a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base); + else + a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() }, + src.data(), base); + + if (!writeable) + array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_; + + return a.release(); +} + +// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that +// reference the Eigen object's data with `base` as the python-registered base class (if omitted, +// the base will be set to None, and lifetime management is up to the caller). The numpy array is +// non-writeable if the given type is const. +template +handle eigen_ref_array(Type &src, handle parent = none()) { + // none here is to get past array's should-we-copy detection, which currently always + // copies when there is no base. Setting the base to None should be harmless. + return eigen_array_cast(src, parent, !std::is_const::value); +} + +// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a numpy +// array that references the encapsulated data with a python-side reference to the capsule to tie +// its destruction to that of any dependent python objects. Const-ness is determined by whether or +// not the Type of the pointer given is const. +template ::value>> +handle eigen_encapsulate(Type *src) { + capsule base(src, [](void *o) { delete static_cast(o); }); + return eigen_ref_array(*src, base); +} + +// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense +// types. 
+template +struct type_caster::value>> { + using Scalar = typename Type::Scalar; + using props = EigenProps; + + bool load(handle src, bool convert) { + // If we're in no-convert mode, only load if given an array of the correct type + if (!convert && !isinstance>(src)) + return false; + + // Coerce into an array, but don't do type conversion yet; the copy below handles it. + auto buf = array::ensure(src); + + if (!buf) + return false; + + auto dims = buf.ndim(); + if (dims < 1 || dims > 2) + return false; + + auto fits = props::conformable(buf); + if (!fits) + return false; + + // Allocate the new type, then build a numpy reference into it + value = Type(fits.rows, fits.cols); + auto ref = reinterpret_steal(eigen_ref_array(value)); + if (dims == 1) ref = ref.squeeze(); + else if (ref.ndim() == 1) buf = buf.squeeze(); + + int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr()); + + if (result < 0) { // Copy failed! + PyErr_Clear(); + return false; + } + + return true; + } + +private: + + // Cast implementation + template + static handle cast_impl(CType *src, return_value_policy policy, handle parent) { + switch (policy) { + case return_value_policy::take_ownership: + case return_value_policy::automatic: + return eigen_encapsulate(src); + case return_value_policy::move: + return eigen_encapsulate(new CType(std::move(*src))); + case return_value_policy::copy: + return eigen_array_cast(*src); + case return_value_policy::reference: + case return_value_policy::automatic_reference: + return eigen_ref_array(*src); + case return_value_policy::reference_internal: + return eigen_ref_array(*src, parent); + default: + throw cast_error("unhandled return_value_policy: should not happen!"); + }; + } + +public: + + // Normal returned non-reference, non-const value: + static handle cast(Type &&src, return_value_policy /* policy */, handle parent) { + return cast_impl(&src, return_value_policy::move, parent); + } + // If you return a non-reference const, we mark 
the numpy array readonly: + static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) { + return cast_impl(&src, return_value_policy::move, parent); + } + // lvalue reference return; default (automatic) becomes copy + static handle cast(Type &src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference) + policy = return_value_policy::copy; + return cast_impl(&src, policy, parent); + } + // const lvalue reference return; default (automatic) becomes copy + static handle cast(const Type &src, return_value_policy policy, handle parent) { + if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference) + policy = return_value_policy::copy; + return cast(&src, policy, parent); + } + // non-const pointer return + static handle cast(Type *src, return_value_policy policy, handle parent) { + return cast_impl(src, policy, parent); + } + // const pointer return + static handle cast(const Type *src, return_value_policy policy, handle parent) { + return cast_impl(src, policy, parent); + } + + static constexpr auto name = props::descriptor; + + operator Type*() { return &value; } + operator Type&() { return value; } + operator Type&&() && { return std::move(value); } + template using cast_op_type = movable_cast_op_type; + +private: + Type value; +}; + +// Base class for casting reference/map/block/etc. objects back to python. +template struct eigen_map_caster { +private: + using props = EigenProps; + +public: + + // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has + // to stay around), but we'll allow it under the assumption that you know what you're doing (and + // have an appropriate keep_alive in place). We return a numpy array pointing directly at the + // ref's data (The numpy array ends up read-only if the ref was to a const matrix type.) 
Note + // that this means you need to ensure you don't destroy the object in some other way (e.g. with + // an appropriate keep_alive, or with a reference to a statically allocated matrix). + static handle cast(const MapType &src, return_value_policy policy, handle parent) { + switch (policy) { + case return_value_policy::copy: + return eigen_array_cast(src); + case return_value_policy::reference_internal: + return eigen_array_cast(src, parent, is_eigen_mutable_map::value); + case return_value_policy::reference: + case return_value_policy::automatic: + case return_value_policy::automatic_reference: + return eigen_array_cast(src, none(), is_eigen_mutable_map::value); + default: + // move, take_ownership don't make any sense for a ref/map: + pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type"); + } + } + + static constexpr auto name = props::descriptor; + + // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return + // types but not bound arguments). We still provide them (with an explicitly delete) so that + // you end up here if you try anyway. + bool load(handle, bool) = delete; + operator MapType() = delete; + template using cast_op_type = MapType; +}; + +// We can return any map-like object (but can only load Refs, specialized next): +template struct type_caster::value>> + : eigen_map_caster {}; + +// Loader for Ref<...> arguments. See the documentation for info on how to make this work without +// copying (it requires some extra effort in many cases). +template +struct type_caster< + Eigen::Ref, + enable_if_t>::value> +> : public eigen_map_caster> { +private: + using Type = Eigen::Ref; + using props = EigenProps; + using Scalar = typename props::Scalar; + using MapType = Eigen::Map; + using Array = array_t; + static constexpr bool need_writeable = is_eigen_mutable_map::value; + // Delay construction (these have no default constructor) + std::unique_ptr map; + std::unique_ptr ref; + // Our array. 
When possible, this is just a numpy array pointing to the source data, but + // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an incompatible + // layout, or is an array of a type that needs to be converted). Using a numpy temporary + // (rather than an Eigen temporary) saves an extra copy when we need both type conversion and + // storage order conversion. (Note that we refuse to use this temporary copy when loading an + // argument for a Ref with M non-const, i.e. a read-write reference). + Array copy_or_ref; +public: + bool load(handle src, bool convert) { + // First check whether what we have is already an array of the right type. If not, we can't + // avoid a copy (because the copy is also going to do type conversion). + bool need_copy = !isinstance(src); + + EigenConformable fits; + if (!need_copy) { + // We don't need a converting copy, but we also need to check whether the strides are + // compatible with the Ref's stride requirements + Array aref = reinterpret_borrow(src); + + if (aref && (!need_writeable || aref.writeable())) { + fits = props::conformable(aref); + if (!fits) return false; // Incompatible dimensions + if (!fits.template stride_compatible()) + need_copy = true; + else + copy_or_ref = std::move(aref); + } + else { + need_copy = true; + } + } + + if (need_copy) { + // We need to copy: If we need a mutable reference, or we're not supposed to convert + // (either because we're in the no-convert overload pass, or because we're explicitly + // instructed not to copy (via `py::arg().noconvert()`) we have to fail loading. 
+ if (!convert || need_writeable) return false; + + Array copy = Array::ensure(src); + if (!copy) return false; + fits = props::conformable(copy); + if (!fits || !fits.template stride_compatible()) + return false; + copy_or_ref = std::move(copy); + loader_life_support::add_patient(copy_or_ref); + } + + ref.reset(); + map.reset(new MapType(data(copy_or_ref), fits.rows, fits.cols, make_stride(fits.stride.outer(), fits.stride.inner()))); + ref.reset(new Type(*map)); + + return true; + } + + operator Type*() { return ref.get(); } + operator Type&() { return *ref; } + template using cast_op_type = pybind11::detail::cast_op_type<_T>; + +private: + template ::value, int> = 0> + Scalar *data(Array &a) { return a.mutable_data(); } + + template ::value, int> = 0> + const Scalar *data(Array &a) { return a.data(); } + + // Attempt to figure out a constructor of `Stride` that will work. + // If both strides are fixed, use a default constructor: + template using stride_ctor_default = bool_constant< + S::InnerStrideAtCompileTime != Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic && + std::is_default_constructible::value>; + // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like + // Eigen::Stride, and use it: + template using stride_ctor_dual = bool_constant< + !stride_ctor_default::value && std::is_constructible::value>; + // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use + // it (passing whichever stride is dynamic). 
+ template using stride_ctor_outer = bool_constant< + !any_of, stride_ctor_dual>::value && + S::OuterStrideAtCompileTime == Eigen::Dynamic && S::InnerStrideAtCompileTime != Eigen::Dynamic && + std::is_constructible::value>; + template using stride_ctor_inner = bool_constant< + !any_of, stride_ctor_dual>::value && + S::InnerStrideAtCompileTime == Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic && + std::is_constructible::value>; + + template ::value, int> = 0> + static S make_stride(EigenIndex, EigenIndex) { return S(); } + template ::value, int> = 0> + static S make_stride(EigenIndex outer, EigenIndex inner) { return S(outer, inner); } + template ::value, int> = 0> + static S make_stride(EigenIndex outer, EigenIndex) { return S(outer); } + template ::value, int> = 0> + static S make_stride(EigenIndex, EigenIndex inner) { return S(inner); } + +}; + +// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not +// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout). +// load() is not supported, but we can cast them into the python domain by first copying to a +// regular Eigen::Matrix, then casting that. +template +struct type_caster::value>> { +protected: + using Matrix = Eigen::Matrix; + using props = EigenProps; +public: + static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) { + handle h = eigen_encapsulate(new Matrix(src)); + return h; + } + static handle cast(const Type *src, return_value_policy policy, handle parent) { return cast(*src, policy, parent); } + + static constexpr auto name = props::descriptor; + + // Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return + // types but not bound arguments). We still provide them (with an explicitly delete) so that + // you end up here if you try anyway. 
+ bool load(handle, bool) = delete; + operator Type() = delete; + template using cast_op_type = Type; +}; + +template +struct type_caster::value>> { + typedef typename Type::Scalar Scalar; + typedef remove_reference_t().outerIndexPtr())> StorageIndex; + typedef typename Type::Index Index; + static constexpr bool rowMajor = Type::IsRowMajor; + + bool load(handle src, bool) { + if (!src) + return false; + + auto obj = reinterpret_borrow(src); + object sparse_module = module::import("scipy.sparse"); + object matrix_type = sparse_module.attr( + rowMajor ? "csr_matrix" : "csc_matrix"); + + if (!obj.get_type().is(matrix_type)) { + try { + obj = matrix_type(obj); + } catch (const error_already_set &) { + return false; + } + } + + auto values = array_t((object) obj.attr("data")); + auto innerIndices = array_t((object) obj.attr("indices")); + auto outerIndices = array_t((object) obj.attr("indptr")); + auto shape = pybind11::tuple((pybind11::object) obj.attr("shape")); + auto nnz = obj.attr("nnz").cast(); + + if (!values || !innerIndices || !outerIndices) + return false; + + value = Eigen::MappedSparseMatrix( + shape[0].cast(), shape[1].cast(), nnz, + outerIndices.mutable_data(), innerIndices.mutable_data(), values.mutable_data()); + + return true; + } + + static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) { + const_cast(src).makeCompressed(); + + object matrix_type = module::import("scipy.sparse").attr( + rowMajor ? "csr_matrix" : "csc_matrix"); + + array data(src.nonZeros(), src.valuePtr()); + array outerIndices((rowMajor ? 
src.rows() : src.cols()) + 1, src.outerIndexPtr()); + array innerIndices(src.nonZeros(), src.innerIndexPtr()); + + return matrix_type( + std::make_tuple(data, innerIndices, outerIndices), + std::make_pair(src.rows(), src.cols()) + ).release(); + } + + PYBIND11_TYPE_CASTER(Type, _<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[", "scipy.sparse.csc_matrix[") + + npy_format_descriptor::name + _("]")); +}; + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) + +#if defined(__GNUG__) || defined(__clang__) +# pragma GCC diagnostic pop +#elif defined(_MSC_VER) +# pragma warning(pop) +#endif diff --git a/diffvg/pybind11/include/pybind11/embed.h b/diffvg/pybind11/include/pybind11/embed.h new file mode 100644 index 0000000000000000000000000000000000000000..eae86c714ca17191bb03fd4df7c9384422168858 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/embed.h @@ -0,0 +1,203 @@ +/* + pybind11/embed.h: Support for embedding the interpreter + + Copyright (c) 2017 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "pybind11.h" +#include "eval.h" + +#if defined(PYPY_VERSION) +# error Embedding the interpreter is not supported with PyPy +#endif + +#if PY_MAJOR_VERSION >= 3 +# define PYBIND11_EMBEDDED_MODULE_IMPL(name) \ + extern "C" PyObject *pybind11_init_impl_##name(); \ + extern "C" PyObject *pybind11_init_impl_##name() { \ + return pybind11_init_wrapper_##name(); \ + } +#else +# define PYBIND11_EMBEDDED_MODULE_IMPL(name) \ + extern "C" void pybind11_init_impl_##name(); \ + extern "C" void pybind11_init_impl_##name() { \ + pybind11_init_wrapper_##name(); \ + } +#endif + +/** \rst + Add a new module to the table of builtins for the interpreter. Must be + defined in global scope. The first macro parameter is the name of the + module (without quotes). 
The second parameter is the variable which will + be used as the interface to add functions and classes to the module. + + .. code-block:: cpp + + PYBIND11_EMBEDDED_MODULE(example, m) { + // ... initialize functions and classes here + m.def("foo", []() { + return "Hello, World!"; + }); + } + \endrst */ +#define PYBIND11_EMBEDDED_MODULE(name, variable) \ + static void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &); \ + static PyObject PYBIND11_CONCAT(*pybind11_init_wrapper_, name)() { \ + auto m = pybind11::module(PYBIND11_TOSTRING(name)); \ + try { \ + PYBIND11_CONCAT(pybind11_init_, name)(m); \ + return m.ptr(); \ + } catch (pybind11::error_already_set &e) { \ + PyErr_SetString(PyExc_ImportError, e.what()); \ + return nullptr; \ + } catch (const std::exception &e) { \ + PyErr_SetString(PyExc_ImportError, e.what()); \ + return nullptr; \ + } \ + } \ + PYBIND11_EMBEDDED_MODULE_IMPL(name) \ + pybind11::detail::embedded_module PYBIND11_CONCAT(pybind11_module_, name) \ + (PYBIND11_TOSTRING(name), \ + PYBIND11_CONCAT(pybind11_init_impl_, name)); \ + void PYBIND11_CONCAT(pybind11_init_, name)(pybind11::module &variable) + + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +/// Python 2.7/3.x compatible version of `PyImport_AppendInittab` and error checks. +struct embedded_module { +#if PY_MAJOR_VERSION >= 3 + using init_t = PyObject *(*)(); +#else + using init_t = void (*)(); +#endif + embedded_module(const char *name, init_t init) { + if (Py_IsInitialized()) + pybind11_fail("Can't add new modules after the interpreter has been initialized"); + + auto result = PyImport_AppendInittab(name, init); + if (result == -1) + pybind11_fail("Insufficient memory to add a new module"); + } +}; + +PYBIND11_NAMESPACE_END(detail) + +/** \rst + Initialize the Python interpreter. No other pybind11 or CPython API functions can be + called before this is done; with the exception of `PYBIND11_EMBEDDED_MODULE`. 
The + optional parameter can be used to skip the registration of signal handlers (see the + `Python documentation`_ for details). Calling this function again after the interpreter + has already been initialized is a fatal error. + + If initializing the Python interpreter fails, then the program is terminated. (This + is controlled by the CPython runtime and is an exception to pybind11's normal behavior + of throwing exceptions on errors.) + + .. _Python documentation: https://docs.python.org/3/c-api/init.html#c.Py_InitializeEx + \endrst */ +inline void initialize_interpreter(bool init_signal_handlers = true) { + if (Py_IsInitialized()) + pybind11_fail("The interpreter is already running"); + + Py_InitializeEx(init_signal_handlers ? 1 : 0); + + // Make .py files in the working directory available by default + module::import("sys").attr("path").cast().append("."); +} + +/** \rst + Shut down the Python interpreter. No pybind11 or CPython API functions can be called + after this. In addition, pybind11 objects must not outlive the interpreter: + + .. code-block:: cpp + + { // BAD + py::initialize_interpreter(); + auto hello = py::str("Hello, World!"); + py::finalize_interpreter(); + } // <-- BOOM, hello's destructor is called after interpreter shutdown + + { // GOOD + py::initialize_interpreter(); + { // scoped + auto hello = py::str("Hello, World!"); + } // <-- OK, hello is cleaned up properly + py::finalize_interpreter(); + } + + { // BETTER + py::scoped_interpreter guard{}; + auto hello = py::str("Hello, World!"); + } + + .. warning:: + + The interpreter can be restarted by calling `initialize_interpreter` again. + Modules created using pybind11 can be safely re-initialized. However, Python + itself cannot completely unload binary extension modules and there are several + caveats with regard to interpreter restarting. All the details can be found + in the CPython documentation. 
In short, not all interpreter memory may be + freed, either due to reference cycles or user-created global data. + + \endrst */ +inline void finalize_interpreter() { + handle builtins(PyEval_GetBuiltins()); + const char *id = PYBIND11_INTERNALS_ID; + + // Get the internals pointer (without creating it if it doesn't exist). It's possible for the + // internals to be created during Py_Finalize() (e.g. if a py::capsule calls `get_internals()` + // during destruction), so we get the pointer-pointer here and check it after Py_Finalize(). + detail::internals **internals_ptr_ptr = detail::get_internals_pp(); + // It could also be stashed in builtins, so look there too: + if (builtins.contains(id) && isinstance(builtins[id])) + internals_ptr_ptr = capsule(builtins[id]); + + Py_Finalize(); + + if (internals_ptr_ptr) { + delete *internals_ptr_ptr; + *internals_ptr_ptr = nullptr; + } +} + +/** \rst + Scope guard version of `initialize_interpreter` and `finalize_interpreter`. + This a move-only guard and only a single instance can exist. + + .. 
code-block:: cpp + + #include + + int main() { + py::scoped_interpreter guard{}; + py::print(Hello, World!); + } // <-- interpreter shutdown + \endrst */ +class scoped_interpreter { +public: + scoped_interpreter(bool init_signal_handlers = true) { + initialize_interpreter(init_signal_handlers); + } + + scoped_interpreter(const scoped_interpreter &) = delete; + scoped_interpreter(scoped_interpreter &&other) noexcept { other.is_valid = false; } + scoped_interpreter &operator=(const scoped_interpreter &) = delete; + scoped_interpreter &operator=(scoped_interpreter &&) = delete; + + ~scoped_interpreter() { + if (is_valid) + finalize_interpreter(); + } + +private: + bool is_valid = true; +}; + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/eval.h b/diffvg/pybind11/include/pybind11/eval.h new file mode 100644 index 0000000000000000000000000000000000000000..ba82cf42ae3673a3de391eb55777ef413c43dc33 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/eval.h @@ -0,0 +1,132 @@ +/* + pybind11/exec.h: Support for evaluating Python expressions and statements + from strings and files + + Copyright (c) 2016 Klemens Morgenstern and + Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "pybind11.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +enum eval_mode { + /// Evaluate a string containing an isolated expression + eval_expr, + + /// Evaluate a string containing a single statement. Returns \c none + eval_single_statement, + + /// Evaluate a string containing a sequence of statement. 
Returns \c none + eval_statements +}; + +template +object eval(str expr, object global = globals(), object local = object()) { + if (!local) + local = global; + + /* PyRun_String does not accept a PyObject / encoding specifier, + this seems to be the only alternative */ + std::string buffer = "# -*- coding: utf-8 -*-\n" + (std::string) expr; + + int start; + switch (mode) { + case eval_expr: start = Py_eval_input; break; + case eval_single_statement: start = Py_single_input; break; + case eval_statements: start = Py_file_input; break; + default: pybind11_fail("invalid evaluation mode"); + } + + PyObject *result = PyRun_String(buffer.c_str(), start, global.ptr(), local.ptr()); + if (!result) + throw error_already_set(); + return reinterpret_steal(result); +} + +template +object eval(const char (&s)[N], object global = globals(), object local = object()) { + /* Support raw string literals by removing common leading whitespace */ + auto expr = (s[0] == '\n') ? str(module::import("textwrap").attr("dedent")(s)) + : str(s); + return eval(expr, global, local); +} + +inline void exec(str expr, object global = globals(), object local = object()) { + eval(expr, global, local); +} + +template +void exec(const char (&s)[N], object global = globals(), object local = object()) { + eval(s, global, local); +} + +#if defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x3000000 +template +object eval_file(str, object, object) { + pybind11_fail("eval_file not supported in PyPy3. Use eval"); +} +template +object eval_file(str, object) { + pybind11_fail("eval_file not supported in PyPy3. Use eval"); +} +template +object eval_file(str) { + pybind11_fail("eval_file not supported in PyPy3. 
Use eval"); +} +#else +template +object eval_file(str fname, object global = globals(), object local = object()) { + if (!local) + local = global; + + int start; + switch (mode) { + case eval_expr: start = Py_eval_input; break; + case eval_single_statement: start = Py_single_input; break; + case eval_statements: start = Py_file_input; break; + default: pybind11_fail("invalid evaluation mode"); + } + + int closeFile = 1; + std::string fname_str = (std::string) fname; +#if PY_VERSION_HEX >= 0x03040000 + FILE *f = _Py_fopen_obj(fname.ptr(), "r"); +#elif PY_VERSION_HEX >= 0x03000000 + FILE *f = _Py_fopen(fname.ptr(), "r"); +#else + /* No unicode support in open() :( */ + auto fobj = reinterpret_steal(PyFile_FromString( + const_cast(fname_str.c_str()), + const_cast("r"))); + FILE *f = nullptr; + if (fobj) + f = PyFile_AsFile(fobj.ptr()); + closeFile = 0; +#endif + if (!f) { + PyErr_Clear(); + pybind11_fail("File \"" + fname_str + "\" could not be opened!"); + } + +#if PY_VERSION_HEX < 0x03000000 && defined(PYPY_VERSION) + PyObject *result = PyRun_File(f, fname_str.c_str(), start, global.ptr(), + local.ptr()); + (void) closeFile; +#else + PyObject *result = PyRun_FileEx(f, fname_str.c_str(), start, global.ptr(), + local.ptr(), closeFile); +#endif + + if (!result) + throw error_already_set(); + return reinterpret_steal(result); +} +#endif + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/functional.h b/diffvg/pybind11/include/pybind11/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..57b6cd210f4b99d9d76a93c17aeed3a183fc01a0 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/functional.h @@ -0,0 +1,101 @@ +/* + pybind11/functional.h: std::function<> support + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "pybind11.h" +#include + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +template +struct type_caster> { + using type = std::function; + using retval_type = conditional_t::value, void_type, Return>; + using function_type = Return (*) (Args...); + +public: + bool load(handle src, bool convert) { + if (src.is_none()) { + // Defer accepting None to other overloads (if we aren't in convert mode): + if (!convert) return false; + return true; + } + + if (!isinstance(src)) + return false; + + auto func = reinterpret_borrow(src); + + /* + When passing a C++ function as an argument to another C++ + function via Python, every function call would normally involve + a full C++ -> Python -> C++ roundtrip, which can be prohibitive. + Here, we try to at least detect the case where the function is + stateless (i.e. function pointer or lambda function without + captured variables), in which case the roundtrip can be avoided. + */ + if (auto cfunc = func.cpp_function()) { + auto c = reinterpret_borrow(PyCFunction_GET_SELF(cfunc.ptr())); + auto rec = (function_record *) c; + + if (rec && rec->is_stateless && + same_type(typeid(function_type), *reinterpret_cast(rec->data[1]))) { + struct capture { function_type f; }; + value = ((capture *) &rec->data)->f; + return true; + } + } + + // ensure GIL is held during functor destruction + struct func_handle { + function f; + func_handle(function&& f_) : f(std::move(f_)) {} + func_handle(const func_handle&) = default; + ~func_handle() { + gil_scoped_acquire acq; + function kill_f(std::move(f)); + } + }; + + // to emulate 'move initialization capture' in C++11 + struct func_wrapper { + func_handle hfunc; + func_wrapper(func_handle&& hf): hfunc(std::move(hf)) {} + Return operator()(Args... 
args) const { + gil_scoped_acquire acq; + object retval(hfunc.f(std::forward(args)...)); + /* Visual studio 2015 parser issue: need parentheses around this expression */ + return (retval.template cast()); + } + }; + + value = func_wrapper(func_handle(std::move(func))); + return true; + } + + template + static handle cast(Func &&f_, return_value_policy policy, handle /* parent */) { + if (!f_) + return none().inc_ref(); + + auto result = f_.template target(); + if (result) + return cpp_function(*result, policy).release(); + else + return cpp_function(std::forward(f_), policy).release(); + } + + PYBIND11_TYPE_CASTER(type, _("Callable[[") + concat(make_caster::name...) + _("], ") + + make_caster::name + _("]")); +}; + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/iostream.h b/diffvg/pybind11/include/pybind11/iostream.h new file mode 100644 index 0000000000000000000000000000000000000000..eaf92dfa49add54c298844b31898a82de3fb429d --- /dev/null +++ b/diffvg/pybind11/include/pybind11/iostream.h @@ -0,0 +1,209 @@ +/* + pybind11/iostream.h -- Tools to assist with redirecting cout and cerr to Python + + Copyright (c) 2017 Henry F. Schreiner + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "pybind11.h" + +#include +#include +#include +#include +#include + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +// Buffer that writes to Python instead of C++ +class pythonbuf : public std::streambuf { +private: + using traits_type = std::streambuf::traits_type; + + const size_t buf_size; + std::unique_ptr d_buffer; + object pywrite; + object pyflush; + + int overflow(int c) { + if (!traits_type::eq_int_type(c, traits_type::eof())) { + *pptr() = traits_type::to_char_type(c); + pbump(1); + } + return sync() == 0 ? 
traits_type::not_eof(c) : traits_type::eof(); + } + + int sync() { + if (pbase() != pptr()) { + // This subtraction cannot be negative, so dropping the sign + str line(pbase(), static_cast(pptr() - pbase())); + + { + gil_scoped_acquire tmp; + pywrite(line); + pyflush(); + } + + setp(pbase(), epptr()); + } + return 0; + } + +public: + + pythonbuf(object pyostream, size_t buffer_size = 1024) + : buf_size(buffer_size), + d_buffer(new char[buf_size]), + pywrite(pyostream.attr("write")), + pyflush(pyostream.attr("flush")) { + setp(d_buffer.get(), d_buffer.get() + buf_size - 1); + } + + pythonbuf(pythonbuf&&) = default; + + /// Sync before destroy + ~pythonbuf() { + sync(); + } +}; + +PYBIND11_NAMESPACE_END(detail) + + +/** \rst + This a move-only guard that redirects output. + + .. code-block:: cpp + + #include + + ... + + { + py::scoped_ostream_redirect output; + std::cout << "Hello, World!"; // Python stdout + } // <-- return std::cout to normal + + You can explicitly pass the c++ stream and the python object, + for example to guard stderr instead. + + .. 
code-block:: cpp + + { + py::scoped_ostream_redirect output{std::cerr, py::module::import("sys").attr("stderr")}; + std::cerr << "Hello, World!"; + } + \endrst */ +class scoped_ostream_redirect { +protected: + std::streambuf *old; + std::ostream &costream; + detail::pythonbuf buffer; + +public: + scoped_ostream_redirect( + std::ostream &costream = std::cout, + object pyostream = module::import("sys").attr("stdout")) + : costream(costream), buffer(pyostream) { + old = costream.rdbuf(&buffer); + } + + ~scoped_ostream_redirect() { + costream.rdbuf(old); + } + + scoped_ostream_redirect(const scoped_ostream_redirect &) = delete; + scoped_ostream_redirect(scoped_ostream_redirect &&other) = default; + scoped_ostream_redirect &operator=(const scoped_ostream_redirect &) = delete; + scoped_ostream_redirect &operator=(scoped_ostream_redirect &&) = delete; +}; + + +/** \rst + Like `scoped_ostream_redirect`, but redirects cerr by default. This class + is provided primary to make ``py::call_guard`` easier to make. + + .. code-block:: cpp + + m.def("noisy_func", &noisy_func, + py::call_guard()); + +\endrst */ +class scoped_estream_redirect : public scoped_ostream_redirect { +public: + scoped_estream_redirect( + std::ostream &costream = std::cerr, + object pyostream = module::import("sys").attr("stderr")) + : scoped_ostream_redirect(costream,pyostream) {} +}; + + +PYBIND11_NAMESPACE_BEGIN(detail) + +// Class to redirect output as a context manager. C++ backend. 
+class OstreamRedirect { + bool do_stdout_; + bool do_stderr_; + std::unique_ptr redirect_stdout; + std::unique_ptr redirect_stderr; + +public: + OstreamRedirect(bool do_stdout = true, bool do_stderr = true) + : do_stdout_(do_stdout), do_stderr_(do_stderr) {} + + void enter() { + if (do_stdout_) + redirect_stdout.reset(new scoped_ostream_redirect()); + if (do_stderr_) + redirect_stderr.reset(new scoped_estream_redirect()); + } + + void exit() { + redirect_stdout.reset(); + redirect_stderr.reset(); + } +}; + +PYBIND11_NAMESPACE_END(detail) + +/** \rst + This is a helper function to add a C++ redirect context manager to Python + instead of using a C++ guard. To use it, add the following to your binding code: + + .. code-block:: cpp + + #include + + ... + + py::add_ostream_redirect(m, "ostream_redirect"); + + You now have a Python context manager that redirects your output: + + .. code-block:: python + + with m.ostream_redirect(): + m.print_to_cout_function() + + This manager can optionally be told which streams to operate on: + + .. code-block:: python + + with m.ostream_redirect(stdout=true, stderr=true): + m.noisy_function_with_error_printing() + + \endrst */ +inline class_ add_ostream_redirect(module m, std::string name = "ostream_redirect") { + return class_(m, name.c_str(), module_local()) + .def(init(), arg("stdout")=true, arg("stderr")=true) + .def("__enter__", &detail::OstreamRedirect::enter) + .def("__exit__", [](detail::OstreamRedirect &self_, args) { self_.exit(); }); +} + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/numpy.h b/diffvg/pybind11/include/pybind11/numpy.h new file mode 100644 index 0000000000000000000000000000000000000000..674450a631a49213a7fc83feed3a10e36934da61 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/numpy.h @@ -0,0 +1,1647 @@ +/* + pybind11/numpy.h: Basic NumPy support, vectorize() wrapper + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. 
Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "pybind11.h" +#include "complex.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant +#endif + +/* This will be true on all flat address space platforms and allows us to reduce the + whole npy_intp / ssize_t / Py_intptr_t business down to just ssize_t for all size + and dimension types (e.g. shape, strides, indexing), instead of inflicting this + upon the library user. */ +static_assert(sizeof(ssize_t) == sizeof(Py_intptr_t), "ssize_t != Py_intptr_t"); + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +class array; // Forward declaration + +PYBIND11_NAMESPACE_BEGIN(detail) + +template <> struct handle_type_name { static constexpr auto name = _("numpy.ndarray"); }; + +template struct npy_format_descriptor; + +struct PyArrayDescr_Proxy { + PyObject_HEAD + PyObject *typeobj; + char kind; + char type; + char byteorder; + char flags; + int type_num; + int elsize; + int alignment; + char *subarray; + PyObject *fields; + PyObject *names; +}; + +struct PyArray_Proxy { + PyObject_HEAD + char *data; + int nd; + ssize_t *dimensions; + ssize_t *strides; + PyObject *base; + PyObject *descr; + int flags; +}; + +struct PyVoidScalarObject_Proxy { + PyObject_VAR_HEAD + char *obval; + PyArrayDescr_Proxy *descr; + int flags; + PyObject *base; +}; + +struct numpy_type_info { + PyObject* dtype_ptr; + std::string format_str; +}; + +struct numpy_internals { + std::unordered_map registered_dtypes; + + numpy_type_info *get_type_info(const std::type_info& tinfo, bool throw_if_missing = true) { + auto it = registered_dtypes.find(std::type_index(tinfo)); + if (it != registered_dtypes.end()) + return &(it->second); + if (throw_if_missing) + 
pybind11_fail(std::string("NumPy type info missing for ") + tinfo.name()); + return nullptr; + } + + template numpy_type_info *get_type_info(bool throw_if_missing = true) { + return get_type_info(typeid(typename std::remove_cv::type), throw_if_missing); + } +}; + +inline PYBIND11_NOINLINE void load_numpy_internals(numpy_internals* &ptr) { + ptr = &get_or_create_shared_data("_numpy_internals"); +} + +inline numpy_internals& get_numpy_internals() { + static numpy_internals* ptr = nullptr; + if (!ptr) + load_numpy_internals(ptr); + return *ptr; +} + +template struct same_size { + template using as = bool_constant; +}; + +template constexpr int platform_lookup() { return -1; } + +// Lookup a type according to its size, and return a value corresponding to the NumPy typenum. +template +constexpr int platform_lookup(int I, Ints... Is) { + return sizeof(Concrete) == sizeof(T) ? I : platform_lookup(Is...); +} + +struct npy_api { + enum constants { + NPY_ARRAY_C_CONTIGUOUS_ = 0x0001, + NPY_ARRAY_F_CONTIGUOUS_ = 0x0002, + NPY_ARRAY_OWNDATA_ = 0x0004, + NPY_ARRAY_FORCECAST_ = 0x0010, + NPY_ARRAY_ENSUREARRAY_ = 0x0040, + NPY_ARRAY_ALIGNED_ = 0x0100, + NPY_ARRAY_WRITEABLE_ = 0x0400, + NPY_BOOL_ = 0, + NPY_BYTE_, NPY_UBYTE_, + NPY_SHORT_, NPY_USHORT_, + NPY_INT_, NPY_UINT_, + NPY_LONG_, NPY_ULONG_, + NPY_LONGLONG_, NPY_ULONGLONG_, + NPY_FLOAT_, NPY_DOUBLE_, NPY_LONGDOUBLE_, + NPY_CFLOAT_, NPY_CDOUBLE_, NPY_CLONGDOUBLE_, + NPY_OBJECT_ = 17, + NPY_STRING_, NPY_UNICODE_, NPY_VOID_, + // Platform-dependent normalization + NPY_INT8_ = NPY_BYTE_, + NPY_UINT8_ = NPY_UBYTE_, + NPY_INT16_ = NPY_SHORT_, + NPY_UINT16_ = NPY_USHORT_, + // `npy_common.h` defines the integer aliases. In order, it checks: + // NPY_BITSOF_LONG, NPY_BITSOF_LONGLONG, NPY_BITSOF_INT, NPY_BITSOF_SHORT, NPY_BITSOF_CHAR + // and assigns the alias to the first matching size, so we should check in this order. 
+ NPY_INT32_ = platform_lookup( + NPY_LONG_, NPY_INT_, NPY_SHORT_), + NPY_UINT32_ = platform_lookup( + NPY_ULONG_, NPY_UINT_, NPY_USHORT_), + NPY_INT64_ = platform_lookup( + NPY_LONG_, NPY_LONGLONG_, NPY_INT_), + NPY_UINT64_ = platform_lookup( + NPY_ULONG_, NPY_ULONGLONG_, NPY_UINT_), + }; + + typedef struct { + Py_intptr_t *ptr; + int len; + } PyArray_Dims; + + static npy_api& get() { + static npy_api api = lookup(); + return api; + } + + bool PyArray_Check_(PyObject *obj) const { + return (bool) PyObject_TypeCheck(obj, PyArray_Type_); + } + bool PyArrayDescr_Check_(PyObject *obj) const { + return (bool) PyObject_TypeCheck(obj, PyArrayDescr_Type_); + } + + unsigned int (*PyArray_GetNDArrayCFeatureVersion_)(); + PyObject *(*PyArray_DescrFromType_)(int); + PyObject *(*PyArray_NewFromDescr_) + (PyTypeObject *, PyObject *, int, Py_intptr_t const *, + Py_intptr_t const *, void *, int, PyObject *); + // Unused. Not removed because that affects ABI of the class. + PyObject *(*PyArray_DescrNewFromType_)(int); + int (*PyArray_CopyInto_)(PyObject *, PyObject *); + PyObject *(*PyArray_NewCopy_)(PyObject *, int); + PyTypeObject *PyArray_Type_; + PyTypeObject *PyVoidArrType_Type_; + PyTypeObject *PyArrayDescr_Type_; + PyObject *(*PyArray_DescrFromScalar_)(PyObject *); + PyObject *(*PyArray_FromAny_) (PyObject *, PyObject *, int, int, int, PyObject *); + int (*PyArray_DescrConverter_) (PyObject *, PyObject **); + bool (*PyArray_EquivTypes_) (PyObject *, PyObject *); + int (*PyArray_GetArrayParamsFromObject_)(PyObject *, PyObject *, unsigned char, PyObject **, int *, + Py_intptr_t *, PyObject **, PyObject *); + PyObject *(*PyArray_Squeeze_)(PyObject *); + // Unused. Not removed because that affects ABI of the class. 
+ int (*PyArray_SetBaseObject_)(PyObject *, PyObject *); + PyObject* (*PyArray_Resize_)(PyObject*, PyArray_Dims*, int, int); +private: + enum functions { + API_PyArray_GetNDArrayCFeatureVersion = 211, + API_PyArray_Type = 2, + API_PyArrayDescr_Type = 3, + API_PyVoidArrType_Type = 39, + API_PyArray_DescrFromType = 45, + API_PyArray_DescrFromScalar = 57, + API_PyArray_FromAny = 69, + API_PyArray_Resize = 80, + API_PyArray_CopyInto = 82, + API_PyArray_NewCopy = 85, + API_PyArray_NewFromDescr = 94, + API_PyArray_DescrNewFromType = 96, + API_PyArray_DescrConverter = 174, + API_PyArray_EquivTypes = 182, + API_PyArray_GetArrayParamsFromObject = 278, + API_PyArray_Squeeze = 136, + API_PyArray_SetBaseObject = 282 + }; + + static npy_api lookup() { + module m = module::import("numpy.core.multiarray"); + auto c = m.attr("_ARRAY_API"); +#if PY_MAJOR_VERSION >= 3 + void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), NULL); +#else + void **api_ptr = (void **) PyCObject_AsVoidPtr(c.ptr()); +#endif + npy_api api; +#define DECL_NPY_API(Func) api.Func##_ = (decltype(api.Func##_)) api_ptr[API_##Func]; + DECL_NPY_API(PyArray_GetNDArrayCFeatureVersion); + if (api.PyArray_GetNDArrayCFeatureVersion_() < 0x7) + pybind11_fail("pybind11 numpy support requires numpy >= 1.7.0"); + DECL_NPY_API(PyArray_Type); + DECL_NPY_API(PyVoidArrType_Type); + DECL_NPY_API(PyArrayDescr_Type); + DECL_NPY_API(PyArray_DescrFromType); + DECL_NPY_API(PyArray_DescrFromScalar); + DECL_NPY_API(PyArray_FromAny); + DECL_NPY_API(PyArray_Resize); + DECL_NPY_API(PyArray_CopyInto); + DECL_NPY_API(PyArray_NewCopy); + DECL_NPY_API(PyArray_NewFromDescr); + DECL_NPY_API(PyArray_DescrNewFromType); + DECL_NPY_API(PyArray_DescrConverter); + DECL_NPY_API(PyArray_EquivTypes); + DECL_NPY_API(PyArray_GetArrayParamsFromObject); + DECL_NPY_API(PyArray_Squeeze); + DECL_NPY_API(PyArray_SetBaseObject); +#undef DECL_NPY_API + return api; + } +}; + +inline PyArray_Proxy* array_proxy(void* ptr) { + return reinterpret_cast(ptr); +} + 
+inline const PyArray_Proxy* array_proxy(const void* ptr) { + return reinterpret_cast(ptr); +} + +inline PyArrayDescr_Proxy* array_descriptor_proxy(PyObject* ptr) { + return reinterpret_cast(ptr); +} + +inline const PyArrayDescr_Proxy* array_descriptor_proxy(const PyObject* ptr) { + return reinterpret_cast(ptr); +} + +inline bool check_flags(const void* ptr, int flag) { + return (flag == (array_proxy(ptr)->flags & flag)); +} + +template struct is_std_array : std::false_type { }; +template struct is_std_array> : std::true_type { }; +template struct is_complex : std::false_type { }; +template struct is_complex> : std::true_type { }; + +template struct array_info_scalar { + typedef T type; + static constexpr bool is_array = false; + static constexpr bool is_empty = false; + static constexpr auto extents = _(""); + static void append_extents(list& /* shape */) { } +}; +// Computes underlying type and a comma-separated list of extents for array +// types (any mix of std::array and built-in arrays). An array of char is +// treated as scalar because it gets special handling. +template struct array_info : array_info_scalar { }; +template struct array_info> { + using type = typename array_info::type; + static constexpr bool is_array = true; + static constexpr bool is_empty = (N == 0) || array_info::is_empty; + static constexpr size_t extent = N; + + // appends the extents to shape + static void append_extents(list& shape) { + shape.append(N); + array_info::append_extents(shape); + } + + static constexpr auto extents = _::is_array>( + concat(_(), array_info::extents), _() + ); +}; +// For numpy we have special handling for arrays of characters, so we don't include +// the size in the array extents. 
+template struct array_info : array_info_scalar { }; +template struct array_info> : array_info_scalar> { }; +template struct array_info : array_info> { }; +template using remove_all_extents_t = typename array_info::type; + +template using is_pod_struct = all_of< + std::is_standard_layout, // since we're accessing directly in memory we need a standard layout type +#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(_GLIBCXX_USE_CXX11_ABI) + // _GLIBCXX_USE_CXX11_ABI indicates that we're using libstdc++ from GCC 5 or newer, independent + // of the actual compiler (Clang can also use libstdc++, but it always defines __GNUC__ == 4). + std::is_trivially_copyable, +#else + // GCC 4 doesn't implement is_trivially_copyable, so approximate it + std::is_trivially_destructible, + satisfies_any_of, +#endif + satisfies_none_of +>; + +template ssize_t byte_offset_unsafe(const Strides &) { return 0; } +template +ssize_t byte_offset_unsafe(const Strides &strides, ssize_t i, Ix... index) { + return i * strides[Dim] + byte_offset_unsafe(strides, index...); +} + +/** + * Proxy class providing unsafe, unchecked const access to array data. This is constructed through + * the `unchecked()` method of `array` or the `unchecked()` method of `array_t`. `Dims` + * will be -1 for dimensions determined at runtime. + */ +template +class unchecked_reference { +protected: + static constexpr bool Dynamic = Dims < 0; + const unsigned char *data_; + // Storing the shape & strides in local variables (i.e. 
these arrays) allows the compiler to + // make large performance gains on big, nested loops, but requires compile-time dimensions + conditional_t> + shape_, strides_; + const ssize_t dims_; + + friend class pybind11::array; + // Constructor for compile-time dimensions: + template + unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t) + : data_{reinterpret_cast(data)}, dims_{Dims} { + for (size_t i = 0; i < (size_t) dims_; i++) { + shape_[i] = shape[i]; + strides_[i] = strides[i]; + } + } + // Constructor for runtime dimensions: + template + unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t dims) + : data_{reinterpret_cast(data)}, shape_{shape}, strides_{strides}, dims_{dims} {} + +public: + /** + * Unchecked const reference access to data at the given indices. For a compile-time known + * number of dimensions, this requires the correct number of arguments; for run-time + * dimensionality, this is not checked (and so is up to the caller to use safely). + */ + template const T &operator()(Ix... index) const { + static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic, + "Invalid number of indices for unchecked array reference"); + return *reinterpret_cast(data_ + byte_offset_unsafe(strides_, ssize_t(index)...)); + } + /** + * Unchecked const reference access to data; this operator only participates if the reference + * is to a 1-dimensional array. When present, this is exactly equivalent to `obj(index)`. + */ + template > + const T &operator[](ssize_t index) const { return operator()(index); } + + /// Pointer access to the data at the given indices. + template const T *data(Ix... ix) const { return &operator()(ssize_t(ix)...); } + + /// Returns the item size, i.e. sizeof(T) + constexpr static ssize_t itemsize() { return sizeof(T); } + + /// Returns the shape (i.e. 
size) of dimension `dim` + ssize_t shape(ssize_t dim) const { return shape_[(size_t) dim]; } + + /// Returns the number of dimensions of the array + ssize_t ndim() const { return dims_; } + + /// Returns the total number of elements in the referenced array, i.e. the product of the shapes + template + enable_if_t size() const { + return std::accumulate(shape_.begin(), shape_.end(), (ssize_t) 1, std::multiplies()); + } + template + enable_if_t size() const { + return std::accumulate(shape_, shape_ + ndim(), (ssize_t) 1, std::multiplies()); + } + + /// Returns the total number of bytes used by the referenced data. Note that the actual span in + /// memory may be larger if the referenced array has non-contiguous strides (e.g. for a slice). + ssize_t nbytes() const { + return size() * itemsize(); + } +}; + +template +class unchecked_mutable_reference : public unchecked_reference { + friend class pybind11::array; + using ConstBase = unchecked_reference; + using ConstBase::ConstBase; + using ConstBase::Dynamic; +public: + /// Mutable, unchecked access to data at the given indices. + template T& operator()(Ix... index) { + static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic, + "Invalid number of indices for unchecked array reference"); + return const_cast(ConstBase::operator()(index...)); + } + /** + * Mutable, unchecked access data at the given index; this operator only participates if the + * reference is to a 1-dimensional array (or has runtime dimensions). When present, this is + * exactly equivalent to `obj(index)`. + */ + template > + T &operator[](ssize_t index) { return operator()(index); } + + /// Mutable pointer access to the data at the given indices. + template T *mutable_data(Ix... 
ix) { return &operator()(ssize_t(ix)...); } +}; + +template +struct type_caster> { + static_assert(Dim == 0 && Dim > 0 /* always fail */, "unchecked array proxy object is not castable"); +}; +template +struct type_caster> : type_caster> {}; + +PYBIND11_NAMESPACE_END(detail) + +class dtype : public object { +public: + PYBIND11_OBJECT_DEFAULT(dtype, object, detail::npy_api::get().PyArrayDescr_Check_); + + explicit dtype(const buffer_info &info) { + dtype descr(_dtype_from_pep3118()(PYBIND11_STR_TYPE(info.format))); + // If info.itemsize == 0, use the value calculated from the format string + m_ptr = descr.strip_padding(info.itemsize ? info.itemsize : descr.itemsize()).release().ptr(); + } + + explicit dtype(const std::string &format) { + m_ptr = from_args(pybind11::str(format)).release().ptr(); + } + + dtype(const char *format) : dtype(std::string(format)) { } + + dtype(list names, list formats, list offsets, ssize_t itemsize) { + dict args; + args["names"] = names; + args["formats"] = formats; + args["offsets"] = offsets; + args["itemsize"] = pybind11::int_(itemsize); + m_ptr = from_args(args).release().ptr(); + } + + /// This is essentially the same as calling numpy.dtype(args) in Python. + static dtype from_args(object args) { + PyObject *ptr = nullptr; + if (!detail::npy_api::get().PyArray_DescrConverter_(args.ptr(), &ptr) || !ptr) + throw error_already_set(); + return reinterpret_steal(ptr); + } + + /// Return dtype associated with a C++ type. + template static dtype of() { + return detail::npy_format_descriptor::type>::dtype(); + } + + /// Size of the data type in bytes. + ssize_t itemsize() const { + return detail::array_descriptor_proxy(m_ptr)->elsize; + } + + /// Returns true for structured data types. + bool has_fields() const { + return detail::array_descriptor_proxy(m_ptr)->names != nullptr; + } + + /// Single-character type code. 
+ char kind() const { + return detail::array_descriptor_proxy(m_ptr)->kind; + } + +private: + static object _dtype_from_pep3118() { + static PyObject *obj = module::import("numpy.core._internal") + .attr("_dtype_from_pep3118").cast().release().ptr(); + return reinterpret_borrow(obj); + } + + dtype strip_padding(ssize_t itemsize) { + // Recursively strip all void fields with empty names that are generated for + // padding fields (as of NumPy v1.11). + if (!has_fields()) + return *this; + + struct field_descr { PYBIND11_STR_TYPE name; object format; pybind11::int_ offset; }; + std::vector field_descriptors; + + for (auto field : attr("fields").attr("items")()) { + auto spec = field.cast(); + auto name = spec[0].cast(); + auto format = spec[1].cast()[0].cast(); + auto offset = spec[1].cast()[1].cast(); + if (!len(name) && format.kind() == 'V') + continue; + field_descriptors.push_back({(PYBIND11_STR_TYPE) name, format.strip_padding(format.itemsize()), offset}); + } + + std::sort(field_descriptors.begin(), field_descriptors.end(), + [](const field_descr& a, const field_descr& b) { + return a.offset.cast() < b.offset.cast(); + }); + + list names, formats, offsets; + for (auto& descr : field_descriptors) { + names.append(descr.name); + formats.append(descr.format); + offsets.append(descr.offset); + } + return dtype(names, formats, offsets, itemsize); + } +}; + +class array : public buffer { +public: + PYBIND11_OBJECT_CVT(array, buffer, detail::npy_api::get().PyArray_Check_, raw_array) + + enum { + c_style = detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_, + f_style = detail::npy_api::NPY_ARRAY_F_CONTIGUOUS_, + forcecast = detail::npy_api::NPY_ARRAY_FORCECAST_ + }; + + array() : array(0, static_cast(nullptr)) {} + + using ShapeContainer = detail::any_container; + using StridesContainer = detail::any_container; + + // Constructs an array taking shape/strides from arbitrary container types + array(const pybind11::dtype &dt, ShapeContainer shape, StridesContainer strides, + const 
void *ptr = nullptr, handle base = handle()) { + + if (strides->empty()) + *strides = c_strides(*shape, dt.itemsize()); + + auto ndim = shape->size(); + if (ndim != strides->size()) + pybind11_fail("NumPy: shape ndim doesn't match strides ndim"); + auto descr = dt; + + int flags = 0; + if (base && ptr) { + if (isinstance(base)) + /* Copy flags from base (except ownership bit) */ + flags = reinterpret_borrow(base).flags() & ~detail::npy_api::NPY_ARRAY_OWNDATA_; + else + /* Writable by default, easy to downgrade later on if needed */ + flags = detail::npy_api::NPY_ARRAY_WRITEABLE_; + } + + auto &api = detail::npy_api::get(); + auto tmp = reinterpret_steal(api.PyArray_NewFromDescr_( + api.PyArray_Type_, descr.release().ptr(), (int) ndim, shape->data(), strides->data(), + const_cast(ptr), flags, nullptr)); + if (!tmp) + throw error_already_set(); + if (ptr) { + if (base) { + api.PyArray_SetBaseObject_(tmp.ptr(), base.inc_ref().ptr()); + } else { + tmp = reinterpret_steal(api.PyArray_NewCopy_(tmp.ptr(), -1 /* any order */)); + } + } + m_ptr = tmp.release().ptr(); + } + + array(const pybind11::dtype &dt, ShapeContainer shape, const void *ptr = nullptr, handle base = handle()) + : array(dt, std::move(shape), {}, ptr, base) { } + + template ::value && !std::is_same::value>> + array(const pybind11::dtype &dt, T count, const void *ptr = nullptr, handle base = handle()) + : array(dt, {{count}}, ptr, base) { } + + template + array(ShapeContainer shape, StridesContainer strides, const T *ptr, handle base = handle()) + : array(pybind11::dtype::of(), std::move(shape), std::move(strides), ptr, base) { } + + template + array(ShapeContainer shape, const T *ptr, handle base = handle()) + : array(std::move(shape), {}, ptr, base) { } + + template + explicit array(ssize_t count, const T *ptr, handle base = handle()) : array({count}, {}, ptr, base) { } + + explicit array(const buffer_info &info, handle base = handle()) + : array(pybind11::dtype(info), info.shape, info.strides, info.ptr, 
base) { } + + /// Array descriptor (dtype) + pybind11::dtype dtype() const { + return reinterpret_borrow(detail::array_proxy(m_ptr)->descr); + } + + /// Total number of elements + ssize_t size() const { + return std::accumulate(shape(), shape() + ndim(), (ssize_t) 1, std::multiplies()); + } + + /// Byte size of a single element + ssize_t itemsize() const { + return detail::array_descriptor_proxy(detail::array_proxy(m_ptr)->descr)->elsize; + } + + /// Total number of bytes + ssize_t nbytes() const { + return size() * itemsize(); + } + + /// Number of dimensions + ssize_t ndim() const { + return detail::array_proxy(m_ptr)->nd; + } + + /// Base object + object base() const { + return reinterpret_borrow(detail::array_proxy(m_ptr)->base); + } + + /// Dimensions of the array + const ssize_t* shape() const { + return detail::array_proxy(m_ptr)->dimensions; + } + + /// Dimension along a given axis + ssize_t shape(ssize_t dim) const { + if (dim >= ndim()) + fail_dim_check(dim, "invalid axis"); + return shape()[dim]; + } + + /// Strides of the array + const ssize_t* strides() const { + return detail::array_proxy(m_ptr)->strides; + } + + /// Stride along a given axis + ssize_t strides(ssize_t dim) const { + if (dim >= ndim()) + fail_dim_check(dim, "invalid axis"); + return strides()[dim]; + } + + /// Return the NumPy array flags + int flags() const { + return detail::array_proxy(m_ptr)->flags; + } + + /// If set, the array is writeable (otherwise the buffer is read-only) + bool writeable() const { + return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_WRITEABLE_); + } + + /// If set, the array owns the data (will be freed when the array is deleted) + bool owndata() const { + return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_OWNDATA_); + } + + /// Pointer to the contained data. If index is not provided, points to the + /// beginning of the buffer. May throw if the index would lead to out of bounds access. + template const void* data(Ix... 
index) const { + return static_cast(detail::array_proxy(m_ptr)->data + offset_at(index...)); + } + + /// Mutable pointer to the contained data. If index is not provided, points to the + /// beginning of the buffer. May throw if the index would lead to out of bounds access. + /// May throw if the array is not writeable. + template void* mutable_data(Ix... index) { + check_writeable(); + return static_cast(detail::array_proxy(m_ptr)->data + offset_at(index...)); + } + + /// Byte offset from beginning of the array to a given index (full or partial). + /// May throw if the index would lead to out of bounds access. + template ssize_t offset_at(Ix... index) const { + if ((ssize_t) sizeof...(index) > ndim()) + fail_dim_check(sizeof...(index), "too many indices for an array"); + return byte_offset(ssize_t(index)...); + } + + ssize_t offset_at() const { return 0; } + + /// Item count from beginning of the array to a given index (full or partial). + /// May throw if the index would lead to out of bounds access. + template ssize_t index_at(Ix... index) const { + return offset_at(index...) / itemsize(); + } + + /** + * Returns a proxy object that provides access to the array's data without bounds or + * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with + * care: the array must not be destroyed or reshaped for the duration of the returned object, + * and the caller must take care not to access invalid dimensions or dimension indices. + */ + template detail::unchecked_mutable_reference mutable_unchecked() & { + if (Dims >= 0 && ndim() != Dims) + throw std::domain_error("array has incorrect number of dimensions: " + std::to_string(ndim()) + + "; expected " + std::to_string(Dims)); + return detail::unchecked_mutable_reference(mutable_data(), shape(), strides(), ndim()); + } + + /** + * Returns a proxy object that provides const access to the array's data without bounds or + * dimensionality checking. 
Unlike `mutable_unchecked()`, this does not require that the + * underlying array have the `writable` flag. Use with care: the array must not be destroyed or + * reshaped for the duration of the returned object, and the caller must take care not to access + * invalid dimensions or dimension indices. + */ + template detail::unchecked_reference unchecked() const & { + if (Dims >= 0 && ndim() != Dims) + throw std::domain_error("array has incorrect number of dimensions: " + std::to_string(ndim()) + + "; expected " + std::to_string(Dims)); + return detail::unchecked_reference(data(), shape(), strides(), ndim()); + } + + /// Return a new view with all of the dimensions of length 1 removed + array squeeze() { + auto& api = detail::npy_api::get(); + return reinterpret_steal(api.PyArray_Squeeze_(m_ptr)); + } + + /// Resize array to given shape + /// If refcheck is true and more that one reference exist to this array + /// then resize will succeed only if it makes a reshape, i.e. original size doesn't change + void resize(ShapeContainer new_shape, bool refcheck = true) { + detail::npy_api::PyArray_Dims d = { + new_shape->data(), int(new_shape->size()) + }; + // try to resize, set ordering param to -1 cause it's not used anyway + object new_array = reinterpret_steal( + detail::npy_api::get().PyArray_Resize_(m_ptr, &d, int(refcheck), -1) + ); + if (!new_array) throw error_already_set(); + if (isinstance(new_array)) { *this = std::move(new_array); } + } + + /// Ensure that the argument is a NumPy array + /// In case of an error, nullptr is returned and the Python error is cleared. 
+ static array ensure(handle h, int ExtraFlags = 0) { + auto result = reinterpret_steal(raw_array(h.ptr(), ExtraFlags)); + if (!result) + PyErr_Clear(); + return result; + } + +protected: + template friend struct detail::npy_format_descriptor; + + void fail_dim_check(ssize_t dim, const std::string& msg) const { + throw index_error(msg + ": " + std::to_string(dim) + + " (ndim = " + std::to_string(ndim()) + ")"); + } + + template ssize_t byte_offset(Ix... index) const { + check_dimensions(index...); + return detail::byte_offset_unsafe(strides(), ssize_t(index)...); + } + + void check_writeable() const { + if (!writeable()) + throw std::domain_error("array is not writeable"); + } + + // Default, C-style strides + static std::vector c_strides(const std::vector &shape, ssize_t itemsize) { + auto ndim = shape.size(); + std::vector strides(ndim, itemsize); + if (ndim > 0) + for (size_t i = ndim - 1; i > 0; --i) + strides[i - 1] = strides[i] * shape[i]; + return strides; + } + + // F-style strides; default when constructing an array_t with `ExtraFlags & f_style` + static std::vector f_strides(const std::vector &shape, ssize_t itemsize) { + auto ndim = shape.size(); + std::vector strides(ndim, itemsize); + for (size_t i = 1; i < ndim; ++i) + strides[i] = strides[i - 1] * shape[i - 1]; + return strides; + } + + template void check_dimensions(Ix... index) const { + check_dimensions_impl(ssize_t(0), shape(), ssize_t(index)...); + } + + void check_dimensions_impl(ssize_t, const ssize_t*) const { } + + template void check_dimensions_impl(ssize_t axis, const ssize_t* shape, ssize_t i, Ix... 
index) const { + if (i >= *shape) { + throw index_error(std::string("index ") + std::to_string(i) + + " is out of bounds for axis " + std::to_string(axis) + + " with size " + std::to_string(*shape)); + } + check_dimensions_impl(axis + 1, shape + 1, index...); + } + + /// Create array from any object -- always returns a new reference + static PyObject *raw_array(PyObject *ptr, int ExtraFlags = 0) { + if (ptr == nullptr) { + PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array from a nullptr"); + return nullptr; + } + return detail::npy_api::get().PyArray_FromAny_( + ptr, nullptr, 0, 0, detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr); + } +}; + +template class array_t : public array { +private: + struct private_ctor {}; + // Delegating constructor needed when both moving and accessing in the same constructor + array_t(private_ctor, ShapeContainer &&shape, StridesContainer &&strides, const T *ptr, handle base) + : array(std::move(shape), std::move(strides), ptr, base) {} +public: + static_assert(!detail::array_info::is_array, "Array types cannot be used with array_t"); + + using value_type = T; + + array_t() : array(0, static_cast(nullptr)) {} + array_t(handle h, borrowed_t) : array(h, borrowed_t{}) { } + array_t(handle h, stolen_t) : array(h, stolen_t{}) { } + + PYBIND11_DEPRECATED("Use array_t::ensure() instead") + array_t(handle h, bool is_borrowed) : array(raw_array_t(h.ptr()), stolen_t{}) { + if (!m_ptr) PyErr_Clear(); + if (!is_borrowed) Py_XDECREF(h.ptr()); + } + + array_t(const object &o) : array(raw_array_t(o.ptr()), stolen_t{}) { + if (!m_ptr) throw error_already_set(); + } + + explicit array_t(const buffer_info& info, handle base = handle()) : array(info, base) { } + + array_t(ShapeContainer shape, StridesContainer strides, const T *ptr = nullptr, handle base = handle()) + : array(std::move(shape), std::move(strides), ptr, base) { } + + explicit array_t(ShapeContainer shape, const T *ptr = nullptr, handle base = handle()) + : 
array_t(private_ctor{}, std::move(shape), + ExtraFlags & f_style ? f_strides(*shape, itemsize()) : c_strides(*shape, itemsize()), + ptr, base) { } + + explicit array_t(ssize_t count, const T *ptr = nullptr, handle base = handle()) + : array({count}, {}, ptr, base) { } + + constexpr ssize_t itemsize() const { + return sizeof(T); + } + + template ssize_t index_at(Ix... index) const { + return offset_at(index...) / itemsize(); + } + + template const T* data(Ix... index) const { + return static_cast(array::data(index...)); + } + + template T* mutable_data(Ix... index) { + return static_cast(array::mutable_data(index...)); + } + + // Reference to element at a given index + template const T& at(Ix... index) const { + if ((ssize_t) sizeof...(index) != ndim()) + fail_dim_check(sizeof...(index), "index dimension mismatch"); + return *(static_cast(array::data()) + byte_offset(ssize_t(index)...) / itemsize()); + } + + // Mutable reference to element at a given index + template T& mutable_at(Ix... index) { + if ((ssize_t) sizeof...(index) != ndim()) + fail_dim_check(sizeof...(index), "index dimension mismatch"); + return *(static_cast(array::mutable_data()) + byte_offset(ssize_t(index)...) / itemsize()); + } + + /** + * Returns a proxy object that provides access to the array's data without bounds or + * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with + * care: the array must not be destroyed or reshaped for the duration of the returned object, + * and the caller must take care not to access invalid dimensions or dimension indices. + */ + template detail::unchecked_mutable_reference mutable_unchecked() & { + return array::mutable_unchecked(); + } + + /** + * Returns a proxy object that provides const access to the array's data without bounds or + * dimensionality checking. Unlike `unchecked()`, this does not require that the underlying + * array have the `writable` flag. 
Use with care: the array must not be destroyed or reshaped + * for the duration of the returned object, and the caller must take care not to access invalid + * dimensions or dimension indices. + */ + template detail::unchecked_reference unchecked() const & { + return array::unchecked(); + } + + /// Ensure that the argument is a NumPy array of the correct dtype (and if not, try to convert + /// it). In case of an error, nullptr is returned and the Python error is cleared. + static array_t ensure(handle h) { + auto result = reinterpret_steal(raw_array_t(h.ptr())); + if (!result) + PyErr_Clear(); + return result; + } + + static bool check_(handle h) { + const auto &api = detail::npy_api::get(); + return api.PyArray_Check_(h.ptr()) + && api.PyArray_EquivTypes_(detail::array_proxy(h.ptr())->descr, dtype::of().ptr()); + } + +protected: + /// Create array from any object -- always returns a new reference + static PyObject *raw_array_t(PyObject *ptr) { + if (ptr == nullptr) { + PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array_t from a nullptr"); + return nullptr; + } + return detail::npy_api::get().PyArray_FromAny_( + ptr, dtype::of().release().ptr(), 0, 0, + detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr); + } +}; + +template +struct format_descriptor::value>> { + static std::string format() { + return detail::npy_format_descriptor::type>::format(); + } +}; + +template struct format_descriptor { + static std::string format() { return std::to_string(N) + "s"; } +}; +template struct format_descriptor> { + static std::string format() { return std::to_string(N) + "s"; } +}; + +template +struct format_descriptor::value>> { + static std::string format() { + return format_descriptor< + typename std::remove_cv::type>::type>::format(); + } +}; + +template +struct format_descriptor::is_array>> { + static std::string format() { + using namespace detail; + static constexpr auto extents = _("(") + array_info::extents + _(")"); + return extents.text + 
format_descriptor>::format(); + } +}; + +PYBIND11_NAMESPACE_BEGIN(detail) +template +struct pyobject_caster> { + using type = array_t; + + bool load(handle src, bool convert) { + if (!convert && !type::check_(src)) + return false; + value = type::ensure(src); + return static_cast(value); + } + + static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) { + return src.inc_ref(); + } + PYBIND11_TYPE_CASTER(type, handle_type_name::name); +}; + +template +struct compare_buffer_info::value>> { + static bool compare(const buffer_info& b) { + return npy_api::get().PyArray_EquivTypes_(dtype::of().ptr(), dtype(b).ptr()); + } +}; + +template +struct npy_format_descriptor_name; + +template +struct npy_format_descriptor_name::value>> { + static constexpr auto name = _::value>( + _("bool"), _::value>("numpy.int", "numpy.uint") + _() + ); +}; + +template +struct npy_format_descriptor_name::value>> { + static constexpr auto name = _::value || std::is_same::value>( + _("numpy.float") + _(), _("numpy.longdouble") + ); +}; + +template +struct npy_format_descriptor_name::value>> { + static constexpr auto name = _::value + || std::is_same::value>( + _("numpy.complex") + _(), _("numpy.longcomplex") + ); +}; + +template +struct npy_format_descriptor::value>> + : npy_format_descriptor_name { +private: + // NB: the order here must match the one in common.h + constexpr static const int values[15] = { + npy_api::NPY_BOOL_, + npy_api::NPY_BYTE_, npy_api::NPY_UBYTE_, npy_api::NPY_INT16_, npy_api::NPY_UINT16_, + npy_api::NPY_INT32_, npy_api::NPY_UINT32_, npy_api::NPY_INT64_, npy_api::NPY_UINT64_, + npy_api::NPY_FLOAT_, npy_api::NPY_DOUBLE_, npy_api::NPY_LONGDOUBLE_, + npy_api::NPY_CFLOAT_, npy_api::NPY_CDOUBLE_, npy_api::NPY_CLONGDOUBLE_ + }; + +public: + static constexpr int value = values[detail::is_fmt_numeric::index]; + + static pybind11::dtype dtype() { + if (auto ptr = npy_api::get().PyArray_DescrFromType_(value)) + return reinterpret_steal(ptr); + 
pybind11_fail("Unsupported buffer format!"); + } +}; + +#define PYBIND11_DECL_CHAR_FMT \ + static constexpr auto name = _("S") + _(); \ + static pybind11::dtype dtype() { return pybind11::dtype(std::string("S") + std::to_string(N)); } +template struct npy_format_descriptor { PYBIND11_DECL_CHAR_FMT }; +template struct npy_format_descriptor> { PYBIND11_DECL_CHAR_FMT }; +#undef PYBIND11_DECL_CHAR_FMT + +template struct npy_format_descriptor::is_array>> { +private: + using base_descr = npy_format_descriptor::type>; +public: + static_assert(!array_info::is_empty, "Zero-sized arrays are not supported"); + + static constexpr auto name = _("(") + array_info::extents + _(")") + base_descr::name; + static pybind11::dtype dtype() { + list shape; + array_info::append_extents(shape); + return pybind11::dtype::from_args(pybind11::make_tuple(base_descr::dtype(), shape)); + } +}; + +template struct npy_format_descriptor::value>> { +private: + using base_descr = npy_format_descriptor::type>; +public: + static constexpr auto name = base_descr::name; + static pybind11::dtype dtype() { return base_descr::dtype(); } +}; + +struct field_descriptor { + const char *name; + ssize_t offset; + ssize_t size; + std::string format; + dtype descr; +}; + +inline PYBIND11_NOINLINE void register_structured_dtype( + any_container fields, + const std::type_info& tinfo, ssize_t itemsize, + bool (*direct_converter)(PyObject *, void *&)) { + + auto& numpy_internals = get_numpy_internals(); + if (numpy_internals.get_type_info(tinfo, false)) + pybind11_fail("NumPy: dtype is already registered"); + + // Use ordered fields because order matters as of NumPy 1.14: + // https://docs.scipy.org/doc/numpy/release.html#multiple-field-indexing-assignment-of-structured-arrays + std::vector ordered_fields(std::move(fields)); + std::sort(ordered_fields.begin(), ordered_fields.end(), + [](const field_descriptor &a, const field_descriptor &b) { return a.offset < b.offset; }); + + list names, formats, offsets; + for 
(auto& field : ordered_fields) { + if (!field.descr) + pybind11_fail(std::string("NumPy: unsupported field dtype: `") + + field.name + "` @ " + tinfo.name()); + names.append(PYBIND11_STR_TYPE(field.name)); + formats.append(field.descr); + offsets.append(pybind11::int_(field.offset)); + } + auto dtype_ptr = pybind11::dtype(names, formats, offsets, itemsize).release().ptr(); + + // There is an existing bug in NumPy (as of v1.11): trailing bytes are + // not encoded explicitly into the format string. This will supposedly + // get fixed in v1.12; for further details, see these: + // - https://github.com/numpy/numpy/issues/7797 + // - https://github.com/numpy/numpy/pull/7798 + // Because of this, we won't use numpy's logic to generate buffer format + // strings and will just do it ourselves. + ssize_t offset = 0; + std::ostringstream oss; + // mark the structure as unaligned with '^', because numpy and C++ don't + // always agree about alignment (particularly for complex), and we're + // explicitly listing all our padding. This depends on none of the fields + // overriding the endianness. 
Putting the ^ in front of individual fields + // isn't guaranteed to work due to https://github.com/numpy/numpy/issues/9049 + oss << "^T{"; + for (auto& field : ordered_fields) { + if (field.offset > offset) + oss << (field.offset - offset) << 'x'; + oss << field.format << ':' << field.name << ':'; + offset = field.offset + field.size; + } + if (itemsize > offset) + oss << (itemsize - offset) << 'x'; + oss << '}'; + auto format_str = oss.str(); + + // Sanity check: verify that NumPy properly parses our buffer format string + auto& api = npy_api::get(); + auto arr = array(buffer_info(nullptr, itemsize, format_str, 1)); + if (!api.PyArray_EquivTypes_(dtype_ptr, arr.dtype().ptr())) + pybind11_fail("NumPy: invalid buffer descriptor!"); + + auto tindex = std::type_index(tinfo); + numpy_internals.registered_dtypes[tindex] = { dtype_ptr, format_str }; + get_internals().direct_conversions[tindex].push_back(direct_converter); +} + +template struct npy_format_descriptor { + static_assert(is_pod_struct::value, "Attempt to use a non-POD or unimplemented POD type as a numpy dtype"); + + static constexpr auto name = make_caster::name; + + static pybind11::dtype dtype() { + return reinterpret_borrow(dtype_ptr()); + } + + static std::string format() { + static auto format_str = get_numpy_internals().get_type_info(true)->format_str; + return format_str; + } + + static void register_dtype(any_container fields) { + register_structured_dtype(std::move(fields), typeid(typename std::remove_cv::type), + sizeof(T), &direct_converter); + } + +private: + static PyObject* dtype_ptr() { + static PyObject* ptr = get_numpy_internals().get_type_info(true)->dtype_ptr; + return ptr; + } + + static bool direct_converter(PyObject *obj, void*& value) { + auto& api = npy_api::get(); + if (!PyObject_TypeCheck(obj, api.PyVoidArrType_Type_)) + return false; + if (auto descr = reinterpret_steal(api.PyArray_DescrFromScalar_(obj))) { + if (api.PyArray_EquivTypes_(dtype_ptr(), descr.ptr())) { + value = 
((PyVoidScalarObject_Proxy *) obj)->obval; + return true; + } + } + return false; + } +}; + +#ifdef __CLION_IDE__ // replace heavy macro with dummy code for the IDE (doesn't affect code) +# define PYBIND11_NUMPY_DTYPE(Type, ...) ((void)0) +# define PYBIND11_NUMPY_DTYPE_EX(Type, ...) ((void)0) +#else + +#define PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, Name) \ + ::pybind11::detail::field_descriptor { \ + Name, offsetof(T, Field), sizeof(decltype(std::declval().Field)), \ + ::pybind11::format_descriptor().Field)>::format(), \ + ::pybind11::detail::npy_format_descriptor().Field)>::dtype() \ + } + +// Extract name, offset and format descriptor for a struct field +#define PYBIND11_FIELD_DESCRIPTOR(T, Field) PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, #Field) + +// The main idea of this macro is borrowed from https://github.com/swansontec/map-macro +// (C) William Swanson, Paul Fultz +#define PYBIND11_EVAL0(...) __VA_ARGS__ +#define PYBIND11_EVAL1(...) PYBIND11_EVAL0 (PYBIND11_EVAL0 (PYBIND11_EVAL0 (__VA_ARGS__))) +#define PYBIND11_EVAL2(...) PYBIND11_EVAL1 (PYBIND11_EVAL1 (PYBIND11_EVAL1 (__VA_ARGS__))) +#define PYBIND11_EVAL3(...) PYBIND11_EVAL2 (PYBIND11_EVAL2 (PYBIND11_EVAL2 (__VA_ARGS__))) +#define PYBIND11_EVAL4(...) PYBIND11_EVAL3 (PYBIND11_EVAL3 (PYBIND11_EVAL3 (__VA_ARGS__))) +#define PYBIND11_EVAL(...) PYBIND11_EVAL4 (PYBIND11_EVAL4 (PYBIND11_EVAL4 (__VA_ARGS__))) +#define PYBIND11_MAP_END(...) +#define PYBIND11_MAP_OUT +#define PYBIND11_MAP_COMMA , +#define PYBIND11_MAP_GET_END() 0, PYBIND11_MAP_END +#define PYBIND11_MAP_NEXT0(test, next, ...) 
next PYBIND11_MAP_OUT +#define PYBIND11_MAP_NEXT1(test, next) PYBIND11_MAP_NEXT0 (test, next, 0) +#define PYBIND11_MAP_NEXT(test, next) PYBIND11_MAP_NEXT1 (PYBIND11_MAP_GET_END test, next) +#if defined(_MSC_VER) && !defined(__clang__) // MSVC is not as eager to expand macros, hence this workaround +#define PYBIND11_MAP_LIST_NEXT1(test, next) \ + PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)) +#else +#define PYBIND11_MAP_LIST_NEXT1(test, next) \ + PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0) +#endif +#define PYBIND11_MAP_LIST_NEXT(test, next) \ + PYBIND11_MAP_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next) +#define PYBIND11_MAP_LIST0(f, t, x, peek, ...) \ + f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST1) (f, t, peek, __VA_ARGS__) +#define PYBIND11_MAP_LIST1(f, t, x, peek, ...) \ + f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST0) (f, t, peek, __VA_ARGS__) +// PYBIND11_MAP_LIST(f, t, a1, a2, ...) expands to f(t, a1), f(t, a2), ... +#define PYBIND11_MAP_LIST(f, t, ...) \ + PYBIND11_EVAL (PYBIND11_MAP_LIST1 (f, t, __VA_ARGS__, (), 0)) + +#define PYBIND11_NUMPY_DTYPE(Type, ...) \ + ::pybind11::detail::npy_format_descriptor::register_dtype \ + (::std::vector<::pybind11::detail::field_descriptor> \ + {PYBIND11_MAP_LIST (PYBIND11_FIELD_DESCRIPTOR, Type, __VA_ARGS__)}) + +#if defined(_MSC_VER) && !defined(__clang__) +#define PYBIND11_MAP2_LIST_NEXT1(test, next) \ + PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)) +#else +#define PYBIND11_MAP2_LIST_NEXT1(test, next) \ + PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0) +#endif +#define PYBIND11_MAP2_LIST_NEXT(test, next) \ + PYBIND11_MAP2_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next) +#define PYBIND11_MAP2_LIST0(f, t, x1, x2, peek, ...) \ + f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST1) (f, t, peek, __VA_ARGS__) +#define PYBIND11_MAP2_LIST1(f, t, x1, x2, peek, ...) 
\ + f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST0) (f, t, peek, __VA_ARGS__) +// PYBIND11_MAP2_LIST(f, t, a1, a2, ...) expands to f(t, a1, a2), f(t, a3, a4), ... +#define PYBIND11_MAP2_LIST(f, t, ...) \ + PYBIND11_EVAL (PYBIND11_MAP2_LIST1 (f, t, __VA_ARGS__, (), 0)) + +#define PYBIND11_NUMPY_DTYPE_EX(Type, ...) \ + ::pybind11::detail::npy_format_descriptor::register_dtype \ + (::std::vector<::pybind11::detail::field_descriptor> \ + {PYBIND11_MAP2_LIST (PYBIND11_FIELD_DESCRIPTOR_EX, Type, __VA_ARGS__)}) + +#endif // __CLION_IDE__ + +template +using array_iterator = typename std::add_pointer::type; + +template +array_iterator array_begin(const buffer_info& buffer) { + return array_iterator(reinterpret_cast(buffer.ptr)); +} + +template +array_iterator array_end(const buffer_info& buffer) { + return array_iterator(reinterpret_cast(buffer.ptr) + buffer.size); +} + +class common_iterator { +public: + using container_type = std::vector; + using value_type = container_type::value_type; + using size_type = container_type::size_type; + + common_iterator() : p_ptr(0), m_strides() {} + + common_iterator(void* ptr, const container_type& strides, const container_type& shape) + : p_ptr(reinterpret_cast(ptr)), m_strides(strides.size()) { + m_strides.back() = static_cast(strides.back()); + for (size_type i = m_strides.size() - 1; i != 0; --i) { + size_type j = i - 1; + value_type s = static_cast(shape[i]); + m_strides[j] = strides[j] + m_strides[i] - strides[i] * s; + } + } + + void increment(size_type dim) { + p_ptr += m_strides[dim]; + } + + void* data() const { + return p_ptr; + } + +private: + char* p_ptr; + container_type m_strides; +}; + +template class multi_array_iterator { +public: + using container_type = std::vector; + + multi_array_iterator(const std::array &buffers, + const container_type &shape) + : m_shape(shape.size()), m_index(shape.size(), 0), + m_common_iterator() { + + // Manual copy to avoid conversion warning if using std::copy + for (size_t 
i = 0; i < shape.size(); ++i) + m_shape[i] = shape[i]; + + container_type strides(shape.size()); + for (size_t i = 0; i < N; ++i) + init_common_iterator(buffers[i], shape, m_common_iterator[i], strides); + } + + multi_array_iterator& operator++() { + for (size_t j = m_index.size(); j != 0; --j) { + size_t i = j - 1; + if (++m_index[i] != m_shape[i]) { + increment_common_iterator(i); + break; + } else { + m_index[i] = 0; + } + } + return *this; + } + + template T* data() const { + return reinterpret_cast(m_common_iterator[K].data()); + } + +private: + + using common_iter = common_iterator; + + void init_common_iterator(const buffer_info &buffer, + const container_type &shape, + common_iter &iterator, + container_type &strides) { + auto buffer_shape_iter = buffer.shape.rbegin(); + auto buffer_strides_iter = buffer.strides.rbegin(); + auto shape_iter = shape.rbegin(); + auto strides_iter = strides.rbegin(); + + while (buffer_shape_iter != buffer.shape.rend()) { + if (*shape_iter == *buffer_shape_iter) + *strides_iter = *buffer_strides_iter; + else + *strides_iter = 0; + + ++buffer_shape_iter; + ++buffer_strides_iter; + ++shape_iter; + ++strides_iter; + } + + std::fill(strides_iter, strides.rend(), 0); + iterator = common_iter(buffer.ptr, strides, shape); + } + + void increment_common_iterator(size_t dim) { + for (auto &iter : m_common_iterator) + iter.increment(dim); + } + + container_type m_shape; + container_type m_index; + std::array m_common_iterator; +}; + +enum class broadcast_trivial { non_trivial, c_trivial, f_trivial }; + +// Populates the shape and number of dimensions for the set of buffers. Returns a broadcast_trivial +// enum value indicating whether the broadcast is "trivial"--that is, has each buffer being either a +// singleton or a full-size, C-contiguous (`c_trivial`) or Fortran-contiguous (`f_trivial`) storage +// buffer; returns `non_trivial` otherwise. 
+template +broadcast_trivial broadcast(const std::array &buffers, ssize_t &ndim, std::vector &shape) { + ndim = std::accumulate(buffers.begin(), buffers.end(), ssize_t(0), [](ssize_t res, const buffer_info &buf) { + return std::max(res, buf.ndim); + }); + + shape.clear(); + shape.resize((size_t) ndim, 1); + + // Figure out the output size, and make sure all input arrays conform (i.e. are either size 1 or + // the full size). + for (size_t i = 0; i < N; ++i) { + auto res_iter = shape.rbegin(); + auto end = buffers[i].shape.rend(); + for (auto shape_iter = buffers[i].shape.rbegin(); shape_iter != end; ++shape_iter, ++res_iter) { + const auto &dim_size_in = *shape_iter; + auto &dim_size_out = *res_iter; + + // Each input dimension can either be 1 or `n`, but `n` values must match across buffers + if (dim_size_out == 1) + dim_size_out = dim_size_in; + else if (dim_size_in != 1 && dim_size_in != dim_size_out) + pybind11_fail("pybind11::vectorize: incompatible size/dimension of inputs!"); + } + } + + bool trivial_broadcast_c = true; + bool trivial_broadcast_f = true; + for (size_t i = 0; i < N && (trivial_broadcast_c || trivial_broadcast_f); ++i) { + if (buffers[i].size == 1) + continue; + + // Require the same number of dimensions: + if (buffers[i].ndim != ndim) + return broadcast_trivial::non_trivial; + + // Require all dimensions be full-size: + if (!std::equal(buffers[i].shape.cbegin(), buffers[i].shape.cend(), shape.cbegin())) + return broadcast_trivial::non_trivial; + + // Check for C contiguity (but only if previous inputs were also C contiguous) + if (trivial_broadcast_c) { + ssize_t expect_stride = buffers[i].itemsize; + auto end = buffers[i].shape.crend(); + for (auto shape_iter = buffers[i].shape.crbegin(), stride_iter = buffers[i].strides.crbegin(); + trivial_broadcast_c && shape_iter != end; ++shape_iter, ++stride_iter) { + if (expect_stride == *stride_iter) + expect_stride *= *shape_iter; + else + trivial_broadcast_c = false; + } + } + + // Check for 
Fortran contiguity (if previous inputs were also F contiguous) + if (trivial_broadcast_f) { + ssize_t expect_stride = buffers[i].itemsize; + auto end = buffers[i].shape.cend(); + for (auto shape_iter = buffers[i].shape.cbegin(), stride_iter = buffers[i].strides.cbegin(); + trivial_broadcast_f && shape_iter != end; ++shape_iter, ++stride_iter) { + if (expect_stride == *stride_iter) + expect_stride *= *shape_iter; + else + trivial_broadcast_f = false; + } + } + } + + return + trivial_broadcast_c ? broadcast_trivial::c_trivial : + trivial_broadcast_f ? broadcast_trivial::f_trivial : + broadcast_trivial::non_trivial; +} + +template +struct vectorize_arg { + static_assert(!std::is_rvalue_reference::value, "Functions with rvalue reference arguments cannot be vectorized"); + // The wrapped function gets called with this type: + using call_type = remove_reference_t; + // Is this a vectorized argument? + static constexpr bool vectorize = + satisfies_any_of::value && + satisfies_none_of::value && + (!std::is_reference::value || + (std::is_lvalue_reference::value && std::is_const::value)); + // Accept this type: an array for vectorized types, otherwise the type as-is: + using type = conditional_t, array::forcecast>, T>; +}; + +template +struct vectorize_helper { +private: + static constexpr size_t N = sizeof...(Args); + static constexpr size_t NVectorized = constexpr_sum(vectorize_arg::vectorize...); + static_assert(NVectorized >= 1, + "pybind11::vectorize(...) requires a function with at least one vectorizable argument"); + +public: + template + explicit vectorize_helper(T &&f) : f(std::forward(f)) { } + + object operator()(typename vectorize_arg::type... args) { + return run(args..., + make_index_sequence(), + select_indices::vectorize...>(), + make_index_sequence()); + } + +private: + remove_reference_t f; + + // Internal compiler error in MSVC 19.16.27025.1 (Visual Studio 2017 15.9.4), when compiling with "/permissive-" flag + // when arg_call_types is manually inlined. 
+ using arg_call_types = std::tuple::call_type...>; + template using param_n_t = typename std::tuple_element::type; + + // Runs a vectorized function given arguments tuple and three index sequences: + // - Index is the full set of 0 ... (N-1) argument indices; + // - VIndex is the subset of argument indices with vectorized parameters, letting us access + // vectorized arguments (anything not in this sequence is passed through) + // - BIndex is a incremental sequence (beginning at 0) of the same size as VIndex, so that + // we can store vectorized buffer_infos in an array (argument VIndex has its buffer at + // index BIndex in the array). + template object run( + typename vectorize_arg::type &...args, + index_sequence i_seq, index_sequence vi_seq, index_sequence bi_seq) { + + // Pointers to values the function was called with; the vectorized ones set here will start + // out as array_t pointers, but they will be changed them to T pointers before we make + // call the wrapped function. Non-vectorized pointers are left as-is. + std::array params{{ &args... }}; + + // The array of `buffer_info`s of vectorized arguments: + std::array buffers{{ reinterpret_cast(params[VIndex])->request()... }}; + + /* Determine dimensions parameters of output array */ + ssize_t nd = 0; + std::vector shape(0); + auto trivial = broadcast(buffers, nd, shape); + size_t ndim = (size_t) nd; + + size_t size = std::accumulate(shape.begin(), shape.end(), (size_t) 1, std::multiplies()); + + // If all arguments are 0-dimension arrays (i.e. single values) return a plain value (i.e. + // not wrapped in an array). 
+ if (size == 1 && ndim == 0) { + PYBIND11_EXPAND_SIDE_EFFECTS(params[VIndex] = buffers[BIndex].ptr); + return cast(f(*reinterpret_cast *>(params[Index])...)); + } + + array_t result; + if (trivial == broadcast_trivial::f_trivial) result = array_t(shape); + else result = array_t(shape); + + if (size == 0) return std::move(result); + + /* Call the function */ + if (trivial == broadcast_trivial::non_trivial) + apply_broadcast(buffers, params, result, i_seq, vi_seq, bi_seq); + else + apply_trivial(buffers, params, result.mutable_data(), size, i_seq, vi_seq, bi_seq); + + return std::move(result); + } + + template + void apply_trivial(std::array &buffers, + std::array ¶ms, + Return *out, + size_t size, + index_sequence, index_sequence, index_sequence) { + + // Initialize an array of mutable byte references and sizes with references set to the + // appropriate pointer in `params`; as we iterate, we'll increment each pointer by its size + // (except for singletons, which get an increment of 0). + std::array, NVectorized> vecparams{{ + std::pair( + reinterpret_cast(params[VIndex] = buffers[BIndex].ptr), + buffers[BIndex].size == 1 ? 0 : sizeof(param_n_t) + )... 
+ }}; + + for (size_t i = 0; i < size; ++i) { + out[i] = f(*reinterpret_cast *>(params[Index])...); + for (auto &x : vecparams) x.first += x.second; + } + } + + template + void apply_broadcast(std::array &buffers, + std::array ¶ms, + array_t &output_array, + index_sequence, index_sequence, index_sequence) { + + buffer_info output = output_array.request(); + multi_array_iterator input_iter(buffers, output.shape); + + for (array_iterator iter = array_begin(output), end = array_end(output); + iter != end; + ++iter, ++input_iter) { + PYBIND11_EXPAND_SIDE_EFFECTS(( + params[VIndex] = input_iter.template data() + )); + *iter = f(*reinterpret_cast *>(std::get(params))...); + } + } +}; + +template +vectorize_helper +vectorize_extractor(const Func &f, Return (*) (Args ...)) { + return detail::vectorize_helper(f); +} + +template struct handle_type_name> { + static constexpr auto name = _("numpy.ndarray[") + npy_format_descriptor::name + _("]"); +}; + +PYBIND11_NAMESPACE_END(detail) + +// Vanilla pointer vectorizer: +template +detail::vectorize_helper +vectorize(Return (*f) (Args ...)) { + return detail::vectorize_helper(f); +} + +// lambda vectorizer: +template ::value, int> = 0> +auto vectorize(Func &&f) -> decltype( + detail::vectorize_extractor(std::forward(f), (detail::function_signature_t *) nullptr)) { + return detail::vectorize_extractor(std::forward(f), (detail::function_signature_t *) nullptr); +} + +// Vectorize a class method (non-const): +template ())), Return, Class *, Args...>> +Helper vectorize(Return (Class::*f)(Args...)) { + return Helper(std::mem_fn(f)); +} + +// Vectorize a class method (const): +template ())), Return, const Class *, Args...>> +Helper vectorize(Return (Class::*f)(Args...) 
const) { + return Helper(std::mem_fn(f)); +} + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif diff --git a/diffvg/pybind11/include/pybind11/operators.h b/diffvg/pybind11/include/pybind11/operators.h new file mode 100644 index 0000000000000000000000000000000000000000..086cb4cfd838797767dd3b3caa275bdb4348fc8e --- /dev/null +++ b/diffvg/pybind11/include/pybind11/operators.h @@ -0,0 +1,173 @@ +/* + pybind11/operator.h: Metatemplates for operator overloading + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "pybind11.h" + +#if defined(__clang__) && !defined(__INTEL_COMPILER) +# pragma clang diagnostic ignored "-Wunsequenced" // multiple unsequenced modifications to 'self' (when using def(py::self OP Type())) +#elif defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant +#endif + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +/// Enumeration with all supported operator types +enum op_id : int { + op_add, op_sub, op_mul, op_div, op_mod, op_divmod, op_pow, op_lshift, + op_rshift, op_and, op_xor, op_or, op_neg, op_pos, op_abs, op_invert, + op_int, op_long, op_float, op_str, op_cmp, op_gt, op_ge, op_lt, op_le, + op_eq, op_ne, op_iadd, op_isub, op_imul, op_idiv, op_imod, op_ilshift, + op_irshift, op_iand, op_ixor, op_ior, op_complex, op_bool, op_nonzero, + op_repr, op_truediv, op_itruediv, op_hash +}; + +enum op_type : int { + op_l, /* base type on left */ + op_r, /* base type on right */ + op_u /* unary operator */ +}; + +struct self_t { }; +static const self_t self = self_t(); + +/// Type for an unused type slot +struct undefined_t { }; + +/// Don't warn about an unused variable +inline self_t __self() { return self; } + +/// base template of operator implementations 
+template struct op_impl { }; + +/// Operator implementation generator +template struct op_ { + template void execute(Class &cl, const Extra&... extra) const { + using Base = typename Class::type; + using L_type = conditional_t::value, Base, L>; + using R_type = conditional_t::value, Base, R>; + using op = op_impl; + cl.def(op::name(), &op::execute, is_operator(), extra...); + #if PY_MAJOR_VERSION < 3 + if (id == op_truediv || id == op_itruediv) + cl.def(id == op_itruediv ? "__idiv__" : ot == op_l ? "__div__" : "__rdiv__", + &op::execute, is_operator(), extra...); + #endif + } + template void execute_cast(Class &cl, const Extra&... extra) const { + using Base = typename Class::type; + using L_type = conditional_t::value, Base, L>; + using R_type = conditional_t::value, Base, R>; + using op = op_impl; + cl.def(op::name(), &op::execute_cast, is_operator(), extra...); + #if PY_MAJOR_VERSION < 3 + if (id == op_truediv || id == op_itruediv) + cl.def(id == op_itruediv ? "__idiv__" : ot == op_l ? 
"__div__" : "__rdiv__", + &op::execute, is_operator(), extra...); + #endif + } +}; + +#define PYBIND11_BINARY_OPERATOR(id, rid, op, expr) \ +template struct op_impl { \ + static char const* name() { return "__" #id "__"; } \ + static auto execute(const L &l, const R &r) -> decltype(expr) { return (expr); } \ + static B execute_cast(const L &l, const R &r) { return B(expr); } \ +}; \ +template struct op_impl { \ + static char const* name() { return "__" #rid "__"; } \ + static auto execute(const R &r, const L &l) -> decltype(expr) { return (expr); } \ + static B execute_cast(const R &r, const L &l) { return B(expr); } \ +}; \ +inline op_ op(const self_t &, const self_t &) { \ + return op_(); \ +} \ +template op_ op(const self_t &, const T &) { \ + return op_(); \ +} \ +template op_ op(const T &, const self_t &) { \ + return op_(); \ +} + +#define PYBIND11_INPLACE_OPERATOR(id, op, expr) \ +template struct op_impl { \ + static char const* name() { return "__" #id "__"; } \ + static auto execute(L &l, const R &r) -> decltype(expr) { return expr; } \ + static B execute_cast(L &l, const R &r) { return B(expr); } \ +}; \ +template op_ op(const self_t &, const T &) { \ + return op_(); \ +} + +#define PYBIND11_UNARY_OPERATOR(id, op, expr) \ +template struct op_impl { \ + static char const* name() { return "__" #id "__"; } \ + static auto execute(const L &l) -> decltype(expr) { return expr; } \ + static B execute_cast(const L &l) { return B(expr); } \ +}; \ +inline op_ op(const self_t &) { \ + return op_(); \ +} + +PYBIND11_BINARY_OPERATOR(sub, rsub, operator-, l - r) +PYBIND11_BINARY_OPERATOR(add, radd, operator+, l + r) +PYBIND11_BINARY_OPERATOR(mul, rmul, operator*, l * r) +PYBIND11_BINARY_OPERATOR(truediv, rtruediv, operator/, l / r) +PYBIND11_BINARY_OPERATOR(mod, rmod, operator%, l % r) +PYBIND11_BINARY_OPERATOR(lshift, rlshift, operator<<, l << r) +PYBIND11_BINARY_OPERATOR(rshift, rrshift, operator>>, l >> r) +PYBIND11_BINARY_OPERATOR(and, rand, operator&, l & r) 
+PYBIND11_BINARY_OPERATOR(xor, rxor, operator^, l ^ r) +PYBIND11_BINARY_OPERATOR(eq, eq, operator==, l == r) +PYBIND11_BINARY_OPERATOR(ne, ne, operator!=, l != r) +PYBIND11_BINARY_OPERATOR(or, ror, operator|, l | r) +PYBIND11_BINARY_OPERATOR(gt, lt, operator>, l > r) +PYBIND11_BINARY_OPERATOR(ge, le, operator>=, l >= r) +PYBIND11_BINARY_OPERATOR(lt, gt, operator<, l < r) +PYBIND11_BINARY_OPERATOR(le, ge, operator<=, l <= r) +//PYBIND11_BINARY_OPERATOR(pow, rpow, pow, std::pow(l, r)) +PYBIND11_INPLACE_OPERATOR(iadd, operator+=, l += r) +PYBIND11_INPLACE_OPERATOR(isub, operator-=, l -= r) +PYBIND11_INPLACE_OPERATOR(imul, operator*=, l *= r) +PYBIND11_INPLACE_OPERATOR(itruediv, operator/=, l /= r) +PYBIND11_INPLACE_OPERATOR(imod, operator%=, l %= r) +PYBIND11_INPLACE_OPERATOR(ilshift, operator<<=, l <<= r) +PYBIND11_INPLACE_OPERATOR(irshift, operator>>=, l >>= r) +PYBIND11_INPLACE_OPERATOR(iand, operator&=, l &= r) +PYBIND11_INPLACE_OPERATOR(ixor, operator^=, l ^= r) +PYBIND11_INPLACE_OPERATOR(ior, operator|=, l |= r) +PYBIND11_UNARY_OPERATOR(neg, operator-, -l) +PYBIND11_UNARY_OPERATOR(pos, operator+, +l) +// WARNING: This usage of `abs` should only be done for existing STL overloads. +// Adding overloads directly in to the `std::` namespace is advised against: +// https://en.cppreference.com/w/cpp/language/extending_std +PYBIND11_UNARY_OPERATOR(abs, abs, std::abs(l)) +PYBIND11_UNARY_OPERATOR(hash, hash, std::hash()(l)) +PYBIND11_UNARY_OPERATOR(invert, operator~, (~l)) +PYBIND11_UNARY_OPERATOR(bool, operator!, !!l) +PYBIND11_UNARY_OPERATOR(int, int_, (int) l) +PYBIND11_UNARY_OPERATOR(float, float_, (double) l) + +#undef PYBIND11_BINARY_OPERATOR +#undef PYBIND11_INPLACE_OPERATOR +#undef PYBIND11_UNARY_OPERATOR +PYBIND11_NAMESPACE_END(detail) + +using detail::self; +// Add named operators so that they are accessible via `py::`. 
+using detail::hash; + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) + +#if defined(_MSC_VER) +# pragma warning(pop) +#endif diff --git a/diffvg/pybind11/include/pybind11/options.h b/diffvg/pybind11/include/pybind11/options.h new file mode 100644 index 0000000000000000000000000000000000000000..d74db1c68dddb3436cc0fb2674a6ef32ac77d5fd --- /dev/null +++ b/diffvg/pybind11/include/pybind11/options.h @@ -0,0 +1,65 @@ +/* + pybind11/options.h: global settings that are configurable at runtime. + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "detail/common.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +class options { +public: + + // Default RAII constructor, which leaves settings as they currently are. + options() : previous_state(global_state()) {} + + // Class is non-copyable. + options(const options&) = delete; + options& operator=(const options&) = delete; + + // Destructor, which restores settings that were in effect before. + ~options() { + global_state() = previous_state; + } + + // Setter methods (affect the global state): + + options& disable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = false; return *this; } + + options& enable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = true; return *this; } + + options& disable_function_signatures() & { global_state().show_function_signatures = false; return *this; } + + options& enable_function_signatures() & { global_state().show_function_signatures = true; return *this; } + + // Getter methods (return the global state): + + static bool show_user_defined_docstrings() { return global_state().show_user_defined_docstrings; } + + static bool show_function_signatures() { return global_state().show_function_signatures; } + + // This type is not meant to be allocated on the heap. 
+ void* operator new(size_t) = delete; + +private: + + struct state { + bool show_user_defined_docstrings = true; //< Include user-supplied texts in docstrings. + bool show_function_signatures = true; //< Include auto-generated function signatures in docstrings. + }; + + static state &global_state() { + static state instance; + return instance; + } + + state previous_state; +}; + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/pybind11.h b/diffvg/pybind11/include/pybind11/pybind11.h new file mode 100644 index 0000000000000000000000000000000000000000..3a7d7b88495afddabff7f9604c94e828eb780152 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/pybind11.h @@ -0,0 +1,2235 @@ +/* + pybind11/pybind11.h: Main header file of the C++11 python + binding generator library + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#if defined(__INTEL_COMPILER) +# pragma warning push +# pragma warning disable 68 // integer conversion resulted in a change of sign +# pragma warning disable 186 // pointless comparison of unsigned integer with zero +# pragma warning disable 878 // incompatible exception specifications +# pragma warning disable 1334 // the "template" keyword used for syntactic disambiguation may only be used within a template +# pragma warning disable 1682 // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem) +# pragma warning disable 1786 // function "strdup" was declared deprecated +# pragma warning disable 1875 // offsetof applied to non-POD (Plain Old Data) types is nonstandard +# pragma warning disable 2196 // warning #2196: routine is both "inline" and "noinline" +#elif defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable: 4100) // warning C4100: Unreferenced formal parameter +# pragma warning(disable: 4127) // warning C4127: 
Conditional expression is constant +# pragma warning(disable: 4512) // warning C4512: Assignment operator was implicitly defined as deleted +# pragma warning(disable: 4800) // warning C4800: 'int': forcing value to bool 'true' or 'false' (performance warning) +# pragma warning(disable: 4996) // warning C4996: The POSIX name for this item is deprecated. Instead, use the ISO C and C++ conformant name +# pragma warning(disable: 4702) // warning C4702: unreachable code +# pragma warning(disable: 4522) // warning C4522: multiple assignment operators specified +#elif defined(__GNUG__) && !defined(__clang__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wunused-but-set-parameter" +# pragma GCC diagnostic ignored "-Wunused-but-set-variable" +# pragma GCC diagnostic ignored "-Wmissing-field-initializers" +# pragma GCC diagnostic ignored "-Wstrict-aliasing" +# pragma GCC diagnostic ignored "-Wattributes" +# if __GNUC__ >= 7 +# pragma GCC diagnostic ignored "-Wnoexcept-type" +# endif +#endif + +#include "attr.h" +#include "options.h" +#include "detail/class.h" +#include "detail/init.h" + +#if defined(__GNUG__) && !defined(__clang__) +# include +#endif + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +/// Wraps an arbitrary C++ function/method/lambda function/.. into a callable Python object +class cpp_function : public function { +public: + cpp_function() { } + cpp_function(std::nullptr_t) { } + + /// Construct a cpp_function from a vanilla function pointer + template + cpp_function(Return (*f)(Args...), const Extra&... extra) { + initialize(f, f, extra...); + } + + /// Construct a cpp_function from a lambda function (possibly with internal state) + template ::value>> + cpp_function(Func &&f, const Extra&... extra) { + initialize(std::forward(f), + (detail::function_signature_t *) nullptr, extra...); + } + + /// Construct a cpp_function from a class method (non-const, no ref-qualifier) + template + cpp_function(Return (Class::*f)(Arg...), const Extra&... 
extra) { + initialize([f](Class *c, Arg... args) -> Return { return (c->*f)(std::forward(args)...); }, + (Return (*) (Class *, Arg...)) nullptr, extra...); + } + + /// Construct a cpp_function from a class method (non-const, lvalue ref-qualifier) + /// A copy of the overload for non-const functions without explicit ref-qualifier + /// but with an added `&`. + template + cpp_function(Return (Class::*f)(Arg...)&, const Extra&... extra) { + initialize([f](Class *c, Arg... args) -> Return { return (c->*f)(args...); }, + (Return (*) (Class *, Arg...)) nullptr, extra...); + } + + /// Construct a cpp_function from a class method (const, no ref-qualifier) + template + cpp_function(Return (Class::*f)(Arg...) const, const Extra&... extra) { + initialize([f](const Class *c, Arg... args) -> Return { return (c->*f)(std::forward(args)...); }, + (Return (*)(const Class *, Arg ...)) nullptr, extra...); + } + + /// Construct a cpp_function from a class method (const, lvalue ref-qualifier) + /// A copy of the overload for const functions without explicit ref-qualifier + /// but with an added `&`. + template + cpp_function(Return (Class::*f)(Arg...) const&, const Extra&... extra) { + initialize([f](const Class *c, Arg... args) -> Return { return (c->*f)(args...); }, + (Return (*)(const Class *, Arg ...)) nullptr, extra...); + } + + /// Return the function name + object name() const { return attr("__name__"); } + +protected: + /// Space optimization: don't inline this frequently instantiated fragment + PYBIND11_NOINLINE detail::function_record *make_function_record() { + return new detail::function_record(); + } + + /// Special internal constructor for functors, lambda functions, etc. + template + void initialize(Func &&f, Return (*)(Args...), const Extra&... extra) { + using namespace detail; + struct capture { remove_reference_t f; }; + + /* Store the function including any extra state it might have (e.g. 
a lambda capture object) */ + auto rec = make_function_record(); + + /* Store the capture object directly in the function record if there is enough space */ + if (sizeof(capture) <= sizeof(rec->data)) { + /* Without these pragmas, GCC warns that there might not be + enough space to use the placement new operator. However, the + 'if' statement above ensures that this is the case. */ +#if defined(__GNUG__) && !defined(__clang__) && __GNUC__ >= 6 +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wplacement-new" +#endif + new ((capture *) &rec->data) capture { std::forward(f) }; +#if defined(__GNUG__) && !defined(__clang__) && __GNUC__ >= 6 +# pragma GCC diagnostic pop +#endif + if (!std::is_trivially_destructible::value) + rec->free_data = [](function_record *r) { ((capture *) &r->data)->~capture(); }; + } else { + rec->data[0] = new capture { std::forward(f) }; + rec->free_data = [](function_record *r) { delete ((capture *) r->data[0]); }; + } + + /* Type casters for the function arguments and return value */ + using cast_in = argument_loader; + using cast_out = make_caster< + conditional_t::value, void_type, Return> + >; + + static_assert(expected_num_args(sizeof...(Args), cast_in::has_args, cast_in::has_kwargs), + "The number of argument annotations does not match the number of function arguments"); + + /* Dispatch code which converts function arguments and performs the actual function call */ + rec->impl = [](function_call &call) -> handle { + cast_in args_converter; + + /* Try to cast the function arguments into the C++ domain */ + if (!args_converter.load_args(call)) + return PYBIND11_TRY_NEXT_OVERLOAD; + + /* Invoke call policy pre-call hook */ + process_attributes::precall(call); + + /* Get a pointer to the capture object */ + auto data = (sizeof(capture) <= sizeof(call.func.data) + ? 
&call.func.data : call.func.data[0]); + capture *cap = const_cast(reinterpret_cast(data)); + + /* Override policy for rvalues -- usually to enforce rvp::move on an rvalue */ + return_value_policy policy = return_value_policy_override::policy(call.func.policy); + + /* Function scope guard -- defaults to the compile-to-nothing `void_type` */ + using Guard = extract_guard_t; + + /* Perform the function call */ + handle result = cast_out::cast( + std::move(args_converter).template call(cap->f), policy, call.parent); + + /* Invoke call policy post-call hook */ + process_attributes::postcall(call, result); + + return result; + }; + + /* Process any user-provided function attributes */ + process_attributes::init(extra..., rec); + + { + constexpr bool has_kwonly_args = any_of...>::value, + has_args = any_of...>::value, + has_arg_annotations = any_of...>::value; + static_assert(has_arg_annotations || !has_kwonly_args, "py::kwonly requires the use of argument annotations"); + static_assert(!(has_args && has_kwonly_args), "py::kwonly cannot be combined with a py::args argument"); + } + + /* Generate a readable signature describing the function's arguments and return value types */ + static constexpr auto signature = _("(") + cast_in::arg_names + _(") -> ") + cast_out::name; + PYBIND11_DESCR_CONSTEXPR auto types = decltype(signature)::types(); + + /* Register the function with Python from generic (non-templated) code */ + initialize_generic(rec, signature.text, types.data(), sizeof...(Args)); + + if (cast_in::has_args) rec->has_args = true; + if (cast_in::has_kwargs) rec->has_kwargs = true; + + /* Stash some additional information used by an important optimization in 'functional.h' */ + using FunctionType = Return (*)(Args...); + constexpr bool is_function_ptr = + std::is_convertible::value && + sizeof(capture) == sizeof(void *); + if (is_function_ptr) { + rec->is_stateless = true; + rec->data[1] = const_cast(reinterpret_cast(&typeid(FunctionType))); + } + } + + /// Register a 
function call with Python (generic non-templated code goes here) + void initialize_generic(detail::function_record *rec, const char *text, + const std::type_info *const *types, size_t args) { + + /* Create copies of all referenced C-style strings */ + rec->name = strdup(rec->name ? rec->name : ""); + if (rec->doc) rec->doc = strdup(rec->doc); + for (auto &a: rec->args) { + if (a.name) + a.name = strdup(a.name); + if (a.descr) + a.descr = strdup(a.descr); + else if (a.value) + a.descr = strdup(repr(a.value).cast().c_str()); + } + + rec->is_constructor = !strcmp(rec->name, "__init__") || !strcmp(rec->name, "__setstate__"); + +#if !defined(NDEBUG) && !defined(PYBIND11_DISABLE_NEW_STYLE_INIT_WARNING) + if (rec->is_constructor && !rec->is_new_style_constructor) { + const auto class_name = std::string(((PyTypeObject *) rec->scope.ptr())->tp_name); + const auto func_name = std::string(rec->name); + PyErr_WarnEx( + PyExc_FutureWarning, + ("pybind11-bound class '" + class_name + "' is using an old-style " + "placement-new '" + func_name + "' which has been deprecated. See " + "the upgrade guide in pybind11's docs. This message is only visible " + "when compiled in debug mode.").c_str(), 0 + ); + } +#endif + + /* Generate a proper function signature */ + std::string signature; + size_t type_index = 0, arg_index = 0; + for (auto *pc = text; *pc != '\0'; ++pc) { + const auto c = *pc; + + if (c == '{') { + // Write arg name for everything except *args and **kwargs. + if (*(pc + 1) == '*') + continue; + + if (arg_index < rec->args.size() && rec->args[arg_index].name) { + signature += rec->args[arg_index].name; + } else if (arg_index == 0 && rec->is_method) { + signature += "self"; + } else { + signature += "arg" + std::to_string(arg_index - (rec->is_method ? 1 : 0)); + } + signature += ": "; + } else if (c == '}') { + // Write default value if available. 
+ if (arg_index < rec->args.size() && rec->args[arg_index].descr) { + signature += " = "; + signature += rec->args[arg_index].descr; + } + arg_index++; + } else if (c == '%') { + const std::type_info *t = types[type_index++]; + if (!t) + pybind11_fail("Internal error while parsing type signature (1)"); + if (auto tinfo = detail::get_type_info(*t)) { + handle th((PyObject *) tinfo->type); + signature += + th.attr("__module__").cast() + "." + + th.attr("__qualname__").cast(); // Python 3.3+, but we backport it to earlier versions + } else if (rec->is_new_style_constructor && arg_index == 0) { + // A new-style `__init__` takes `self` as `value_and_holder`. + // Rewrite it to the proper class type. + signature += + rec->scope.attr("__module__").cast() + "." + + rec->scope.attr("__qualname__").cast(); + } else { + std::string tname(t->name()); + detail::clean_type_id(tname); + signature += tname; + } + } else { + signature += c; + } + } + if (arg_index != args || types[type_index] != nullptr) + pybind11_fail("Internal error while parsing type signature (2)"); + +#if PY_MAJOR_VERSION < 3 + if (strcmp(rec->name, "__next__") == 0) { + std::free(rec->name); + rec->name = strdup("next"); + } else if (strcmp(rec->name, "__bool__") == 0) { + std::free(rec->name); + rec->name = strdup("__nonzero__"); + } +#endif + rec->signature = strdup(signature.c_str()); + rec->args.shrink_to_fit(); + rec->nargs = (std::uint16_t) args; + + if (rec->sibling && PYBIND11_INSTANCE_METHOD_CHECK(rec->sibling.ptr())) + rec->sibling = PYBIND11_INSTANCE_METHOD_GET_FUNCTION(rec->sibling.ptr()); + + detail::function_record *chain = nullptr, *chain_start = rec; + if (rec->sibling) { + if (PyCFunction_Check(rec->sibling.ptr())) { + auto rec_capsule = reinterpret_borrow(PyCFunction_GET_SELF(rec->sibling.ptr())); + chain = (detail::function_record *) rec_capsule; + /* Never append a method to an overload chain of a parent class; + instead, hide the parent's overloads in this case */ + if 
(!chain->scope.is(rec->scope)) + chain = nullptr; + } + // Don't trigger for things like the default __init__, which are wrapper_descriptors that we are intentionally replacing + else if (!rec->sibling.is_none() && rec->name[0] != '_') + pybind11_fail("Cannot overload existing non-function object \"" + std::string(rec->name) + + "\" with a function of the same name"); + } + + if (!chain) { + /* No existing overload was found, create a new function object */ + rec->def = new PyMethodDef(); + std::memset(rec->def, 0, sizeof(PyMethodDef)); + rec->def->ml_name = rec->name; + rec->def->ml_meth = reinterpret_cast(reinterpret_cast(*dispatcher)); + rec->def->ml_flags = METH_VARARGS | METH_KEYWORDS; + + capsule rec_capsule(rec, [](void *ptr) { + destruct((detail::function_record *) ptr); + }); + + object scope_module; + if (rec->scope) { + if (hasattr(rec->scope, "__module__")) { + scope_module = rec->scope.attr("__module__"); + } else if (hasattr(rec->scope, "__name__")) { + scope_module = rec->scope.attr("__name__"); + } + } + + m_ptr = PyCFunction_NewEx(rec->def, rec_capsule.ptr(), scope_module.ptr()); + if (!m_ptr) + pybind11_fail("cpp_function::cpp_function(): Could not allocate function object"); + } else { + /* Append at the end of the overload chain */ + m_ptr = rec->sibling.ptr(); + inc_ref(); + chain_start = chain; + if (chain->is_method != rec->is_method) + pybind11_fail("overloading a method with both static and instance methods is not supported; " + #if defined(NDEBUG) + "compile in debug mode for more details" + #else + "error while attempting to bind " + std::string(rec->is_method ? "instance" : "static") + " method " + + std::string(pybind11::str(rec->scope.attr("__name__"))) + "." 
+ std::string(rec->name) + signature + #endif + ); + while (chain->next) + chain = chain->next; + chain->next = rec; + } + + std::string signatures; + int index = 0; + /* Create a nice pydoc rec including all signatures and + docstrings of the functions in the overload chain */ + if (chain && options::show_function_signatures()) { + // First a generic signature + signatures += rec->name; + signatures += "(*args, **kwargs)\n"; + signatures += "Overloaded function.\n\n"; + } + // Then specific overload signatures + bool first_user_def = true; + for (auto it = chain_start; it != nullptr; it = it->next) { + if (options::show_function_signatures()) { + if (index > 0) signatures += "\n"; + if (chain) + signatures += std::to_string(++index) + ". "; + signatures += rec->name; + signatures += it->signature; + signatures += "\n"; + } + if (it->doc && strlen(it->doc) > 0 && options::show_user_defined_docstrings()) { + // If we're appending another docstring, and aren't printing function signatures, we + // need to append a newline first: + if (!options::show_function_signatures()) { + if (first_user_def) first_user_def = false; + else signatures += "\n"; + } + if (options::show_function_signatures()) signatures += "\n"; + signatures += it->doc; + if (options::show_function_signatures()) signatures += "\n"; + } + } + + /* Install docstring */ + PyCFunctionObject *func = (PyCFunctionObject *) m_ptr; + if (func->m_ml->ml_doc) + std::free(const_cast(func->m_ml->ml_doc)); + func->m_ml->ml_doc = strdup(signatures.c_str()); + + if (rec->is_method) { + m_ptr = PYBIND11_INSTANCE_METHOD_NEW(m_ptr, rec->scope.ptr()); + if (!m_ptr) + pybind11_fail("cpp_function::cpp_function(): Could not allocate instance method object"); + Py_DECREF(func); + } + } + + /// When a cpp_function is GCed, release any memory allocated by pybind11 + static void destruct(detail::function_record *rec) { + while (rec) { + detail::function_record *next = rec->next; + if (rec->free_data) + rec->free_data(rec); + 
std::free((char *) rec->name); + std::free((char *) rec->doc); + std::free((char *) rec->signature); + for (auto &arg: rec->args) { + std::free(const_cast(arg.name)); + std::free(const_cast(arg.descr)); + arg.value.dec_ref(); + } + if (rec->def) { + std::free(const_cast(rec->def->ml_doc)); + delete rec->def; + } + delete rec; + rec = next; + } + } + + /// Main dispatch logic for calls to functions bound using pybind11 + static PyObject *dispatcher(PyObject *self, PyObject *args_in, PyObject *kwargs_in) { + using namespace detail; + + /* Iterator over the list of potentially admissible overloads */ + const function_record *overloads = (function_record *) PyCapsule_GetPointer(self, nullptr), + *it = overloads; + + /* Need to know how many arguments + keyword arguments there are to pick the right overload */ + const size_t n_args_in = (size_t) PyTuple_GET_SIZE(args_in); + + handle parent = n_args_in > 0 ? PyTuple_GET_ITEM(args_in, 0) : nullptr, + result = PYBIND11_TRY_NEXT_OVERLOAD; + + auto self_value_and_holder = value_and_holder(); + if (overloads->is_constructor) { + const auto tinfo = get_type_info((PyTypeObject *) overloads->scope.ptr()); + const auto pi = reinterpret_cast(parent.ptr()); + self_value_and_holder = pi->get_value_and_holder(tinfo, false); + + if (!self_value_and_holder.type || !self_value_and_holder.inst) { + PyErr_SetString(PyExc_TypeError, "__init__(self, ...) called with invalid `self` argument"); + return nullptr; + } + + // If this value is already registered it must mean __init__ is invoked multiple times; + // we really can't support that in C++, so just ignore the second __init__. + if (self_value_and_holder.instance_registered()) + return none().release().ptr(); + } + + try { + // We do this in two passes: in the first pass, we load arguments with `convert=false`; + // in the second, we allow conversion (except for arguments with an explicit + // py::arg().noconvert()). 
This lets us prefer calls without conversion, with + // conversion as a fallback. + std::vector second_pass; + + // However, if there are no overloads, we can just skip the no-convert pass entirely + const bool overloaded = it != nullptr && it->next != nullptr; + + for (; it != nullptr; it = it->next) { + + /* For each overload: + 1. Copy all positional arguments we were given, also checking to make sure that + named positional arguments weren't *also* specified via kwarg. + 2. If we weren't given enough, try to make up the omitted ones by checking + whether they were provided by a kwarg matching the `py::arg("name")` name. If + so, use it (and remove it from kwargs; if not, see if the function binding + provided a default that we can use. + 3. Ensure that either all keyword arguments were "consumed", or that the function + takes a kwargs argument to accept unconsumed kwargs. + 4. Any positional arguments still left get put into a tuple (for args), and any + leftover kwargs get put into a dict. + 5. Pack everything into a vector; if we have py::args or py::kwargs, they are an + extra tuple or dict at the end of the positional arguments. + 6. Call the function call dispatcher (function_record::impl) + + If one of these fail, move on to the next overload and keep trying until we get a + result other than PYBIND11_TRY_NEXT_OVERLOAD. 
+ */ + + const function_record &func = *it; + size_t num_args = func.nargs; // Number of positional arguments that we need + if (func.has_args) --num_args; // (but don't count py::args + if (func.has_kwargs) --num_args; // or py::kwargs) + size_t pos_args = num_args - func.nargs_kwonly; + + if (!func.has_args && n_args_in > pos_args) + continue; // Too many positional arguments for this overload + + if (n_args_in < pos_args && func.args.size() < pos_args) + continue; // Not enough positional arguments given, and not enough defaults to fill in the blanks + + function_call call(func, parent); + + size_t args_to_copy = (std::min)(pos_args, n_args_in); // Protect std::min with parentheses + size_t args_copied = 0; + + // 0. Inject new-style `self` argument + if (func.is_new_style_constructor) { + // The `value` may have been preallocated by an old-style `__init__` + // if it was a preceding candidate for overload resolution. + if (self_value_and_holder) + self_value_and_holder.type->dealloc(self_value_and_holder); + + call.init_self = PyTuple_GET_ITEM(args_in, 0); + call.args.push_back(reinterpret_cast(&self_value_and_holder)); + call.args_convert.push_back(false); + ++args_copied; + } + + // 1. Copy any position arguments given. + bool bad_arg = false; + for (; args_copied < args_to_copy; ++args_copied) { + const argument_record *arg_rec = args_copied < func.args.size() ? &func.args[args_copied] : nullptr; + if (kwargs_in && arg_rec && arg_rec->name && PyDict_GetItemString(kwargs_in, arg_rec->name)) { + bad_arg = true; + break; + } + + handle arg(PyTuple_GET_ITEM(args_in, args_copied)); + if (arg_rec && !arg_rec->none && arg.is_none()) { + bad_arg = true; + break; + } + call.args.push_back(arg); + call.args_convert.push_back(arg_rec ? arg_rec->convert : true); + } + if (bad_arg) + continue; // Maybe it was meant for another overload (issue #688) + + // We'll need to copy this if we steal some kwargs for defaults + dict kwargs = reinterpret_borrow(kwargs_in); + + // 2. 
Check kwargs and, failing that, defaults that may help complete the list + if (args_copied < num_args) { + bool copied_kwargs = false; + + for (; args_copied < num_args; ++args_copied) { + const auto &arg = func.args[args_copied]; + + handle value; + if (kwargs_in && arg.name) + value = PyDict_GetItemString(kwargs.ptr(), arg.name); + + if (value) { + // Consume a kwargs value + if (!copied_kwargs) { + kwargs = reinterpret_steal(PyDict_Copy(kwargs.ptr())); + copied_kwargs = true; + } + PyDict_DelItemString(kwargs.ptr(), arg.name); + } else if (arg.value) { + value = arg.value; + } + + if (value) { + call.args.push_back(value); + call.args_convert.push_back(arg.convert); + } + else + break; + } + + if (args_copied < num_args) + continue; // Not enough arguments, defaults, or kwargs to fill the positional arguments + } + + // 3. Check everything was consumed (unless we have a kwargs arg) + if (kwargs && kwargs.size() > 0 && !func.has_kwargs) + continue; // Unconsumed kwargs, but no py::kwargs argument to accept them + + // 4a. If we have a py::args argument, create a new tuple with leftovers + if (func.has_args) { + tuple extra_args; + if (args_to_copy == 0) { + // We didn't copy out any position arguments from the args_in tuple, so we + // can reuse it directly without copying: + extra_args = reinterpret_borrow(args_in); + } else if (args_copied >= n_args_in) { + extra_args = tuple(0); + } else { + size_t args_size = n_args_in - args_copied; + extra_args = tuple(args_size); + for (size_t i = 0; i < args_size; ++i) { + extra_args[i] = PyTuple_GET_ITEM(args_in, args_copied + i); + } + } + call.args.push_back(extra_args); + call.args_convert.push_back(false); + call.args_ref = std::move(extra_args); + } + + // 4b. 
If we have a py::kwargs, pass on any remaining kwargs + if (func.has_kwargs) { + if (!kwargs.ptr()) + kwargs = dict(); // If we didn't get one, send an empty one + call.args.push_back(kwargs); + call.args_convert.push_back(false); + call.kwargs_ref = std::move(kwargs); + } + + // 5. Put everything in a vector. Not technically step 5, we've been building it + // in `call.args` all along. + #if !defined(NDEBUG) + if (call.args.size() != func.nargs || call.args_convert.size() != func.nargs) + pybind11_fail("Internal error: function call dispatcher inserted wrong number of arguments!"); + #endif + + std::vector second_pass_convert; + if (overloaded) { + // We're in the first no-convert pass, so swap out the conversion flags for a + // set of all-false flags. If the call fails, we'll swap the flags back in for + // the conversion-allowed call below. + second_pass_convert.resize(func.nargs, false); + call.args_convert.swap(second_pass_convert); + } + + // 6. Call the function. + try { + loader_life_support guard{}; + result = func.impl(call); + } catch (reference_cast_error &) { + result = PYBIND11_TRY_NEXT_OVERLOAD; + } + + if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) + break; + + if (overloaded) { + // The (overloaded) call failed; if the call has at least one argument that + // permits conversion (i.e. it hasn't been explicitly specified `.noconvert()`) + // then add this call to the list of second pass overloads to try. + for (size_t i = func.is_method ? 1 : 0; i < pos_args; i++) { + if (second_pass_convert[i]) { + // Found one: swap the converting flags back in and store the call for + // the second pass. 
+ call.args_convert.swap(second_pass_convert); + second_pass.push_back(std::move(call)); + break; + } + } + } + } + + if (overloaded && !second_pass.empty() && result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) { + // The no-conversion pass finished without success, try again with conversion allowed + for (auto &call : second_pass) { + try { + loader_life_support guard{}; + result = call.func.impl(call); + } catch (reference_cast_error &) { + result = PYBIND11_TRY_NEXT_OVERLOAD; + } + + if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) { + // The error reporting logic below expects 'it' to be valid, as it would be + // if we'd encountered this failure in the first-pass loop. + if (!result) + it = &call.func; + break; + } + } + } + } catch (error_already_set &e) { + e.restore(); + return nullptr; +#if defined(__GNUG__) && !defined(__clang__) + } catch ( abi::__forced_unwind& ) { + throw; +#endif + } catch (...) { + /* When an exception is caught, give each registered exception + translator a chance to translate it to a Python exception + in reverse order of registration. + + A translator may choose to do one of the following: + + - catch the exception and call PyErr_SetString or PyErr_SetObject + to set a standard (or custom) Python exception, or + - do nothing and let the exception fall through to the next translator, or + - delegate translation to the next translator by throwing a new type of exception. */ + + auto last_exception = std::current_exception(); + auto ®istered_exception_translators = get_internals().registered_exception_translators; + for (auto& translator : registered_exception_translators) { + try { + translator(last_exception); + } catch (...) 
{ + last_exception = std::current_exception(); + continue; + } + return nullptr; + } + PyErr_SetString(PyExc_SystemError, "Exception escaped from default exception translator!"); + return nullptr; + } + + auto append_note_if_missing_header_is_suspected = [](std::string &msg) { + if (msg.find("std::") != std::string::npos) { + msg += "\n\n" + "Did you forget to `#include `? Or ,\n" + ", , etc. Some automatic\n" + "conversions are optional and require extra headers to be included\n" + "when compiling your pybind11 module."; + } + }; + + if (result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) { + if (overloads->is_operator) + return handle(Py_NotImplemented).inc_ref().ptr(); + + std::string msg = std::string(overloads->name) + "(): incompatible " + + std::string(overloads->is_constructor ? "constructor" : "function") + + " arguments. The following argument types are supported:\n"; + + int ctr = 0; + for (const function_record *it2 = overloads; it2 != nullptr; it2 = it2->next) { + msg += " "+ std::to_string(++ctr) + ". "; + + bool wrote_sig = false; + if (overloads->is_constructor) { + // For a constructor, rewrite `(self: Object, arg0, ...) -> NoneType` as `Object(arg0, ...)` + std::string sig = it2->signature; + size_t start = sig.find('(') + 7; // skip "(self: " + if (start < sig.size()) { + // End at the , for the next argument + size_t end = sig.find(", "), next = end + 2; + size_t ret = sig.rfind(" -> "); + // Or the ), if there is no comma: + if (end >= sig.size()) next = end = sig.find(')'); + if (start < end && next < sig.size()) { + msg.append(sig, start, end - start); + msg += '('; + msg.append(sig, next, ret - next); + wrote_sig = true; + } + } + } + if (!wrote_sig) msg += it2->signature; + + msg += "\n"; + } + msg += "\nInvoked with: "; + auto args_ = reinterpret_borrow(args_in); + bool some_args = false; + for (size_t ti = overloads->is_constructor ? 
1 : 0; ti < args_.size(); ++ti) { + if (!some_args) some_args = true; + else msg += ", "; + try { + msg += pybind11::repr(args_[ti]); + } catch (const error_already_set&) { + msg += ""; + } + } + if (kwargs_in) { + auto kwargs = reinterpret_borrow(kwargs_in); + if (kwargs.size() > 0) { + if (some_args) msg += "; "; + msg += "kwargs: "; + bool first = true; + for (auto kwarg : kwargs) { + if (first) first = false; + else msg += ", "; + msg += pybind11::str("{}=").format(kwarg.first); + try { + msg += pybind11::repr(kwarg.second); + } catch (const error_already_set&) { + msg += ""; + } + } + } + } + + append_note_if_missing_header_is_suspected(msg); + PyErr_SetString(PyExc_TypeError, msg.c_str()); + return nullptr; + } else if (!result) { + std::string msg = "Unable to convert function return value to a " + "Python type! The signature was\n\t"; + msg += it->signature; + append_note_if_missing_header_is_suspected(msg); + PyErr_SetString(PyExc_TypeError, msg.c_str()); + return nullptr; + } else { + if (overloads->is_constructor && !self_value_and_holder.holder_constructed()) { + auto *pi = reinterpret_cast(parent.ptr()); + self_value_and_holder.type->init_instance(pi, nullptr); + } + return result.ptr(); + } + } +}; + +/// Wrapper for Python extension modules +class module : public object { +public: + PYBIND11_OBJECT_DEFAULT(module, object, PyModule_Check) + + /// Create a new top-level Python module with the given name and docstring + explicit module(const char *name, const char *doc = nullptr) { + if (!options::show_user_defined_docstrings()) doc = nullptr; +#if PY_MAJOR_VERSION >= 3 + PyModuleDef *def = new PyModuleDef(); + std::memset(def, 0, sizeof(PyModuleDef)); + def->m_name = name; + def->m_doc = doc; + def->m_size = -1; + Py_INCREF(def); + m_ptr = PyModule_Create(def); +#else + m_ptr = Py_InitModule3(name, nullptr, doc); +#endif + if (m_ptr == nullptr) + pybind11_fail("Internal error in module::module()"); + inc_ref(); + } + + /** \rst + Create Python binding 
for a new function within the module scope. ``Func`` + can be a plain C++ function, a function pointer, or a lambda function. For + details on the ``Extra&& ... extra`` argument, see section :ref:`extras`. + \endrst */ + template + module &def(const char *name_, Func &&f, const Extra& ... extra) { + cpp_function func(std::forward(f), name(name_), scope(*this), + sibling(getattr(*this, name_, none())), extra...); + // NB: allow overwriting here because cpp_function sets up a chain with the intention of + // overwriting (and has already checked internally that it isn't overwriting non-functions). + add_object(name_, func, true /* overwrite */); + return *this; + } + + /** \rst + Create and return a new Python submodule with the given name and docstring. + This also works recursively, i.e. + + .. code-block:: cpp + + py::module m("example", "pybind11 example plugin"); + py::module m2 = m.def_submodule("sub", "A submodule of 'example'"); + py::module m3 = m2.def_submodule("subsub", "A submodule of 'example.sub'"); + \endrst */ + module def_submodule(const char *name, const char *doc = nullptr) { + std::string full_name = std::string(PyModule_GetName(m_ptr)) + + std::string(".") + std::string(name); + auto result = reinterpret_borrow(PyImport_AddModule(full_name.c_str())); + if (doc && options::show_user_defined_docstrings()) + result.attr("__doc__") = pybind11::str(doc); + attr(name) = result; + return result; + } + + /// Import and return a module or throws `error_already_set`. + static module import(const char *name) { + PyObject *obj = PyImport_ImportModule(name); + if (!obj) + throw error_already_set(); + return reinterpret_steal(obj); + } + + /// Reload the module or throws `error_already_set`. + void reload() { + PyObject *obj = PyImport_ReloadModule(ptr()); + if (!obj) + throw error_already_set(); + *this = reinterpret_steal(obj); + } + + // Adds an object to the module using the given name. Throws if an object with the given name + // already exists. 
+ // + // overwrite should almost always be false: attempting to overwrite objects that pybind11 has + // established will, in most cases, break things. + PYBIND11_NOINLINE void add_object(const char *name, handle obj, bool overwrite = false) { + if (!overwrite && hasattr(*this, name)) + pybind11_fail("Error during initialization: multiple incompatible definitions with name \"" + + std::string(name) + "\""); + + PyModule_AddObject(ptr(), name, obj.inc_ref().ptr() /* steals a reference */); + } +}; + +/// \ingroup python_builtins +/// Return a dictionary representing the global variables in the current execution frame, +/// or ``__main__.__dict__`` if there is no frame (usually when the interpreter is embedded). +inline dict globals() { + PyObject *p = PyEval_GetGlobals(); + return reinterpret_borrow(p ? p : module::import("__main__").attr("__dict__").ptr()); +} + +PYBIND11_NAMESPACE_BEGIN(detail) +/// Generic support for creating new Python heap types +class generic_type : public object { + template friend class class_; +public: + PYBIND11_OBJECT_DEFAULT(generic_type, object, PyType_Check) +protected: + void initialize(const type_record &rec) { + if (rec.scope && hasattr(rec.scope, rec.name)) + pybind11_fail("generic_type: cannot initialize type \"" + std::string(rec.name) + + "\": an object with that name is already defined"); + + if (rec.module_local ? 
get_local_type_info(*rec.type) : get_global_type_info(*rec.type)) + pybind11_fail("generic_type: type \"" + std::string(rec.name) + + "\" is already registered!"); + + m_ptr = make_new_python_type(rec); + + /* Register supplemental type information in C++ dict */ + auto *tinfo = new detail::type_info(); + tinfo->type = (PyTypeObject *) m_ptr; + tinfo->cpptype = rec.type; + tinfo->type_size = rec.type_size; + tinfo->type_align = rec.type_align; + tinfo->operator_new = rec.operator_new; + tinfo->holder_size_in_ptrs = size_in_ptrs(rec.holder_size); + tinfo->init_instance = rec.init_instance; + tinfo->dealloc = rec.dealloc; + tinfo->simple_type = true; + tinfo->simple_ancestors = true; + tinfo->default_holder = rec.default_holder; + tinfo->module_local = rec.module_local; + + auto &internals = get_internals(); + auto tindex = std::type_index(*rec.type); + tinfo->direct_conversions = &internals.direct_conversions[tindex]; + if (rec.module_local) + registered_local_types_cpp()[tindex] = tinfo; + else + internals.registered_types_cpp[tindex] = tinfo; + internals.registered_types_py[(PyTypeObject *) m_ptr] = { tinfo }; + + if (rec.bases.size() > 1 || rec.multiple_inheritance) { + mark_parents_nonsimple(tinfo->type); + tinfo->simple_ancestors = false; + } + else if (rec.bases.size() == 1) { + auto parent_tinfo = get_type_info((PyTypeObject *) rec.bases[0].ptr()); + tinfo->simple_ancestors = parent_tinfo->simple_ancestors; + } + + if (rec.module_local) { + // Stash the local typeinfo and loader so that external modules can access it. + tinfo->module_local_load = &type_caster_generic::local_load; + setattr(m_ptr, PYBIND11_MODULE_LOCAL_ID, capsule(tinfo)); + } + } + + /// Helper function which tags all parents of a type using mult. 
inheritance + void mark_parents_nonsimple(PyTypeObject *value) { + auto t = reinterpret_borrow(value->tp_bases); + for (handle h : t) { + auto tinfo2 = get_type_info((PyTypeObject *) h.ptr()); + if (tinfo2) + tinfo2->simple_type = false; + mark_parents_nonsimple((PyTypeObject *) h.ptr()); + } + } + + void install_buffer_funcs( + buffer_info *(*get_buffer)(PyObject *, void *), + void *get_buffer_data) { + PyHeapTypeObject *type = (PyHeapTypeObject*) m_ptr; + auto tinfo = detail::get_type_info(&type->ht_type); + + if (!type->ht_type.tp_as_buffer) + pybind11_fail( + "To be able to register buffer protocol support for the type '" + + std::string(tinfo->type->tp_name) + + "' the associated class<>(..) invocation must " + "include the pybind11::buffer_protocol() annotation!"); + + tinfo->get_buffer = get_buffer; + tinfo->get_buffer_data = get_buffer_data; + } + + // rec_func must be set for either fget or fset. + void def_property_static_impl(const char *name, + handle fget, handle fset, + detail::function_record *rec_func) { + const auto is_static = rec_func && !(rec_func->is_method && rec_func->scope); + const auto has_doc = rec_func && rec_func->doc && pybind11::options::show_user_defined_docstrings(); + auto property = handle((PyObject *) (is_static ? get_internals().static_property_type + : &PyProperty_Type)); + attr(name) = property(fget.ptr() ? fget : none(), + fset.ptr() ? fset : none(), + /*deleter*/none(), + pybind11::str(has_doc ? rec_func->doc : "")); + } +}; + +/// Set the pointer to operator new if it exists. The cast is needed because it can be overloaded. +template (T::operator new))>> +void set_operator_new(type_record *r) { r->operator_new = &T::operator new; } + +template void set_operator_new(...) 
{ } + +template struct has_operator_delete : std::false_type { }; +template struct has_operator_delete(T::operator delete))>> + : std::true_type { }; +template struct has_operator_delete_size : std::false_type { }; +template struct has_operator_delete_size(T::operator delete))>> + : std::true_type { }; +/// Call class-specific delete if it exists or global otherwise. Can also be an overload set. +template ::value, int> = 0> +void call_operator_delete(T *p, size_t, size_t) { T::operator delete(p); } +template ::value && has_operator_delete_size::value, int> = 0> +void call_operator_delete(T *p, size_t s, size_t) { T::operator delete(p, s); } + +inline void call_operator_delete(void *p, size_t s, size_t a) { + (void)s; (void)a; + #if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912) + if (a > __STDCPP_DEFAULT_NEW_ALIGNMENT__) { + #ifdef __cpp_sized_deallocation + ::operator delete(p, s, std::align_val_t(a)); + #else + ::operator delete(p, std::align_val_t(a)); + #endif + return; + } + #endif + #ifdef __cpp_sized_deallocation + ::operator delete(p, s); + #else + ::operator delete(p); + #endif +} + +inline void add_class_method(object& cls, const char *name_, const cpp_function &cf) { + cls.attr(cf.name()) = cf; + if (strcmp(name_, "__eq__") == 0 && !cls.attr("__dict__").contains("__hash__")) { + cls.attr("__hash__") = none(); + } +} + +PYBIND11_NAMESPACE_END(detail) + +/// Given a pointer to a member function, cast it to its `Derived` version. +/// Forward everything else unchanged. +template +auto method_adaptor(F &&f) -> decltype(std::forward(f)) { return std::forward(f); } + +template +auto method_adaptor(Return (Class::*pmf)(Args...)) -> Return (Derived::*)(Args...) { + static_assert(detail::is_accessible_base_of::value, + "Cannot bind an inaccessible base class method; use a lambda definition instead"); + return pmf; +} + +template +auto method_adaptor(Return (Class::*pmf)(Args...) const) -> Return (Derived::*)(Args...) 
const { + static_assert(detail::is_accessible_base_of::value, + "Cannot bind an inaccessible base class method; use a lambda definition instead"); + return pmf; +} + +template +class class_ : public detail::generic_type { + template using is_holder = detail::is_holder_type; + template using is_subtype = detail::is_strict_base_of; + template using is_base = detail::is_strict_base_of; + // struct instead of using here to help MSVC: + template struct is_valid_class_option : + detail::any_of, is_subtype, is_base> {}; + +public: + using type = type_; + using type_alias = detail::exactly_one_t; + constexpr static bool has_alias = !std::is_void::value; + using holder_type = detail::exactly_one_t, options...>; + + static_assert(detail::all_of...>::value, + "Unknown/invalid class_ template parameters provided"); + + static_assert(!has_alias || std::is_polymorphic::value, + "Cannot use an alias class with a non-polymorphic type"); + + PYBIND11_OBJECT(class_, generic_type, PyType_Check) + + template + class_(handle scope, const char *name, const Extra &... extra) { + using namespace detail; + + // MI can only be specified via class_ template options, not constructor parameters + static_assert( + none_of...>::value || // no base class arguments, or: + ( constexpr_sum(is_pyobject::value...) == 1 && // Exactly one base + constexpr_sum(is_base::value...) 
== 0 && // no template option bases + none_of...>::value), // no multiple_inheritance attr + "Error: multiple inheritance bases must be specified via class_ template options"); + + type_record record; + record.scope = scope; + record.name = name; + record.type = &typeid(type); + record.type_size = sizeof(conditional_t); + record.type_align = alignof(conditional_t&); + record.holder_size = sizeof(holder_type); + record.init_instance = init_instance; + record.dealloc = dealloc; + record.default_holder = detail::is_instantiation::value; + + set_operator_new(&record); + + /* Register base classes specified via template arguments to class_, if any */ + PYBIND11_EXPAND_SIDE_EFFECTS(add_base(record)); + + /* Process optional arguments, if any */ + process_attributes::init(extra..., &record); + + generic_type::initialize(record); + + if (has_alias) { + auto &instances = record.module_local ? registered_local_types_cpp() : get_internals().registered_types_cpp; + instances[std::type_index(typeid(type_alias))] = instances[std::type_index(typeid(type))]; + } + } + + template ::value, int> = 0> + static void add_base(detail::type_record &rec) { + rec.add_base(typeid(Base), [](void *src) -> void * { + return static_cast(reinterpret_cast(src)); + }); + } + + template ::value, int> = 0> + static void add_base(detail::type_record &) { } + + template + class_ &def(const char *name_, Func&& f, const Extra&... extra) { + cpp_function cf(method_adaptor(std::forward(f)), name(name_), is_method(*this), + sibling(getattr(*this, name_, none())), extra...); + add_class_method(*this, name_, cf); + return *this; + } + + template class_ & + def_static(const char *name_, Func &&f, const Extra&... extra) { + static_assert(!std::is_member_function_pointer::value, + "def_static(...) 
called with a non-static member function pointer"); + cpp_function cf(std::forward(f), name(name_), scope(*this), + sibling(getattr(*this, name_, none())), extra...); + attr(cf.name()) = staticmethod(cf); + return *this; + } + + template + class_ &def(const detail::op_ &op, const Extra&... extra) { + op.execute(*this, extra...); + return *this; + } + + template + class_ & def_cast(const detail::op_ &op, const Extra&... extra) { + op.execute_cast(*this, extra...); + return *this; + } + + template + class_ &def(const detail::initimpl::constructor &init, const Extra&... extra) { + init.execute(*this, extra...); + return *this; + } + + template + class_ &def(const detail::initimpl::alias_constructor &init, const Extra&... extra) { + init.execute(*this, extra...); + return *this; + } + + template + class_ &def(detail::initimpl::factory &&init, const Extra&... extra) { + std::move(init).execute(*this, extra...); + return *this; + } + + template + class_ &def(detail::initimpl::pickle_factory &&pf, const Extra &...extra) { + std::move(pf).execute(*this, extra...); + return *this; + } + + template class_& def_buffer(Func &&func) { + struct capture { Func func; }; + capture *ptr = new capture { std::forward(func) }; + install_buffer_funcs([](PyObject *obj, void *ptr) -> buffer_info* { + detail::make_caster caster; + if (!caster.load(obj, false)) + return nullptr; + return new buffer_info(((capture *) ptr)->func(caster)); + }, ptr); + return *this; + } + + template + class_ &def_buffer(Return (Class::*func)(Args...)) { + return def_buffer([func] (type &obj) { return (obj.*func)(); }); + } + + template + class_ &def_buffer(Return (Class::*func)(Args...) const) { + return def_buffer([func] (const type &obj) { return (obj.*func)(); }); + } + + template + class_ &def_readwrite(const char *name, D C::*pm, const Extra&... 
extra) { + static_assert(std::is_same::value || std::is_base_of::value, "def_readwrite() requires a class member (or base class member)"); + cpp_function fget([pm](const type &c) -> const D &{ return c.*pm; }, is_method(*this)), + fset([pm](type &c, const D &value) { c.*pm = value; }, is_method(*this)); + def_property(name, fget, fset, return_value_policy::reference_internal, extra...); + return *this; + } + + template + class_ &def_readonly(const char *name, const D C::*pm, const Extra& ...extra) { + static_assert(std::is_same::value || std::is_base_of::value, "def_readonly() requires a class member (or base class member)"); + cpp_function fget([pm](const type &c) -> const D &{ return c.*pm; }, is_method(*this)); + def_property_readonly(name, fget, return_value_policy::reference_internal, extra...); + return *this; + } + + template + class_ &def_readwrite_static(const char *name, D *pm, const Extra& ...extra) { + cpp_function fget([pm](object) -> const D &{ return *pm; }, scope(*this)), + fset([pm](object, const D &value) { *pm = value; }, scope(*this)); + def_property_static(name, fget, fset, return_value_policy::reference, extra...); + return *this; + } + + template + class_ &def_readonly_static(const char *name, const D *pm, const Extra& ...extra) { + cpp_function fget([pm](object) -> const D &{ return *pm; }, scope(*this)); + def_property_readonly_static(name, fget, return_value_policy::reference, extra...); + return *this; + } + + /// Uses return_value_policy::reference_internal by default + template + class_ &def_property_readonly(const char *name, const Getter &fget, const Extra& ...extra) { + return def_property_readonly(name, cpp_function(method_adaptor(fget)), + return_value_policy::reference_internal, extra...); + } + + /// Uses cpp_function's return_value_policy by default + template + class_ &def_property_readonly(const char *name, const cpp_function &fget, const Extra& ...extra) { + return def_property(name, fget, nullptr, extra...); + } + + /// Uses 
return_value_policy::reference by default + template + class_ &def_property_readonly_static(const char *name, const Getter &fget, const Extra& ...extra) { + return def_property_readonly_static(name, cpp_function(fget), return_value_policy::reference, extra...); + } + + /// Uses cpp_function's return_value_policy by default + template + class_ &def_property_readonly_static(const char *name, const cpp_function &fget, const Extra& ...extra) { + return def_property_static(name, fget, nullptr, extra...); + } + + /// Uses return_value_policy::reference_internal by default + template + class_ &def_property(const char *name, const Getter &fget, const Setter &fset, const Extra& ...extra) { + return def_property(name, fget, cpp_function(method_adaptor(fset)), extra...); + } + template + class_ &def_property(const char *name, const Getter &fget, const cpp_function &fset, const Extra& ...extra) { + return def_property(name, cpp_function(method_adaptor(fget)), fset, + return_value_policy::reference_internal, extra...); + } + + /// Uses cpp_function's return_value_policy by default + template + class_ &def_property(const char *name, const cpp_function &fget, const cpp_function &fset, const Extra& ...extra) { + return def_property_static(name, fget, fset, is_method(*this), extra...); + } + + /// Uses return_value_policy::reference by default + template + class_ &def_property_static(const char *name, const Getter &fget, const cpp_function &fset, const Extra& ...extra) { + return def_property_static(name, cpp_function(fget), fset, return_value_policy::reference, extra...); + } + + /// Uses cpp_function's return_value_policy by default + template + class_ &def_property_static(const char *name, const cpp_function &fget, const cpp_function &fset, const Extra& ...extra) { + static_assert( 0 == detail::constexpr_sum(std::is_base_of::value...), + "Argument annotations are not allowed for properties"); + auto rec_fget = get_function_record(fget), rec_fset = get_function_record(fset); + 
auto *rec_active = rec_fget; + if (rec_fget) { + char *doc_prev = rec_fget->doc; /* 'extra' field may include a property-specific documentation string */ + detail::process_attributes::init(extra..., rec_fget); + if (rec_fget->doc && rec_fget->doc != doc_prev) { + free(doc_prev); + rec_fget->doc = strdup(rec_fget->doc); + } + } + if (rec_fset) { + char *doc_prev = rec_fset->doc; + detail::process_attributes::init(extra..., rec_fset); + if (rec_fset->doc && rec_fset->doc != doc_prev) { + free(doc_prev); + rec_fset->doc = strdup(rec_fset->doc); + } + if (! rec_active) rec_active = rec_fset; + } + def_property_static_impl(name, fget, fset, rec_active); + return *this; + } + +private: + /// Initialize holder object, variant 1: object derives from enable_shared_from_this + template + static void init_holder(detail::instance *inst, detail::value_and_holder &v_h, + const holder_type * /* unused */, const std::enable_shared_from_this * /* dummy */) { + try { + auto sh = std::dynamic_pointer_cast( + v_h.value_ptr()->shared_from_this()); + if (sh) { + new (std::addressof(v_h.holder())) holder_type(std::move(sh)); + v_h.set_holder_constructed(); + } + } catch (const std::bad_weak_ptr &) {} + + if (!v_h.holder_constructed() && inst->owned) { + new (std::addressof(v_h.holder())) holder_type(v_h.value_ptr()); + v_h.set_holder_constructed(); + } + } + + static void init_holder_from_existing(const detail::value_and_holder &v_h, + const holder_type *holder_ptr, std::true_type /*is_copy_constructible*/) { + new (std::addressof(v_h.holder())) holder_type(*reinterpret_cast(holder_ptr)); + } + + static void init_holder_from_existing(const detail::value_and_holder &v_h, + const holder_type *holder_ptr, std::false_type /*is_copy_constructible*/) { + new (std::addressof(v_h.holder())) holder_type(std::move(*const_cast(holder_ptr))); + } + + /// Initialize holder object, variant 2: try to construct from existing holder object, if possible + static void init_holder(detail::instance *inst, 
detail::value_and_holder &v_h, + const holder_type *holder_ptr, const void * /* dummy -- not enable_shared_from_this) */) { + if (holder_ptr) { + init_holder_from_existing(v_h, holder_ptr, std::is_copy_constructible()); + v_h.set_holder_constructed(); + } else if (inst->owned || detail::always_construct_holder::value) { + new (std::addressof(v_h.holder())) holder_type(v_h.value_ptr()); + v_h.set_holder_constructed(); + } + } + + /// Performs instance initialization including constructing a holder and registering the known + /// instance. Should be called as soon as the `type` value_ptr is set for an instance. Takes an + /// optional pointer to an existing holder to use; if not specified and the instance is + /// `.owned`, a new holder will be constructed to manage the value pointer. + static void init_instance(detail::instance *inst, const void *holder_ptr) { + auto v_h = inst->get_value_and_holder(detail::get_type_info(typeid(type))); + if (!v_h.instance_registered()) { + register_instance(inst, v_h.value_ptr(), v_h.type); + v_h.set_instance_registered(); + } + init_holder(inst, v_h, (const holder_type *) holder_ptr, v_h.value_ptr()); + } + + /// Deallocates an instance; via holder, if constructed; otherwise via operator delete. + static void dealloc(detail::value_and_holder &v_h) { + // We could be deallocating because we are cleaning up after a Python exception. + // If so, the Python error indicator will be set. We need to clear that before + // running the destructor, in case the destructor code calls more Python. + // If we don't, the Python API will exit with an exception, and pybind11 will + // throw error_already_set from the C++ destructor which is forbidden and triggers + // std::terminate(). 
+ error_scope scope; + if (v_h.holder_constructed()) { + v_h.holder().~holder_type(); + v_h.set_holder_constructed(false); + } + else { + detail::call_operator_delete(v_h.value_ptr(), + v_h.type->type_size, + v_h.type->type_align + ); + } + v_h.value_ptr() = nullptr; + } + + static detail::function_record *get_function_record(handle h) { + h = detail::get_function(h); + return h ? (detail::function_record *) reinterpret_borrow(PyCFunction_GET_SELF(h.ptr())) + : nullptr; + } +}; + +/// Binds an existing constructor taking arguments Args... +template detail::initimpl::constructor init() { return {}; } +/// Like `init()`, but the instance is always constructed through the alias class (even +/// when not inheriting on the Python side). +template detail::initimpl::alias_constructor init_alias() { return {}; } + +/// Binds a factory function as a constructor +template > +Ret init(Func &&f) { return {std::forward(f)}; } + +/// Dual-argument factory function: the first function is called when no alias is needed, the second +/// when an alias is needed (i.e. due to python-side inheritance). Arguments must be identical. +template > +Ret init(CFunc &&c, AFunc &&a) { + return {std::forward(c), std::forward(a)}; +} + +/// Binds pickling functions `__getstate__` and `__setstate__` and ensures that the type +/// returned by `__getstate__` is the same as the argument accepted by `__setstate__`. 
+template +detail::initimpl::pickle_factory pickle(GetState &&g, SetState &&s) { + return {std::forward(g), std::forward(s)}; +} + +PYBIND11_NAMESPACE_BEGIN(detail) +struct enum_base { + enum_base(handle base, handle parent) : m_base(base), m_parent(parent) { } + + PYBIND11_NOINLINE void init(bool is_arithmetic, bool is_convertible) { + m_base.attr("__entries") = dict(); + auto property = handle((PyObject *) &PyProperty_Type); + auto static_property = handle((PyObject *) get_internals().static_property_type); + + m_base.attr("__repr__") = cpp_function( + [](handle arg) -> str { + handle type = arg.get_type(); + object type_name = type.attr("__name__"); + dict entries = type.attr("__entries"); + for (const auto &kv : entries) { + object other = kv.second[int_(0)]; + if (other.equal(arg)) + return pybind11::str("{}.{}").format(type_name, kv.first); + } + return pybind11::str("{}.???").format(type_name); + }, name("__repr__"), is_method(m_base) + ); + + m_base.attr("name") = property(cpp_function( + [](handle arg) -> str { + dict entries = arg.get_type().attr("__entries"); + for (const auto &kv : entries) { + if (handle(kv.second[int_(0)]).equal(arg)) + return pybind11::str(kv.first); + } + return "???"; + }, name("name"), is_method(m_base) + )); + + m_base.attr("__doc__") = static_property(cpp_function( + [](handle arg) -> std::string { + std::string docstring; + dict entries = arg.attr("__entries"); + if (((PyTypeObject *) arg.ptr())->tp_doc) + docstring += std::string(((PyTypeObject *) arg.ptr())->tp_doc) + "\n\n"; + docstring += "Members:"; + for (const auto &kv : entries) { + auto key = std::string(pybind11::str(kv.first)); + auto comment = kv.second[int_(1)]; + docstring += "\n\n " + key; + if (!comment.is_none()) + docstring += " : " + (std::string) pybind11::str(comment); + } + return docstring; + }, name("__doc__") + ), none(), none(), ""); + + m_base.attr("__members__") = static_property(cpp_function( + [](handle arg) -> dict { + dict entries = 
arg.attr("__entries"), m; + for (const auto &kv : entries) + m[kv.first] = kv.second[int_(0)]; + return m; + }, name("__members__")), none(), none(), "" + ); + + #define PYBIND11_ENUM_OP_STRICT(op, expr, strict_behavior) \ + m_base.attr(op) = cpp_function( \ + [](object a, object b) { \ + if (!a.get_type().is(b.get_type())) \ + strict_behavior; \ + return expr; \ + }, \ + name(op), is_method(m_base)) + + #define PYBIND11_ENUM_OP_CONV(op, expr) \ + m_base.attr(op) = cpp_function( \ + [](object a_, object b_) { \ + int_ a(a_), b(b_); \ + return expr; \ + }, \ + name(op), is_method(m_base)) + + #define PYBIND11_ENUM_OP_CONV_LHS(op, expr) \ + m_base.attr(op) = cpp_function( \ + [](object a_, object b) { \ + int_ a(a_); \ + return expr; \ + }, \ + name(op), is_method(m_base)) + + if (is_convertible) { + PYBIND11_ENUM_OP_CONV_LHS("__eq__", !b.is_none() && a.equal(b)); + PYBIND11_ENUM_OP_CONV_LHS("__ne__", b.is_none() || !a.equal(b)); + + if (is_arithmetic) { + PYBIND11_ENUM_OP_CONV("__lt__", a < b); + PYBIND11_ENUM_OP_CONV("__gt__", a > b); + PYBIND11_ENUM_OP_CONV("__le__", a <= b); + PYBIND11_ENUM_OP_CONV("__ge__", a >= b); + PYBIND11_ENUM_OP_CONV("__and__", a & b); + PYBIND11_ENUM_OP_CONV("__rand__", a & b); + PYBIND11_ENUM_OP_CONV("__or__", a | b); + PYBIND11_ENUM_OP_CONV("__ror__", a | b); + PYBIND11_ENUM_OP_CONV("__xor__", a ^ b); + PYBIND11_ENUM_OP_CONV("__rxor__", a ^ b); + m_base.attr("__invert__") = cpp_function( + [](object arg) { return ~(int_(arg)); }, name("__invert__"), is_method(m_base)); + } + } else { + PYBIND11_ENUM_OP_STRICT("__eq__", int_(a).equal(int_(b)), return false); + PYBIND11_ENUM_OP_STRICT("__ne__", !int_(a).equal(int_(b)), return true); + + if (is_arithmetic) { + #define PYBIND11_THROW throw type_error("Expected an enumeration of matching type!"); + PYBIND11_ENUM_OP_STRICT("__lt__", int_(a) < int_(b), PYBIND11_THROW); + PYBIND11_ENUM_OP_STRICT("__gt__", int_(a) > int_(b), PYBIND11_THROW); + PYBIND11_ENUM_OP_STRICT("__le__", int_(a) <= 
int_(b), PYBIND11_THROW); + PYBIND11_ENUM_OP_STRICT("__ge__", int_(a) >= int_(b), PYBIND11_THROW); + #undef PYBIND11_THROW + } + } + + #undef PYBIND11_ENUM_OP_CONV_LHS + #undef PYBIND11_ENUM_OP_CONV + #undef PYBIND11_ENUM_OP_STRICT + + m_base.attr("__getstate__") = cpp_function( + [](object arg) { return int_(arg); }, name("__getstate__"), is_method(m_base)); + + m_base.attr("__hash__") = cpp_function( + [](object arg) { return int_(arg); }, name("__hash__"), is_method(m_base)); + } + + PYBIND11_NOINLINE void value(char const* name_, object value, const char *doc = nullptr) { + dict entries = m_base.attr("__entries"); + str name(name_); + if (entries.contains(name)) { + std::string type_name = (std::string) str(m_base.attr("__name__")); + throw value_error(type_name + ": element \"" + std::string(name_) + "\" already exists!"); + } + + entries[name] = std::make_pair(value, doc); + m_base.attr(name) = value; + } + + PYBIND11_NOINLINE void export_values() { + dict entries = m_base.attr("__entries"); + for (const auto &kv : entries) + m_parent.attr(kv.first) = kv.second[int_(0)]; + } + + handle m_base; + handle m_parent; +}; + +PYBIND11_NAMESPACE_END(detail) + +/// Binds C++ enumerations and enumeration classes to Python +template class enum_ : public class_ { +public: + using Base = class_; + using Base::def; + using Base::attr; + using Base::def_property_readonly; + using Base::def_property_readonly_static; + using Scalar = typename std::underlying_type::type; + + template + enum_(const handle &scope, const char *name, const Extra&... 
extra) + : class_(scope, name, extra...), m_base(*this, scope) { + constexpr bool is_arithmetic = detail::any_of...>::value; + constexpr bool is_convertible = std::is_convertible::value; + m_base.init(is_arithmetic, is_convertible); + + def(init([](Scalar i) { return static_cast(i); })); + def("__int__", [](Type value) { return (Scalar) value; }); + #if PY_MAJOR_VERSION < 3 + def("__long__", [](Type value) { return (Scalar) value; }); + #endif + #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 8) + def("__index__", [](Type value) { return (Scalar) value; }); + #endif + + attr("__setstate__") = cpp_function( + [](detail::value_and_holder &v_h, Scalar arg) { + detail::initimpl::setstate(v_h, static_cast(arg), + Py_TYPE(v_h.inst) != v_h.type->type); }, + detail::is_new_style_constructor(), + pybind11::name("__setstate__"), is_method(*this)); + } + + /// Export enumeration entries into the parent scope + enum_& export_values() { + m_base.export_values(); + return *this; + } + + /// Add an enumeration entry + enum_& value(char const* name, Type value, const char *doc = nullptr) { + m_base.value(name, pybind11::cast(value, return_value_policy::copy), doc); + return *this; + } + +private: + detail::enum_base m_base; +}; + +PYBIND11_NAMESPACE_BEGIN(detail) + + +inline void keep_alive_impl(handle nurse, handle patient) { + if (!nurse || !patient) + pybind11_fail("Could not activate keep_alive!"); + + if (patient.is_none() || nurse.is_none()) + return; /* Nothing to keep alive or nothing to be kept alive by */ + + auto tinfo = all_type_info(Py_TYPE(nurse.ptr())); + if (!tinfo.empty()) { + /* It's a pybind-registered type, so we can store the patient in the + * internal list. */ + add_patient(nurse.ptr(), patient.ptr()); + } + else { + /* Fall back to clever approach based on weak references taken from + * Boost.Python. This is not used for pybind-registered types because + * the objects can be destroyed out-of-order in a GC pass. 
*/ + cpp_function disable_lifesupport( + [patient](handle weakref) { patient.dec_ref(); weakref.dec_ref(); }); + + weakref wr(nurse, disable_lifesupport); + + patient.inc_ref(); /* reference patient and leak the weak reference */ + (void) wr.release(); + } +} + +PYBIND11_NOINLINE inline void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret) { + auto get_arg = [&](size_t n) { + if (n == 0) + return ret; + else if (n == 1 && call.init_self) + return call.init_self; + else if (n <= call.args.size()) + return call.args[n - 1]; + return handle(); + }; + + keep_alive_impl(get_arg(Nurse), get_arg(Patient)); +} + +inline std::pair all_type_info_get_cache(PyTypeObject *type) { + auto res = get_internals().registered_types_py +#ifdef __cpp_lib_unordered_map_try_emplace + .try_emplace(type); +#else + .emplace(type, std::vector()); +#endif + if (res.second) { + // New cache entry created; set up a weak reference to automatically remove it if the type + // gets destroyed: + weakref((PyObject *) type, cpp_function([type](handle wr) { + get_internals().registered_types_py.erase(type); + wr.dec_ref(); + })).release(); + } + + return res; +} + +template +struct iterator_state { + Iterator it; + Sentinel end; + bool first_or_done; +}; + +PYBIND11_NAMESPACE_END(detail) + +/// Makes a python iterator from a first and past-the-end C++ InputIterator. +template ()), + typename... Extra> +iterator make_iterator(Iterator first, Sentinel last, Extra &&... 
extra) { + typedef detail::iterator_state state; + + if (!detail::get_type_info(typeid(state), false)) { + class_(handle(), "iterator", pybind11::module_local()) + .def("__iter__", [](state &s) -> state& { return s; }) + .def("__next__", [](state &s) -> ValueType { + if (!s.first_or_done) + ++s.it; + else + s.first_or_done = false; + if (s.it == s.end) { + s.first_or_done = true; + throw stop_iteration(); + } + return *s.it; + }, std::forward(extra)..., Policy); + } + + return cast(state{first, last, true}); +} + +/// Makes an python iterator over the keys (`.first`) of a iterator over pairs from a +/// first and past-the-end InputIterator. +template ()).first), + typename... Extra> +iterator make_key_iterator(Iterator first, Sentinel last, Extra &&... extra) { + typedef detail::iterator_state state; + + if (!detail::get_type_info(typeid(state), false)) { + class_(handle(), "iterator", pybind11::module_local()) + .def("__iter__", [](state &s) -> state& { return s; }) + .def("__next__", [](state &s) -> KeyType { + if (!s.first_or_done) + ++s.it; + else + s.first_or_done = false; + if (s.it == s.end) { + s.first_or_done = true; + throw stop_iteration(); + } + return (*s.it).first; + }, std::forward(extra)..., Policy); + } + + return cast(state{first, last, true}); +} + +/// Makes an iterator over values of an stl container or other container supporting +/// `std::begin()`/`std::end()` +template iterator make_iterator(Type &value, Extra&&... extra) { + return make_iterator(std::begin(value), std::end(value), extra...); +} + +/// Makes an iterator over the keys (`.first`) of a stl map-like container supporting +/// `std::begin()`/`std::end()` +template iterator make_key_iterator(Type &value, Extra&&... 
extra) { + return make_key_iterator(std::begin(value), std::end(value), extra...); +} + +template void implicitly_convertible() { + struct set_flag { + bool &flag; + set_flag(bool &flag) : flag(flag) { flag = true; } + ~set_flag() { flag = false; } + }; + auto implicit_caster = [](PyObject *obj, PyTypeObject *type) -> PyObject * { + static bool currently_used = false; + if (currently_used) // implicit conversions are non-reentrant + return nullptr; + set_flag flag_helper(currently_used); + if (!detail::make_caster().load(obj, false)) + return nullptr; + tuple args(1); + args[0] = obj; + PyObject *result = PyObject_Call((PyObject *) type, args.ptr(), nullptr); + if (result == nullptr) + PyErr_Clear(); + return result; + }; + + if (auto tinfo = detail::get_type_info(typeid(OutputType))) + tinfo->implicit_conversions.push_back(implicit_caster); + else + pybind11_fail("implicitly_convertible: Unable to find type " + type_id()); +} + +template +void register_exception_translator(ExceptionTranslator&& translator) { + detail::get_internals().registered_exception_translators.push_front( + std::forward(translator)); +} + +/** + * Wrapper to generate a new Python exception type. + * + * This should only be used with PyErr_SetString for now. + * It is not (yet) possible to use as a py::base. + * Template type argument is reserved for future use. 
+ */ +template +class exception : public object { +public: + exception() = default; + exception(handle scope, const char *name, PyObject *base = PyExc_Exception) { + std::string full_name = scope.attr("__name__").cast() + + std::string(".") + name; + m_ptr = PyErr_NewException(const_cast(full_name.c_str()), base, NULL); + if (hasattr(scope, name)) + pybind11_fail("Error during initialization: multiple incompatible " + "definitions with name \"" + std::string(name) + "\""); + scope.attr(name) = *this; + } + + // Sets the current python exception to this exception object with the given message + void operator()(const char *message) { + PyErr_SetString(m_ptr, message); + } +}; + +PYBIND11_NAMESPACE_BEGIN(detail) +// Returns a reference to a function-local static exception object used in the simple +// register_exception approach below. (It would be simpler to have the static local variable +// directly in register_exception, but that makes clang <3.5 segfault - issue #1349). +template +exception &get_exception_object() { static exception ex; return ex; } +PYBIND11_NAMESPACE_END(detail) + +/** + * Registers a Python exception in `m` of the given `name` and installs an exception translator to + * translate the C++ exception to the created Python exception using the exceptions what() method. + * This is intended for simple exception translations; for more complex translation, register the + * exception object and translator directly. 
+ */ +template +exception ®ister_exception(handle scope, + const char *name, + PyObject *base = PyExc_Exception) { + auto &ex = detail::get_exception_object(); + if (!ex) ex = exception(scope, name, base); + + register_exception_translator([](std::exception_ptr p) { + if (!p) return; + try { + std::rethrow_exception(p); + } catch (const CppException &e) { + detail::get_exception_object()(e.what()); + } + }); + return ex; +} + +PYBIND11_NAMESPACE_BEGIN(detail) +PYBIND11_NOINLINE inline void print(tuple args, dict kwargs) { + auto strings = tuple(args.size()); + for (size_t i = 0; i < args.size(); ++i) { + strings[i] = str(args[i]); + } + auto sep = kwargs.contains("sep") ? kwargs["sep"] : cast(" "); + auto line = sep.attr("join")(strings); + + object file; + if (kwargs.contains("file")) { + file = kwargs["file"].cast(); + } else { + try { + file = module::import("sys").attr("stdout"); + } catch (const error_already_set &) { + /* If print() is called from code that is executed as + part of garbage collection during interpreter shutdown, + importing 'sys' can fail. Give up rather than crashing the + interpreter in this case. */ + return; + } + } + + auto write = file.attr("write"); + write(line); + write(kwargs.contains("end") ? kwargs["end"] : cast("\n")); + + if (kwargs.contains("flush") && kwargs["flush"].cast()) + file.attr("flush")(); +} +PYBIND11_NAMESPACE_END(detail) + +template +void print(Args &&...args) { + auto c = detail::collect_arguments(std::forward(args)...); + detail::print(c.args(), c.kwargs()); +} + +#if defined(WITH_THREAD) && !defined(PYPY_VERSION) + +/* The functions below essentially reproduce the PyGILState_* API using a RAII + * pattern, but there are a few important differences: + * + * 1. When acquiring the GIL from an non-main thread during the finalization + * phase, the GILState API blindly terminates the calling thread, which + * is often not what is wanted. This API does not do this. + * + * 2. 
The gil_scoped_release function can optionally cut the relationship + * of a PyThreadState and its associated thread, which allows moving it to + * another thread (this is a fairly rare/advanced use case). + * + * 3. The reference count of an acquired thread state can be controlled. This + * can be handy to prevent cases where callbacks issued from an external + * thread would otherwise constantly construct and destroy thread state data + * structures. + * + * See the Python bindings of NanoGUI (http://github.com/wjakob/nanogui) for an + * example which uses features 2 and 3 to migrate the Python thread of + * execution to another thread (to run the event loop on the original thread, + * in this case). + */ + +class gil_scoped_acquire { +public: + PYBIND11_NOINLINE gil_scoped_acquire() { + auto const &internals = detail::get_internals(); + tstate = (PyThreadState *) PYBIND11_TLS_GET_VALUE(internals.tstate); + + if (!tstate) { + /* Check if the GIL was acquired using the PyGILState_* API instead (e.g. if + calling from a Python thread). Since we use a different key, this ensures + we don't create a new thread state and deadlock in PyEval_AcquireThread + below. Note we don't save this state with internals.tstate, since we don't + create it we would fail to clear it (its reference count should be > 0). 
*/ + tstate = PyGILState_GetThisThreadState(); + } + + if (!tstate) { + tstate = PyThreadState_New(internals.istate); + #if !defined(NDEBUG) + if (!tstate) + pybind11_fail("scoped_acquire: could not create thread state!"); + #endif + tstate->gilstate_counter = 0; + PYBIND11_TLS_REPLACE_VALUE(internals.tstate, tstate); + } else { + release = detail::get_thread_state_unchecked() != tstate; + } + + if (release) { + /* Work around an annoying assertion in PyThreadState_Swap */ + #if defined(Py_DEBUG) + PyInterpreterState *interp = tstate->interp; + tstate->interp = nullptr; + #endif + PyEval_AcquireThread(tstate); + #if defined(Py_DEBUG) + tstate->interp = interp; + #endif + } + + inc_ref(); + } + + void inc_ref() { + ++tstate->gilstate_counter; + } + + PYBIND11_NOINLINE void dec_ref() { + --tstate->gilstate_counter; + #if !defined(NDEBUG) + if (detail::get_thread_state_unchecked() != tstate) + pybind11_fail("scoped_acquire::dec_ref(): thread state must be current!"); + if (tstate->gilstate_counter < 0) + pybind11_fail("scoped_acquire::dec_ref(): reference count underflow!"); + #endif + if (tstate->gilstate_counter == 0) { + #if !defined(NDEBUG) + if (!release) + pybind11_fail("scoped_acquire::dec_ref(): internal error!"); + #endif + PyThreadState_Clear(tstate); + PyThreadState_DeleteCurrent(); + PYBIND11_TLS_DELETE_VALUE(detail::get_internals().tstate); + release = false; + } + } + + PYBIND11_NOINLINE ~gil_scoped_acquire() { + dec_ref(); + if (release) + PyEval_SaveThread(); + } +private: + PyThreadState *tstate = nullptr; + bool release = true; +}; + +class gil_scoped_release { +public: + explicit gil_scoped_release(bool disassoc = false) : disassoc(disassoc) { + // `get_internals()` must be called here unconditionally in order to initialize + // `internals.tstate` for subsequent `gil_scoped_acquire` calls. Otherwise, an + // initialization race could occur as multiple threads try `gil_scoped_acquire`. 
+ const auto &internals = detail::get_internals(); + tstate = PyEval_SaveThread(); + if (disassoc) { + auto key = internals.tstate; + PYBIND11_TLS_DELETE_VALUE(key); + } + } + ~gil_scoped_release() { + if (!tstate) + return; + PyEval_RestoreThread(tstate); + if (disassoc) { + auto key = detail::get_internals().tstate; + PYBIND11_TLS_REPLACE_VALUE(key, tstate); + } + } +private: + PyThreadState *tstate; + bool disassoc; +}; +#elif defined(PYPY_VERSION) +class gil_scoped_acquire { + PyGILState_STATE state; +public: + gil_scoped_acquire() { state = PyGILState_Ensure(); } + ~gil_scoped_acquire() { PyGILState_Release(state); } +}; + +class gil_scoped_release { + PyThreadState *state; +public: + gil_scoped_release() { state = PyEval_SaveThread(); } + ~gil_scoped_release() { PyEval_RestoreThread(state); } +}; +#else +class gil_scoped_acquire { }; +class gil_scoped_release { }; +#endif + +error_already_set::~error_already_set() { + if (m_type) { + gil_scoped_acquire gil; + error_scope scope; + m_type.release().dec_ref(); + m_value.release().dec_ref(); + m_trace.release().dec_ref(); + } +} + +inline function get_type_overload(const void *this_ptr, const detail::type_info *this_type, const char *name) { + handle self = detail::get_object_handle(this_ptr, this_type); + if (!self) + return function(); + handle type = self.get_type(); + auto key = std::make_pair(type.ptr(), name); + + /* Cache functions that aren't overloaded in Python to avoid + many costly Python dictionary lookups below */ + auto &cache = detail::get_internals().inactive_overload_cache; + if (cache.find(key) != cache.end()) + return function(); + + function overload = getattr(self, name, function()); + if (overload.is_cpp_function()) { + cache.insert(key); + return function(); + } + + /* Don't call dispatch code if invoked from overridden function. + Unfortunately this doesn't work on PyPy. 
*/ +#if !defined(PYPY_VERSION) + PyFrameObject *frame = PyThreadState_Get()->frame; + if (frame && (std::string) str(frame->f_code->co_name) == name && + frame->f_code->co_argcount > 0) { + PyFrame_FastToLocals(frame); + PyObject *self_caller = PyDict_GetItem( + frame->f_locals, PyTuple_GET_ITEM(frame->f_code->co_varnames, 0)); + if (self_caller == self.ptr()) + return function(); + } +#else + /* PyPy currently doesn't provide a detailed cpyext emulation of + frame objects, so we have to emulate this using Python. This + is going to be slow..*/ + dict d; d["self"] = self; d["name"] = pybind11::str(name); + PyObject *result = PyRun_String( + "import inspect\n" + "frame = inspect.currentframe()\n" + "if frame is not None:\n" + " frame = frame.f_back\n" + " if frame is not None and str(frame.f_code.co_name) == name and " + "frame.f_code.co_argcount > 0:\n" + " self_caller = frame.f_locals[frame.f_code.co_varnames[0]]\n" + " if self_caller == self:\n" + " self = None\n", + Py_file_input, d.ptr(), d.ptr()); + if (result == nullptr) + throw error_already_set(); + if (d["self"].is_none()) + return function(); + Py_DECREF(result); +#endif + + return overload; +} + +/** \rst + Try to retrieve a python method by the provided name from the instance pointed to by the this_ptr. + + :this_ptr: The pointer to the object the overload should be retrieved for. This should be the first + non-trampoline class encountered in the inheritance chain. + :name: The name of the overloaded Python method to retrieve. + :return: The Python method by this name from the object or an empty function wrapper. + \endrst */ +template function get_overload(const T *this_ptr, const char *name) { + auto tinfo = detail::get_type_info(typeid(T)); + return tinfo ? get_type_overload(this_ptr, tinfo, name) : function(); +} + +#define PYBIND11_OVERLOAD_INT(ret_type, cname, name, ...) 
{ \ + pybind11::gil_scoped_acquire gil; \ + pybind11::function overload = pybind11::get_overload(static_cast(this), name); \ + if (overload) { \ + auto o = overload(__VA_ARGS__); \ + if (pybind11::detail::cast_is_temporary_value_reference::value) { \ + static pybind11::detail::overload_caster_t caster; \ + return pybind11::detail::cast_ref(std::move(o), caster); \ + } \ + else return pybind11::detail::cast_safe(std::move(o)); \ + } \ + } + +/** \rst + Macro to populate the virtual method in the trampoline class. This macro tries to look up a method named 'fn' + from the Python side, deals with the :ref:`gil` and necessary argument conversions to call this method and return + the appropriate type. See :ref:`overriding_virtuals` for more information. This macro should be used when the method + name in C is not the same as the method name in Python. For example with `__str__`. + + .. code-block:: cpp + + std::string toString() override { + PYBIND11_OVERLOAD_NAME( + std::string, // Return type (ret_type) + Animal, // Parent class (cname) + "__str__", // Name of method in Python (name) + toString, // Name of function in C++ (fn) + ); + } +\endrst */ +#define PYBIND11_OVERLOAD_NAME(ret_type, cname, name, fn, ...) \ + PYBIND11_OVERLOAD_INT(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__) \ + return cname::fn(__VA_ARGS__) + +/** \rst + Macro for pure virtual functions, this function is identical to :c:macro:`PYBIND11_OVERLOAD_NAME`, except that it + throws if no overload can be found. +\endrst */ +#define PYBIND11_OVERLOAD_PURE_NAME(ret_type, cname, name, fn, ...) \ + PYBIND11_OVERLOAD_INT(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__) \ + pybind11::pybind11_fail("Tried to call pure virtual function \"" PYBIND11_STRINGIFY(cname) "::" name "\""); + +/** \rst + Macro to populate the virtual method in the trampoline class. 
This macro tries to look up the method + from the Python side, deals with the :ref:`gil` and necessary argument conversions to call this method and return + the appropriate type. This macro should be used if the method name in C and in Python are identical. + See :ref:`overriding_virtuals` for more information. + + .. code-block:: cpp + + class PyAnimal : public Animal { + public: + // Inherit the constructors + using Animal::Animal; + + // Trampoline (need one for each virtual function) + std::string go(int n_times) override { + PYBIND11_OVERLOAD_PURE( + std::string, // Return type (ret_type) + Animal, // Parent class (cname) + go, // Name of function in C++ (must match Python name) (fn) + n_times // Argument(s) (...) + ); + } + }; +\endrst */ +#define PYBIND11_OVERLOAD(ret_type, cname, fn, ...) \ + PYBIND11_OVERLOAD_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__) + +/** \rst + Macro for pure virtual functions, this function is identical to :c:macro:`PYBIND11_OVERLOAD`, except that it throws + if no overload can be found. +\endrst */ +#define PYBIND11_OVERLOAD_PURE(ret_type, cname, fn, ...) \ + PYBIND11_OVERLOAD_PURE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__) + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +# pragma warning(pop) +#elif defined(__GNUG__) && !defined(__clang__) +# pragma GCC diagnostic pop +#endif diff --git a/diffvg/pybind11/include/pybind11/pytypes.h b/diffvg/pybind11/include/pybind11/pytypes.h new file mode 100644 index 0000000000000000000000000000000000000000..bea34cd9365c5191be29e986480c8434a8e0201e --- /dev/null +++ b/diffvg/pybind11/include/pybind11/pytypes.h @@ -0,0 +1,1608 @@ +/* + pybind11/pytypes.h: Convenience wrapper classes for basic Python types + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "detail/common.h" +#include "buffer_info.h" +#include +#include + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +/* A few forward declarations */ +class handle; class object; +class str; class iterator; +struct arg; struct arg_v; + +PYBIND11_NAMESPACE_BEGIN(detail) +class args_proxy; +inline bool isinstance_generic(handle obj, const std::type_info &tp); + +// Accessor forward declarations +template class accessor; +namespace accessor_policies { + struct obj_attr; + struct str_attr; + struct generic_item; + struct sequence_item; + struct list_item; + struct tuple_item; +} +using obj_attr_accessor = accessor; +using str_attr_accessor = accessor; +using item_accessor = accessor; +using sequence_accessor = accessor; +using list_accessor = accessor; +using tuple_accessor = accessor; + +/// Tag and check to identify a class which implements the Python object API +class pyobject_tag { }; +template using is_pyobject = std::is_base_of>; + +/** \rst + A mixin class which adds common functions to `handle`, `object` and various accessors. + The only requirement for `Derived` is to implement ``PyObject *Derived::ptr() const``. +\endrst */ +template +class object_api : public pyobject_tag { + const Derived &derived() const { return static_cast(*this); } + +public: + /** \rst + Return an iterator equivalent to calling ``iter()`` in Python. The object + must be a collection which supports the iteration protocol. + \endrst */ + iterator begin() const; + /// Return a sentinel which ends iteration. + iterator end() const; + + /** \rst + Return an internal functor to invoke the object's sequence protocol. Casting + the returned ``detail::item_accessor`` instance to a `handle` or `object` + subclass causes a corresponding call to ``__getitem__``. Assigning a `handle` + or `object` subclass causes a call to ``__setitem__``. 
+ \endrst */ + item_accessor operator[](handle key) const; + /// See above (the only difference is that they key is provided as a string literal) + item_accessor operator[](const char *key) const; + + /** \rst + Return an internal functor to access the object's attributes. Casting the + returned ``detail::obj_attr_accessor`` instance to a `handle` or `object` + subclass causes a corresponding call to ``getattr``. Assigning a `handle` + or `object` subclass causes a call to ``setattr``. + \endrst */ + obj_attr_accessor attr(handle key) const; + /// See above (the only difference is that they key is provided as a string literal) + str_attr_accessor attr(const char *key) const; + + /** \rst + Matches * unpacking in Python, e.g. to unpack arguments out of a ``tuple`` + or ``list`` for a function call. Applying another * to the result yields + ** unpacking, e.g. to unpack a dict as function keyword arguments. + See :ref:`calling_python_functions`. + \endrst */ + args_proxy operator*() const; + + /// Check if the given item is contained within this object, i.e. ``item in obj``. + template bool contains(T &&item) const; + + /** \rst + Assuming the Python object is a function or implements the ``__call__`` + protocol, ``operator()`` invokes the underlying function, passing an + arbitrary set of parameters. The result is returned as a `object` and + may need to be converted back into a Python object using `handle::cast()`. + + When some of the arguments cannot be converted to Python objects, the + function will throw a `cast_error` exception. When the Python function + call fails, a `error_already_set` exception is thrown. + \endrst */ + template + object operator()(Args &&...args) const; + template + PYBIND11_DEPRECATED("call(...) was deprecated in favor of operator()(...)") + object call(Args&&... args) const; + + /// Equivalent to ``obj is other`` in Python. 
+ bool is(object_api const& other) const { return derived().ptr() == other.derived().ptr(); } + /// Equivalent to ``obj is None`` in Python. + bool is_none() const { return derived().ptr() == Py_None; } + /// Equivalent to obj == other in Python + bool equal(object_api const &other) const { return rich_compare(other, Py_EQ); } + bool not_equal(object_api const &other) const { return rich_compare(other, Py_NE); } + bool operator<(object_api const &other) const { return rich_compare(other, Py_LT); } + bool operator<=(object_api const &other) const { return rich_compare(other, Py_LE); } + bool operator>(object_api const &other) const { return rich_compare(other, Py_GT); } + bool operator>=(object_api const &other) const { return rich_compare(other, Py_GE); } + + object operator-() const; + object operator~() const; + object operator+(object_api const &other) const; + object operator+=(object_api const &other) const; + object operator-(object_api const &other) const; + object operator-=(object_api const &other) const; + object operator*(object_api const &other) const; + object operator*=(object_api const &other) const; + object operator/(object_api const &other) const; + object operator/=(object_api const &other) const; + object operator|(object_api const &other) const; + object operator|=(object_api const &other) const; + object operator&(object_api const &other) const; + object operator&=(object_api const &other) const; + object operator^(object_api const &other) const; + object operator^=(object_api const &other) const; + object operator<<(object_api const &other) const; + object operator<<=(object_api const &other) const; + object operator>>(object_api const &other) const; + object operator>>=(object_api const &other) const; + + PYBIND11_DEPRECATED("Use py::str(obj) instead") + pybind11::str str() const; + + /// Get or set the object's docstring, i.e. ``obj.__doc__``. 
+ str_attr_accessor doc() const; + + /// Return the object's current reference count + int ref_count() const { return static_cast(Py_REFCNT(derived().ptr())); } + /// Return a handle to the Python type object underlying the instance + handle get_type() const; + +private: + bool rich_compare(object_api const &other, int value) const; +}; + +PYBIND11_NAMESPACE_END(detail) + +/** \rst + Holds a reference to a Python object (no reference counting) + + The `handle` class is a thin wrapper around an arbitrary Python object (i.e. a + ``PyObject *`` in Python's C API). It does not perform any automatic reference + counting and merely provides a basic C++ interface to various Python API functions. + + .. seealso:: + The `object` class inherits from `handle` and adds automatic reference + counting features. +\endrst */ +class handle : public detail::object_api { +public: + /// The default constructor creates a handle with a ``nullptr``-valued pointer + handle() = default; + /// Creates a ``handle`` from the given raw Python object pointer + handle(PyObject *ptr) : m_ptr(ptr) { } // Allow implicit conversion from PyObject* + + /// Return the underlying ``PyObject *`` pointer + PyObject *ptr() const { return m_ptr; } + PyObject *&ptr() { return m_ptr; } + + /** \rst + Manually increase the reference count of the Python object. Usually, it is + preferable to use the `object` class which derives from `handle` and calls + this function automatically. Returns a reference to itself. + \endrst */ + const handle& inc_ref() const & { Py_XINCREF(m_ptr); return *this; } + + /** \rst + Manually decrease the reference count of the Python object. Usually, it is + preferable to use the `object` class which derives from `handle` and calls + this function automatically. Returns a reference to itself. + \endrst */ + const handle& dec_ref() const & { Py_XDECREF(m_ptr); return *this; } + + /** \rst + Attempt to cast the Python object into the given C++ type. 
A `cast_error` + will be throw upon failure. + \endrst */ + template T cast() const; + /// Return ``true`` when the `handle` wraps a valid Python object + explicit operator bool() const { return m_ptr != nullptr; } + /** \rst + Deprecated: Check that the underlying pointers are the same. + Equivalent to ``obj1 is obj2`` in Python. + \endrst */ + PYBIND11_DEPRECATED("Use obj1.is(obj2) instead") + bool operator==(const handle &h) const { return m_ptr == h.m_ptr; } + PYBIND11_DEPRECATED("Use !obj1.is(obj2) instead") + bool operator!=(const handle &h) const { return m_ptr != h.m_ptr; } + PYBIND11_DEPRECATED("Use handle::operator bool() instead") + bool check() const { return m_ptr != nullptr; } +protected: + PyObject *m_ptr = nullptr; +}; + +/** \rst + Holds a reference to a Python object (with reference counting) + + Like `handle`, the `object` class is a thin wrapper around an arbitrary Python + object (i.e. a ``PyObject *`` in Python's C API). In contrast to `handle`, it + optionally increases the object's reference count upon construction, and it + *always* decreases the reference count when the `object` instance goes out of + scope and is destructed. When using `object` instances consistently, it is much + easier to get reference counting right at the first attempt. +\endrst */ +class object : public handle { +public: + object() = default; + PYBIND11_DEPRECATED("Use reinterpret_borrow() or reinterpret_steal()") + object(handle h, bool is_borrowed) : handle(h) { if (is_borrowed) inc_ref(); } + /// Copy constructor; always increases the reference count + object(const object &o) : handle(o) { inc_ref(); } + /// Move constructor; steals the object from ``other`` and preserves its reference count + object(object &&other) noexcept { m_ptr = other.m_ptr; other.m_ptr = nullptr; } + /// Destructor; automatically calls `handle::dec_ref()` + ~object() { dec_ref(); } + + /** \rst + Resets the internal pointer to ``nullptr`` without decreasing the + object's reference count. 
The function returns a raw handle to the original + Python object. + \endrst */ + handle release() { + PyObject *tmp = m_ptr; + m_ptr = nullptr; + return handle(tmp); + } + + object& operator=(const object &other) { + other.inc_ref(); + dec_ref(); + m_ptr = other.m_ptr; + return *this; + } + + object& operator=(object &&other) noexcept { + if (this != &other) { + handle temp(m_ptr); + m_ptr = other.m_ptr; + other.m_ptr = nullptr; + temp.dec_ref(); + } + return *this; + } + + // Calling cast() on an object lvalue just copies (via handle::cast) + template T cast() const &; + // Calling on an object rvalue does a move, if needed and/or possible + template T cast() &&; + +protected: + // Tags for choosing constructors from raw PyObject * + struct borrowed_t { }; + struct stolen_t { }; + + template friend T reinterpret_borrow(handle); + template friend T reinterpret_steal(handle); + +public: + // Only accessible from derived classes and the reinterpret_* functions + object(handle h, borrowed_t) : handle(h) { inc_ref(); } + object(handle h, stolen_t) : handle(h) { } +}; + +/** \rst + Declare that a `handle` or ``PyObject *`` is a certain type and borrow the reference. + The target type ``T`` must be `object` or one of its derived classes. The function + doesn't do any conversions or checks. It's up to the user to make sure that the + target type is correct. + + .. code-block:: cpp + + PyObject *p = PyList_GetItem(obj, index); + py::object o = reinterpret_borrow(p); + // or + py::tuple t = reinterpret_borrow(p); // <-- `p` must be already be a `tuple` +\endrst */ +template T reinterpret_borrow(handle h) { return {h, object::borrowed_t{}}; } + +/** \rst + Like `reinterpret_borrow`, but steals the reference. + + .. 
code-block:: cpp + + PyObject *p = PyObject_Str(obj); + py::str s = reinterpret_steal(p); // <-- `p` must be already be a `str` +\endrst */ +template T reinterpret_steal(handle h) { return {h, object::stolen_t{}}; } + +PYBIND11_NAMESPACE_BEGIN(detail) +inline std::string error_string(); +PYBIND11_NAMESPACE_END(detail) + +/// Fetch and hold an error which was already set in Python. An instance of this is typically +/// thrown to propagate python-side errors back through C++ which can either be caught manually or +/// else falls back to the function dispatcher (which then raises the captured error back to +/// python). +class error_already_set : public std::runtime_error { +public: + /// Constructs a new exception from the current Python error indicator, if any. The current + /// Python error indicator will be cleared. + error_already_set() : std::runtime_error(detail::error_string()) { + PyErr_Fetch(&m_type.ptr(), &m_value.ptr(), &m_trace.ptr()); + } + + error_already_set(const error_already_set &) = default; + error_already_set(error_already_set &&) = default; + + inline ~error_already_set(); + + /// Give the currently-held error back to Python, if any. If there is currently a Python error + /// already set it is cleared first. After this call, the current object no longer stores the + /// error variables (but the `.what()` string is still available). + void restore() { PyErr_Restore(m_type.release().ptr(), m_value.release().ptr(), m_trace.release().ptr()); } + + /// If it is impossible to raise the currently-held error, such as in destructor, we can write + /// it out using Python's unraisable hook (sys.unraisablehook). The error context should be + /// some object whose repr() helps identify the location of the error. Python already knows the + /// type and value of the error, so there is no need to repeat that. For example, __func__ could + /// be helpful. After this call, the current object no longer stores the error variables, + /// and neither does Python. 
+ void discard_as_unraisable(object err_context) { + restore(); + PyErr_WriteUnraisable(err_context.ptr()); + } + void discard_as_unraisable(const char *err_context) { + discard_as_unraisable(reinterpret_steal(PYBIND11_FROM_STRING(err_context))); + } + + // Does nothing; provided for backwards compatibility. + PYBIND11_DEPRECATED("Use of error_already_set.clear() is deprecated") + void clear() {} + + /// Check if the currently trapped error type matches the given Python exception class (or a + /// subclass thereof). May also be passed a tuple to search for any exception class matches in + /// the given tuple. + bool matches(handle exc) const { return PyErr_GivenExceptionMatches(m_type.ptr(), exc.ptr()); } + + const object& type() const { return m_type; } + const object& value() const { return m_value; } + const object& trace() const { return m_trace; } + +private: + object m_type, m_value, m_trace; +}; + +/** \defgroup python_builtins _ + Unless stated otherwise, the following C++ functions behave the same + as their Python counterparts. + */ + +/** \ingroup python_builtins + \rst + Return true if ``obj`` is an instance of ``T``. Type ``T`` must be a subclass of + `object` or a class which was exposed to Python as ``py::class_``. +\endrst */ +template ::value, int> = 0> +bool isinstance(handle obj) { return T::check_(obj); } + +template ::value, int> = 0> +bool isinstance(handle obj) { return detail::isinstance_generic(obj, typeid(T)); } + +template <> inline bool isinstance(handle) = delete; +template <> inline bool isinstance(handle obj) { return obj.ptr() != nullptr; } + +/// \ingroup python_builtins +/// Return true if ``obj`` is an instance of the ``type``. 
+inline bool isinstance(handle obj, handle type) { + const auto result = PyObject_IsInstance(obj.ptr(), type.ptr()); + if (result == -1) + throw error_already_set(); + return result != 0; +} + +/// \addtogroup python_builtins +/// @{ +inline bool hasattr(handle obj, handle name) { + return PyObject_HasAttr(obj.ptr(), name.ptr()) == 1; +} + +inline bool hasattr(handle obj, const char *name) { + return PyObject_HasAttrString(obj.ptr(), name) == 1; +} + +inline void delattr(handle obj, handle name) { + if (PyObject_DelAttr(obj.ptr(), name.ptr()) != 0) { throw error_already_set(); } +} + +inline void delattr(handle obj, const char *name) { + if (PyObject_DelAttrString(obj.ptr(), name) != 0) { throw error_already_set(); } +} + +inline object getattr(handle obj, handle name) { + PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr()); + if (!result) { throw error_already_set(); } + return reinterpret_steal(result); +} + +inline object getattr(handle obj, const char *name) { + PyObject *result = PyObject_GetAttrString(obj.ptr(), name); + if (!result) { throw error_already_set(); } + return reinterpret_steal(result); +} + +inline object getattr(handle obj, handle name, handle default_) { + if (PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr())) { + return reinterpret_steal(result); + } else { + PyErr_Clear(); + return reinterpret_borrow(default_); + } +} + +inline object getattr(handle obj, const char *name, handle default_) { + if (PyObject *result = PyObject_GetAttrString(obj.ptr(), name)) { + return reinterpret_steal(result); + } else { + PyErr_Clear(); + return reinterpret_borrow(default_); + } +} + +inline void setattr(handle obj, handle name, handle value) { + if (PyObject_SetAttr(obj.ptr(), name.ptr(), value.ptr()) != 0) { throw error_already_set(); } +} + +inline void setattr(handle obj, const char *name, handle value) { + if (PyObject_SetAttrString(obj.ptr(), name, value.ptr()) != 0) { throw error_already_set(); } +} + +inline ssize_t hash(handle obj) { 
+ auto h = PyObject_Hash(obj.ptr()); + if (h == -1) { throw error_already_set(); } + return h; +} + +/// @} python_builtins + +PYBIND11_NAMESPACE_BEGIN(detail) +inline handle get_function(handle value) { + if (value) { +#if PY_MAJOR_VERSION >= 3 + if (PyInstanceMethod_Check(value.ptr())) + value = PyInstanceMethod_GET_FUNCTION(value.ptr()); + else +#endif + if (PyMethod_Check(value.ptr())) + value = PyMethod_GET_FUNCTION(value.ptr()); + } + return value; +} + +// Helper aliases/functions to support implicit casting of values given to python accessors/methods. +// When given a pyobject, this simply returns the pyobject as-is; for other C++ type, the value goes +// through pybind11::cast(obj) to convert it to an `object`. +template ::value, int> = 0> +auto object_or_cast(T &&o) -> decltype(std::forward(o)) { return std::forward(o); } +// The following casting version is implemented in cast.h: +template ::value, int> = 0> +object object_or_cast(T &&o); +// Match a PyObject*, which we want to convert directly to handle via its converting constructor +inline handle object_or_cast(PyObject *ptr) { return ptr; } + +template +class accessor : public object_api> { + using key_type = typename Policy::key_type; + +public: + accessor(handle obj, key_type key) : obj(obj), key(std::move(key)) { } + accessor(const accessor &) = default; + accessor(accessor &&) = default; + + // accessor overload required to override default assignment operator (templates are not allowed + // to replace default compiler-generated assignments). + void operator=(const accessor &a) && { std::move(*this).operator=(handle(a)); } + void operator=(const accessor &a) & { operator=(handle(a)); } + + template void operator=(T &&value) && { + Policy::set(obj, key, object_or_cast(std::forward(value))); + } + template void operator=(T &&value) & { + get_cache() = reinterpret_borrow(object_or_cast(std::forward(value))); + } + + template + PYBIND11_DEPRECATED("Use of obj.attr(...) 
as bool is deprecated in favor of pybind11::hasattr(obj, ...)") + explicit operator enable_if_t::value || + std::is_same::value, bool>() const { + return hasattr(obj, key); + } + template + PYBIND11_DEPRECATED("Use of obj[key] as bool is deprecated in favor of obj.contains(key)") + explicit operator enable_if_t::value, bool>() const { + return obj.contains(key); + } + + operator object() const { return get_cache(); } + PyObject *ptr() const { return get_cache().ptr(); } + template T cast() const { return get_cache().template cast(); } + +private: + object &get_cache() const { + if (!cache) { cache = Policy::get(obj, key); } + return cache; + } + +private: + handle obj; + key_type key; + mutable object cache; +}; + +PYBIND11_NAMESPACE_BEGIN(accessor_policies) +struct obj_attr { + using key_type = object; + static object get(handle obj, handle key) { return getattr(obj, key); } + static void set(handle obj, handle key, handle val) { setattr(obj, key, val); } +}; + +struct str_attr { + using key_type = const char *; + static object get(handle obj, const char *key) { return getattr(obj, key); } + static void set(handle obj, const char *key, handle val) { setattr(obj, key, val); } +}; + +struct generic_item { + using key_type = object; + + static object get(handle obj, handle key) { + PyObject *result = PyObject_GetItem(obj.ptr(), key.ptr()); + if (!result) { throw error_already_set(); } + return reinterpret_steal(result); + } + + static void set(handle obj, handle key, handle val) { + if (PyObject_SetItem(obj.ptr(), key.ptr(), val.ptr()) != 0) { throw error_already_set(); } + } +}; + +struct sequence_item { + using key_type = size_t; + + static object get(handle obj, size_t index) { + PyObject *result = PySequence_GetItem(obj.ptr(), static_cast(index)); + if (!result) { throw error_already_set(); } + return reinterpret_steal(result); + } + + static void set(handle obj, size_t index, handle val) { + // PySequence_SetItem does not steal a reference to 'val' + if 
(PySequence_SetItem(obj.ptr(), static_cast(index), val.ptr()) != 0) { + throw error_already_set(); + } + } +}; + +struct list_item { + using key_type = size_t; + + static object get(handle obj, size_t index) { + PyObject *result = PyList_GetItem(obj.ptr(), static_cast(index)); + if (!result) { throw error_already_set(); } + return reinterpret_borrow(result); + } + + static void set(handle obj, size_t index, handle val) { + // PyList_SetItem steals a reference to 'val' + if (PyList_SetItem(obj.ptr(), static_cast(index), val.inc_ref().ptr()) != 0) { + throw error_already_set(); + } + } +}; + +struct tuple_item { + using key_type = size_t; + + static object get(handle obj, size_t index) { + PyObject *result = PyTuple_GetItem(obj.ptr(), static_cast(index)); + if (!result) { throw error_already_set(); } + return reinterpret_borrow(result); + } + + static void set(handle obj, size_t index, handle val) { + // PyTuple_SetItem steals a reference to 'val' + if (PyTuple_SetItem(obj.ptr(), static_cast(index), val.inc_ref().ptr()) != 0) { + throw error_already_set(); + } + } +}; +PYBIND11_NAMESPACE_END(accessor_policies) + +/// STL iterator template used for tuple, list, sequence and dict +template +class generic_iterator : public Policy { + using It = generic_iterator; + +public: + using difference_type = ssize_t; + using iterator_category = typename Policy::iterator_category; + using value_type = typename Policy::value_type; + using reference = typename Policy::reference; + using pointer = typename Policy::pointer; + + generic_iterator() = default; + generic_iterator(handle seq, ssize_t index) : Policy(seq, index) { } + + reference operator*() const { return Policy::dereference(); } + reference operator[](difference_type n) const { return *(*this + n); } + pointer operator->() const { return **this; } + + It &operator++() { Policy::increment(); return *this; } + It operator++(int) { auto copy = *this; Policy::increment(); return copy; } + It &operator--() { 
Policy::decrement(); return *this; } + It operator--(int) { auto copy = *this; Policy::decrement(); return copy; } + It &operator+=(difference_type n) { Policy::advance(n); return *this; } + It &operator-=(difference_type n) { Policy::advance(-n); return *this; } + + friend It operator+(const It &a, difference_type n) { auto copy = a; return copy += n; } + friend It operator+(difference_type n, const It &b) { return b + n; } + friend It operator-(const It &a, difference_type n) { auto copy = a; return copy -= n; } + friend difference_type operator-(const It &a, const It &b) { return a.distance_to(b); } + + friend bool operator==(const It &a, const It &b) { return a.equal(b); } + friend bool operator!=(const It &a, const It &b) { return !(a == b); } + friend bool operator< (const It &a, const It &b) { return b - a > 0; } + friend bool operator> (const It &a, const It &b) { return b < a; } + friend bool operator>=(const It &a, const It &b) { return !(a < b); } + friend bool operator<=(const It &a, const It &b) { return !(a > b); } +}; + +PYBIND11_NAMESPACE_BEGIN(iterator_policies) +/// Quick proxy class needed to implement ``operator->`` for iterators which can't return pointers +template +struct arrow_proxy { + T value; + + arrow_proxy(T &&value) : value(std::move(value)) { } + T *operator->() const { return &value; } +}; + +/// Lightweight iterator policy using just a simple pointer: see ``PySequence_Fast_ITEMS`` +class sequence_fast_readonly { +protected: + using iterator_category = std::random_access_iterator_tag; + using value_type = handle; + using reference = const handle; + using pointer = arrow_proxy; + + sequence_fast_readonly(handle obj, ssize_t n) : ptr(PySequence_Fast_ITEMS(obj.ptr()) + n) { } + + reference dereference() const { return *ptr; } + void increment() { ++ptr; } + void decrement() { --ptr; } + void advance(ssize_t n) { ptr += n; } + bool equal(const sequence_fast_readonly &b) const { return ptr == b.ptr; } + ssize_t distance_to(const 
sequence_fast_readonly &b) const { return ptr - b.ptr; } + +private: + PyObject **ptr; +}; + +/// Full read and write access using the sequence protocol: see ``detail::sequence_accessor`` +class sequence_slow_readwrite { +protected: + using iterator_category = std::random_access_iterator_tag; + using value_type = object; + using reference = sequence_accessor; + using pointer = arrow_proxy; + + sequence_slow_readwrite(handle obj, ssize_t index) : obj(obj), index(index) { } + + reference dereference() const { return {obj, static_cast(index)}; } + void increment() { ++index; } + void decrement() { --index; } + void advance(ssize_t n) { index += n; } + bool equal(const sequence_slow_readwrite &b) const { return index == b.index; } + ssize_t distance_to(const sequence_slow_readwrite &b) const { return index - b.index; } + +private: + handle obj; + ssize_t index; +}; + +/// Python's dictionary protocol permits this to be a forward iterator +class dict_readonly { +protected: + using iterator_category = std::forward_iterator_tag; + using value_type = std::pair; + using reference = const value_type; + using pointer = arrow_proxy; + + dict_readonly() = default; + dict_readonly(handle obj, ssize_t pos) : obj(obj), pos(pos) { increment(); } + + reference dereference() const { return {key, value}; } + void increment() { if (!PyDict_Next(obj.ptr(), &pos, &key, &value)) { pos = -1; } } + bool equal(const dict_readonly &b) const { return pos == b.pos; } + +private: + handle obj; + PyObject *key = nullptr, *value = nullptr; + ssize_t pos = -1; +}; +PYBIND11_NAMESPACE_END(iterator_policies) + +#if !defined(PYPY_VERSION) +using tuple_iterator = generic_iterator; +using list_iterator = generic_iterator; +#else +using tuple_iterator = generic_iterator; +using list_iterator = generic_iterator; +#endif + +using sequence_iterator = generic_iterator; +using dict_iterator = generic_iterator; + +inline bool PyIterable_Check(PyObject *obj) { + PyObject *iter = PyObject_GetIter(obj); + if 
(iter) { + Py_DECREF(iter); + return true; + } else { + PyErr_Clear(); + return false; + } +} + +inline bool PyNone_Check(PyObject *o) { return o == Py_None; } +inline bool PyEllipsis_Check(PyObject *o) { return o == Py_Ellipsis; } + +inline bool PyUnicode_Check_Permissive(PyObject *o) { return PyUnicode_Check(o) || PYBIND11_BYTES_CHECK(o); } + +inline bool PyStaticMethod_Check(PyObject *o) { return o->ob_type == &PyStaticMethod_Type; } + +class kwargs_proxy : public handle { +public: + explicit kwargs_proxy(handle h) : handle(h) { } +}; + +class args_proxy : public handle { +public: + explicit args_proxy(handle h) : handle(h) { } + kwargs_proxy operator*() const { return kwargs_proxy(*this); } +}; + +/// Python argument categories (using PEP 448 terms) +template using is_keyword = std::is_base_of; +template using is_s_unpacking = std::is_same; // * unpacking +template using is_ds_unpacking = std::is_same; // ** unpacking +template using is_positional = satisfies_none_of; +template using is_keyword_or_ds = satisfies_any_of; + +// Call argument collector forward declarations +template +class simple_collector; +template +class unpacking_collector; + +PYBIND11_NAMESPACE_END(detail) + +// TODO: After the deprecated constructors are removed, this macro can be simplified by +// inheriting ctors: `using Parent::Parent`. It's not an option right now because +// the `using` statement triggers the parent deprecation warning even if the ctor +// isn't even used. +#define PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \ + public: \ + PYBIND11_DEPRECATED("Use reinterpret_borrow<"#Name">() or reinterpret_steal<"#Name">()") \ + Name(handle h, bool is_borrowed) : Parent(is_borrowed ? 
Parent(h, borrowed_t{}) : Parent(h, stolen_t{})) { } \ + Name(handle h, borrowed_t) : Parent(h, borrowed_t{}) { } \ + Name(handle h, stolen_t) : Parent(h, stolen_t{}) { } \ + PYBIND11_DEPRECATED("Use py::isinstance(obj) instead") \ + bool check() const { return m_ptr != nullptr && (bool) CheckFun(m_ptr); } \ + static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); } \ + template \ + Name(const ::pybind11::detail::accessor &a) : Name(object(a)) { } + +#define PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun) \ + PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \ + /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \ + Name(const object &o) \ + : Parent(check_(o) ? o.inc_ref().ptr() : ConvertFun(o.ptr()), stolen_t{}) \ + { if (!m_ptr) throw error_already_set(); } \ + Name(object &&o) \ + : Parent(check_(o) ? o.release().ptr() : ConvertFun(o.ptr()), stolen_t{}) \ + { if (!m_ptr) throw error_already_set(); } + +#define PYBIND11_OBJECT(Name, Parent, CheckFun) \ + PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \ + /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \ + Name(const object &o) : Parent(o) { } \ + Name(object &&o) : Parent(std::move(o)) { } + +#define PYBIND11_OBJECT_DEFAULT(Name, Parent, CheckFun) \ + PYBIND11_OBJECT(Name, Parent, CheckFun) \ + Name() : Parent() { } + +/// \addtogroup pytypes +/// @{ + +/** \rst + Wraps a Python iterator so that it can also be used as a C++ input iterator + + Caveat: copying an iterator does not (and cannot) clone the internal + state of the Python iterable. This also applies to the post-increment + operator. This iterator should only be used to retrieve the current + value using ``operator*()``. 
+\endrst */ +class iterator : public object { +public: + using iterator_category = std::input_iterator_tag; + using difference_type = ssize_t; + using value_type = handle; + using reference = const handle; + using pointer = const handle *; + + PYBIND11_OBJECT_DEFAULT(iterator, object, PyIter_Check) + + iterator& operator++() { + advance(); + return *this; + } + + iterator operator++(int) { + auto rv = *this; + advance(); + return rv; + } + + reference operator*() const { + if (m_ptr && !value.ptr()) { + auto& self = const_cast(*this); + self.advance(); + } + return value; + } + + pointer operator->() const { operator*(); return &value; } + + /** \rst + The value which marks the end of the iteration. ``it == iterator::sentinel()`` + is equivalent to catching ``StopIteration`` in Python. + + .. code-block:: cpp + + void foo(py::iterator it) { + while (it != py::iterator::sentinel()) { + // use `*it` + ++it; + } + } + \endrst */ + static iterator sentinel() { return {}; } + + friend bool operator==(const iterator &a, const iterator &b) { return a->ptr() == b->ptr(); } + friend bool operator!=(const iterator &a, const iterator &b) { return a->ptr() != b->ptr(); } + +private: + void advance() { + value = reinterpret_steal(PyIter_Next(m_ptr)); + if (PyErr_Occurred()) { throw error_already_set(); } + } + +private: + object value = {}; +}; + +class iterable : public object { +public: + PYBIND11_OBJECT_DEFAULT(iterable, object, detail::PyIterable_Check) +}; + +class bytes; + +class str : public object { +public: + PYBIND11_OBJECT_CVT(str, object, detail::PyUnicode_Check_Permissive, raw_str) + + str(const char *c, size_t n) + : object(PyUnicode_FromStringAndSize(c, (ssize_t) n), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could not allocate string object!"); + } + + // 'explicit' is explicitly omitted from the following constructors to allow implicit conversion to py::str from C++ string-like objects + str(const char *c = "") + : object(PyUnicode_FromString(c), stolen_t{}) { 
+ if (!m_ptr) pybind11_fail("Could not allocate string object!"); + } + + str(const std::string &s) : str(s.data(), s.size()) { } + + explicit str(const bytes &b); + + /** \rst + Return a string representation of the object. This is analogous to + the ``str()`` function in Python. + \endrst */ + explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) { } + + operator std::string() const { + object temp = *this; + if (PyUnicode_Check(m_ptr)) { + temp = reinterpret_steal(PyUnicode_AsUTF8String(m_ptr)); + if (!temp) + pybind11_fail("Unable to extract string contents! (encoding issue)"); + } + char *buffer; + ssize_t length; + if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length)) + pybind11_fail("Unable to extract string contents! (invalid type)"); + return std::string(buffer, (size_t) length); + } + + template + str format(Args &&...args) const { + return attr("format")(std::forward(args)...); + } + +private: + /// Return string representation -- always returns a new reference, even if already a str + static PyObject *raw_str(PyObject *op) { + PyObject *str_value = PyObject_Str(op); + if (!str_value) throw error_already_set(); +#if PY_MAJOR_VERSION < 3 + PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr); + Py_XDECREF(str_value); str_value = unicode; +#endif + return str_value; + } +}; +/// @} pytypes + +inline namespace literals { +/** \rst + String literal version of `str` + \endrst */ +inline str operator"" _s(const char *s, size_t size) { return {s, size}; } +} + +/// \addtogroup pytypes +/// @{ +class bytes : public object { +public: + PYBIND11_OBJECT(bytes, object, PYBIND11_BYTES_CHECK) + + // Allow implicit conversion: + bytes(const char *c = "") + : object(PYBIND11_BYTES_FROM_STRING(c), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could not allocate bytes object!"); + } + + bytes(const char *c, size_t n) + : object(PYBIND11_BYTES_FROM_STRING_AND_SIZE(c, (ssize_t) n), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could 
not allocate bytes object!"); + } + + // Allow implicit conversion: + bytes(const std::string &s) : bytes(s.data(), s.size()) { } + + explicit bytes(const pybind11::str &s); + + operator std::string() const { + char *buffer; + ssize_t length; + if (PYBIND11_BYTES_AS_STRING_AND_SIZE(m_ptr, &buffer, &length)) + pybind11_fail("Unable to extract bytes contents!"); + return std::string(buffer, (size_t) length); + } +}; +// Note: breathe >= 4.17.0 will fail to build docs if the below two constructors +// are included in the doxygen group; close here and reopen after as a workaround +/// @} pytypes + +inline bytes::bytes(const pybind11::str &s) { + object temp = s; + if (PyUnicode_Check(s.ptr())) { + temp = reinterpret_steal(PyUnicode_AsUTF8String(s.ptr())); + if (!temp) + pybind11_fail("Unable to extract string contents! (encoding issue)"); + } + char *buffer; + ssize_t length; + if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length)) + pybind11_fail("Unable to extract string contents! 
(invalid type)"); + auto obj = reinterpret_steal(PYBIND11_BYTES_FROM_STRING_AND_SIZE(buffer, length)); + if (!obj) + pybind11_fail("Could not allocate bytes object!"); + m_ptr = obj.release().ptr(); +} + +inline str::str(const bytes& b) { + char *buffer; + ssize_t length; + if (PYBIND11_BYTES_AS_STRING_AND_SIZE(b.ptr(), &buffer, &length)) + pybind11_fail("Unable to extract bytes contents!"); + auto obj = reinterpret_steal(PyUnicode_FromStringAndSize(buffer, (ssize_t) length)); + if (!obj) + pybind11_fail("Could not allocate string object!"); + m_ptr = obj.release().ptr(); +} + +/// \addtogroup pytypes +/// @{ +class none : public object { +public: + PYBIND11_OBJECT(none, object, detail::PyNone_Check) + none() : object(Py_None, borrowed_t{}) { } +}; + +class ellipsis : public object { +public: + PYBIND11_OBJECT(ellipsis, object, detail::PyEllipsis_Check) + ellipsis() : object(Py_Ellipsis, borrowed_t{}) { } +}; + +class bool_ : public object { +public: + PYBIND11_OBJECT_CVT(bool_, object, PyBool_Check, raw_bool) + bool_() : object(Py_False, borrowed_t{}) { } + // Allow implicit conversion from and to `bool`: + bool_(bool value) : object(value ? Py_True : Py_False, borrowed_t{}) { } + operator bool() const { return m_ptr && PyLong_AsLong(m_ptr) != 0; } + +private: + /// Return the truth value of an object -- always returns a new reference + static PyObject *raw_bool(PyObject *op) { + const auto value = PyObject_IsTrue(op); + if (value == -1) return nullptr; + return handle(value ? Py_True : Py_False).inc_ref().ptr(); + } +}; + +PYBIND11_NAMESPACE_BEGIN(detail) +// Converts a value to the given unsigned type. If an error occurs, you get back (Unsigned) -1; +// otherwise you get back the unsigned long or unsigned long long value cast to (Unsigned). +// (The distinction is critically important when casting a returned -1 error value to some other +// unsigned type: (A)-1 != (B)-1 when A and B are unsigned types of different sizes). 
+template +Unsigned as_unsigned(PyObject *o) { + if (sizeof(Unsigned) <= sizeof(unsigned long) +#if PY_VERSION_HEX < 0x03000000 + || PyInt_Check(o) +#endif + ) { + unsigned long v = PyLong_AsUnsignedLong(o); + return v == (unsigned long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v; + } + else { + unsigned long long v = PyLong_AsUnsignedLongLong(o); + return v == (unsigned long long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v; + } +} +PYBIND11_NAMESPACE_END(detail) + +class int_ : public object { +public: + PYBIND11_OBJECT_CVT(int_, object, PYBIND11_LONG_CHECK, PyNumber_Long) + int_() : object(PyLong_FromLong(0), stolen_t{}) { } + // Allow implicit conversion from C++ integral types: + template ::value, int> = 0> + int_(T value) { + if (sizeof(T) <= sizeof(long)) { + if (std::is_signed::value) + m_ptr = PyLong_FromLong((long) value); + else + m_ptr = PyLong_FromUnsignedLong((unsigned long) value); + } else { + if (std::is_signed::value) + m_ptr = PyLong_FromLongLong((long long) value); + else + m_ptr = PyLong_FromUnsignedLongLong((unsigned long long) value); + } + if (!m_ptr) pybind11_fail("Could not allocate int object!"); + } + + template ::value, int> = 0> + operator T() const { + return std::is_unsigned::value + ? detail::as_unsigned(m_ptr) + : sizeof(T) <= sizeof(long) + ? 
(T) PyLong_AsLong(m_ptr) + : (T) PYBIND11_LONG_AS_LONGLONG(m_ptr); + } +}; + +class float_ : public object { +public: + PYBIND11_OBJECT_CVT(float_, object, PyFloat_Check, PyNumber_Float) + // Allow implicit conversion from float/double: + float_(float value) : object(PyFloat_FromDouble((double) value), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could not allocate float object!"); + } + float_(double value = .0) : object(PyFloat_FromDouble((double) value), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could not allocate float object!"); + } + operator float() const { return (float) PyFloat_AsDouble(m_ptr); } + operator double() const { return (double) PyFloat_AsDouble(m_ptr); } +}; + +class weakref : public object { +public: + PYBIND11_OBJECT_DEFAULT(weakref, object, PyWeakref_Check) + explicit weakref(handle obj, handle callback = {}) + : object(PyWeakref_NewRef(obj.ptr(), callback.ptr()), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could not allocate weak reference!"); + } +}; + +class slice : public object { +public: + PYBIND11_OBJECT_DEFAULT(slice, object, PySlice_Check) + slice(ssize_t start_, ssize_t stop_, ssize_t step_) { + int_ start(start_), stop(stop_), step(step_); + m_ptr = PySlice_New(start.ptr(), stop.ptr(), step.ptr()); + if (!m_ptr) pybind11_fail("Could not allocate slice object!"); + } + bool compute(size_t length, size_t *start, size_t *stop, size_t *step, + size_t *slicelength) const { + return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr, + (ssize_t) length, (ssize_t *) start, + (ssize_t *) stop, (ssize_t *) step, + (ssize_t *) slicelength) == 0; + } + bool compute(ssize_t length, ssize_t *start, ssize_t *stop, ssize_t *step, + ssize_t *slicelength) const { + return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr, + length, start, + stop, step, + slicelength) == 0; + } +}; + +class capsule : public object { +public: + PYBIND11_OBJECT_DEFAULT(capsule, object, PyCapsule_CheckExact) + PYBIND11_DEPRECATED("Use reinterpret_borrow() or 
reinterpret_steal()") + capsule(PyObject *ptr, bool is_borrowed) : object(is_borrowed ? object(ptr, borrowed_t{}) : object(ptr, stolen_t{})) { } + + explicit capsule(const void *value, const char *name = nullptr, void (*destructor)(PyObject *) = nullptr) + : object(PyCapsule_New(const_cast(value), name, destructor), stolen_t{}) { + if (!m_ptr) + pybind11_fail("Could not allocate capsule object!"); + } + + PYBIND11_DEPRECATED("Please pass a destructor that takes a void pointer as input") + capsule(const void *value, void (*destruct)(PyObject *)) + : object(PyCapsule_New(const_cast(value), nullptr, destruct), stolen_t{}) { + if (!m_ptr) + pybind11_fail("Could not allocate capsule object!"); + } + + capsule(const void *value, void (*destructor)(void *)) { + m_ptr = PyCapsule_New(const_cast(value), nullptr, [](PyObject *o) { + auto destructor = reinterpret_cast(PyCapsule_GetContext(o)); + void *ptr = PyCapsule_GetPointer(o, nullptr); + destructor(ptr); + }); + + if (!m_ptr) + pybind11_fail("Could not allocate capsule object!"); + + if (PyCapsule_SetContext(m_ptr, (void *) destructor) != 0) + pybind11_fail("Could not set capsule context!"); + } + + capsule(void (*destructor)()) { + m_ptr = PyCapsule_New(reinterpret_cast(destructor), nullptr, [](PyObject *o) { + auto destructor = reinterpret_cast(PyCapsule_GetPointer(o, nullptr)); + destructor(); + }); + + if (!m_ptr) + pybind11_fail("Could not allocate capsule object!"); + } + + template operator T *() const { + auto name = this->name(); + T * result = static_cast(PyCapsule_GetPointer(m_ptr, name)); + if (!result) pybind11_fail("Unable to extract capsule contents!"); + return result; + } + + const char *name() const { return PyCapsule_GetName(m_ptr); } +}; + +class tuple : public object { +public: + PYBIND11_OBJECT_CVT(tuple, object, PyTuple_Check, PySequence_Tuple) + explicit tuple(size_t size = 0) : object(PyTuple_New((ssize_t) size), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could not allocate tuple object!"); + } + 
size_t size() const { return (size_t) PyTuple_Size(m_ptr); } + bool empty() const { return size() == 0; } + detail::tuple_accessor operator[](size_t index) const { return {*this, index}; } + detail::item_accessor operator[](handle h) const { return object::operator[](h); } + detail::tuple_iterator begin() const { return {*this, 0}; } + detail::tuple_iterator end() const { return {*this, PyTuple_GET_SIZE(m_ptr)}; } +}; + +class dict : public object { +public: + PYBIND11_OBJECT_CVT(dict, object, PyDict_Check, raw_dict) + dict() : object(PyDict_New(), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could not allocate dict object!"); + } + template ...>::value>, + // MSVC workaround: it can't compile an out-of-line definition, so defer the collector + typename collector = detail::deferred_t, Args...>> + explicit dict(Args &&...args) : dict(collector(std::forward(args)...).kwargs()) { } + + size_t size() const { return (size_t) PyDict_Size(m_ptr); } + bool empty() const { return size() == 0; } + detail::dict_iterator begin() const { return {*this, 0}; } + detail::dict_iterator end() const { return {}; } + void clear() const { PyDict_Clear(ptr()); } + template bool contains(T &&key) const { + return PyDict_Contains(m_ptr, detail::object_or_cast(std::forward(key)).ptr()) == 1; + } + +private: + /// Call the `dict` Python type -- always returns a new reference + static PyObject *raw_dict(PyObject *op) { + if (PyDict_Check(op)) + return handle(op).inc_ref().ptr(); + return PyObject_CallFunctionObjArgs((PyObject *) &PyDict_Type, op, nullptr); + } +}; + +class sequence : public object { +public: + PYBIND11_OBJECT_DEFAULT(sequence, object, PySequence_Check) + size_t size() const { + ssize_t result = PySequence_Size(m_ptr); + if (result == -1) + throw error_already_set(); + return (size_t) result; + } + bool empty() const { return size() == 0; } + detail::sequence_accessor operator[](size_t index) const { return {*this, index}; } + detail::item_accessor operator[](handle h) const { 
return object::operator[](h); } + detail::sequence_iterator begin() const { return {*this, 0}; } + detail::sequence_iterator end() const { return {*this, PySequence_Size(m_ptr)}; } +}; + +class list : public object { +public: + PYBIND11_OBJECT_CVT(list, object, PyList_Check, PySequence_List) + explicit list(size_t size = 0) : object(PyList_New((ssize_t) size), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could not allocate list object!"); + } + size_t size() const { return (size_t) PyList_Size(m_ptr); } + bool empty() const { return size() == 0; } + detail::list_accessor operator[](size_t index) const { return {*this, index}; } + detail::item_accessor operator[](handle h) const { return object::operator[](h); } + detail::list_iterator begin() const { return {*this, 0}; } + detail::list_iterator end() const { return {*this, PyList_GET_SIZE(m_ptr)}; } + template void append(T &&val) const { + PyList_Append(m_ptr, detail::object_or_cast(std::forward(val)).ptr()); + } + template void insert(size_t index, T &&val) const { + PyList_Insert(m_ptr, static_cast(index), + detail::object_or_cast(std::forward(val)).ptr()); + } +}; + +class args : public tuple { PYBIND11_OBJECT_DEFAULT(args, tuple, PyTuple_Check) }; +class kwargs : public dict { PYBIND11_OBJECT_DEFAULT(kwargs, dict, PyDict_Check) }; + +class set : public object { +public: + PYBIND11_OBJECT_CVT(set, object, PySet_Check, PySet_New) + set() : object(PySet_New(nullptr), stolen_t{}) { + if (!m_ptr) pybind11_fail("Could not allocate set object!"); + } + size_t size() const { return (size_t) PySet_Size(m_ptr); } + bool empty() const { return size() == 0; } + template bool add(T &&val) const { + return PySet_Add(m_ptr, detail::object_or_cast(std::forward(val)).ptr()) == 0; + } + void clear() const { PySet_Clear(m_ptr); } + template bool contains(T &&val) const { + return PySet_Contains(m_ptr, detail::object_or_cast(std::forward(val)).ptr()) == 1; + } +}; + +class function : public object { +public: + 
PYBIND11_OBJECT_DEFAULT(function, object, PyCallable_Check) + handle cpp_function() const { + handle fun = detail::get_function(m_ptr); + if (fun && PyCFunction_Check(fun.ptr())) + return fun; + return handle(); + } + bool is_cpp_function() const { return (bool) cpp_function(); } +}; + +class staticmethod : public object { +public: + PYBIND11_OBJECT_CVT(staticmethod, object, detail::PyStaticMethod_Check, PyStaticMethod_New) +}; + +class buffer : public object { +public: + PYBIND11_OBJECT_DEFAULT(buffer, object, PyObject_CheckBuffer) + + buffer_info request(bool writable = false) const { + int flags = PyBUF_STRIDES | PyBUF_FORMAT; + if (writable) flags |= PyBUF_WRITABLE; + Py_buffer *view = new Py_buffer(); + if (PyObject_GetBuffer(m_ptr, view, flags) != 0) { + delete view; + throw error_already_set(); + } + return buffer_info(view); + } +}; + +class memoryview : public object { +public: + PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject) + + /** \rst + Creates ``memoryview`` from ``buffer_info``. + + ``buffer_info`` must be created from ``buffer::request()``. Otherwise + throws an exception. + + For creating a ``memoryview`` from objects that support buffer protocol, + use ``memoryview(const object& obj)`` instead of this constructor. + \endrst */ + explicit memoryview(const buffer_info& info) { + if (!info.view()) + pybind11_fail("Prohibited to create memoryview without Py_buffer"); + // Note: PyMemoryView_FromBuffer never increments obj reference. + m_ptr = (info.view()->obj) ? + PyMemoryView_FromObject(info.view()->obj) : + PyMemoryView_FromBuffer(info.view()); + if (!m_ptr) + pybind11_fail("Unable to create memoryview from buffer descriptor"); + } + + /** \rst + Creates ``memoryview`` from static buffer. + + This method is meant for providing a ``memoryview`` for C/C++ buffer not + managed by Python. 
The caller is responsible for managing the lifetime + of ``ptr`` and ``format``, which MUST outlive the memoryview constructed + here. + + See also: Python C API documentation for `PyMemoryView_FromBuffer`_. + + .. _PyMemoryView_FromBuffer: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromBuffer + + :param ptr: Pointer to the buffer. + :param itemsize: Byte size of an element. + :param format: Pointer to the null-terminated format string. For + homogeneous Buffers, this should be set to + ``format_descriptor::value``. + :param shape: Shape of the tensor (1 entry per dimension). + :param strides: Number of bytes between adjacent entries (for each + per dimension). + :param readonly: Flag to indicate if the underlying storage may be + written to. + \endrst */ + static memoryview from_buffer( + void *ptr, ssize_t itemsize, const char *format, + detail::any_container shape, + detail::any_container strides, bool readonly = false); + + static memoryview from_buffer( + const void *ptr, ssize_t itemsize, const char *format, + detail::any_container shape, + detail::any_container strides) { + return memoryview::from_buffer( + const_cast(ptr), itemsize, format, shape, strides, true); + } + + template + static memoryview from_buffer( + T *ptr, detail::any_container shape, + detail::any_container strides, bool readonly = false) { + return memoryview::from_buffer( + reinterpret_cast(ptr), sizeof(T), + format_descriptor::value, shape, strides, readonly); + } + + template + static memoryview from_buffer( + const T *ptr, detail::any_container shape, + detail::any_container strides) { + return memoryview::from_buffer( + const_cast(ptr), shape, strides, true); + } + +#if PY_MAJOR_VERSION >= 3 + /** \rst + Creates ``memoryview`` from static memory. + + This method is meant for providing a ``memoryview`` for C/C++ buffer not + managed by Python. The caller is responsible for managing the lifetime + of ``mem``, which MUST outlive the memoryview constructed here. 
+ + This method is not available in Python 2. + + See also: Python C API documentation for `PyMemoryView_FromBuffer`_. + + .. _PyMemoryView_FromMemory: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromMemory + \endrst */ + static memoryview from_memory(void *mem, ssize_t size, bool readonly = false) { + PyObject* ptr = PyMemoryView_FromMemory( + reinterpret_cast(mem), size, + (readonly) ? PyBUF_READ : PyBUF_WRITE); + if (!ptr) + pybind11_fail("Could not allocate memoryview object!"); + return memoryview(object(ptr, stolen_t{})); + } + + static memoryview from_memory(const void *mem, ssize_t size) { + return memoryview::from_memory(const_cast(mem), size, true); + } +#endif +}; + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +inline memoryview memoryview::from_buffer( + void *ptr, ssize_t itemsize, const char* format, + detail::any_container shape, + detail::any_container strides, bool readonly) { + size_t ndim = shape->size(); + if (ndim != strides->size()) + pybind11_fail("memoryview: shape length doesn't match strides length"); + ssize_t size = ndim ? 
1 : 0; + for (size_t i = 0; i < ndim; ++i) + size *= (*shape)[i]; + Py_buffer view; + view.buf = ptr; + view.obj = nullptr; + view.len = size * itemsize; + view.readonly = static_cast(readonly); + view.itemsize = itemsize; + view.format = const_cast(format); + view.ndim = static_cast(ndim); + view.shape = shape->data(); + view.strides = strides->data(); + view.suboffsets = nullptr; + view.internal = nullptr; + PyObject* obj = PyMemoryView_FromBuffer(&view); + if (!obj) + throw error_already_set(); + return memoryview(object(obj, stolen_t{})); +} +#endif // DOXYGEN_SHOULD_SKIP_THIS +/// @} pytypes + +/// \addtogroup python_builtins +/// @{ +inline size_t len(handle h) { + ssize_t result = PyObject_Length(h.ptr()); + if (result < 0) + pybind11_fail("Unable to compute length of object"); + return (size_t) result; +} + +inline size_t len_hint(handle h) { +#if PY_VERSION_HEX >= 0x03040000 + ssize_t result = PyObject_LengthHint(h.ptr(), 0); +#else + ssize_t result = PyObject_Length(h.ptr()); +#endif + if (result < 0) { + // Sometimes a length can't be determined at all (eg generators) + // In which case simply return 0 + PyErr_Clear(); + return 0; + } + return (size_t) result; +} + +inline str repr(handle h) { + PyObject *str_value = PyObject_Repr(h.ptr()); + if (!str_value) throw error_already_set(); +#if PY_MAJOR_VERSION < 3 + PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr); + Py_XDECREF(str_value); str_value = unicode; + if (!str_value) throw error_already_set(); +#endif + return reinterpret_steal(str_value); +} + +inline iterator iter(handle obj) { + PyObject *result = PyObject_GetIter(obj.ptr()); + if (!result) { throw error_already_set(); } + return reinterpret_steal(result); +} +/// @} python_builtins + +PYBIND11_NAMESPACE_BEGIN(detail) +template iterator object_api::begin() const { return iter(derived()); } +template iterator object_api::end() const { return iterator::sentinel(); } +template item_accessor object_api::operator[](handle 
key) const { + return {derived(), reinterpret_borrow(key)}; +} +template item_accessor object_api::operator[](const char *key) const { + return {derived(), pybind11::str(key)}; +} +template obj_attr_accessor object_api::attr(handle key) const { + return {derived(), reinterpret_borrow(key)}; +} +template str_attr_accessor object_api::attr(const char *key) const { + return {derived(), key}; +} +template args_proxy object_api::operator*() const { + return args_proxy(derived().ptr()); +} +template template bool object_api::contains(T &&item) const { + return attr("__contains__")(std::forward(item)).template cast(); +} + +template +pybind11::str object_api::str() const { return pybind11::str(derived()); } + +template +str_attr_accessor object_api::doc() const { return attr("__doc__"); } + +template +handle object_api::get_type() const { return (PyObject *) Py_TYPE(derived().ptr()); } + +template +bool object_api::rich_compare(object_api const &other, int value) const { + int rv = PyObject_RichCompareBool(derived().ptr(), other.derived().ptr(), value); + if (rv == -1) + throw error_already_set(); + return rv == 1; +} + +#define PYBIND11_MATH_OPERATOR_UNARY(op, fn) \ + template object object_api::op() const { \ + object result = reinterpret_steal(fn(derived().ptr())); \ + if (!result.ptr()) \ + throw error_already_set(); \ + return result; \ + } + +#define PYBIND11_MATH_OPERATOR_BINARY(op, fn) \ + template \ + object object_api::op(object_api const &other) const { \ + object result = reinterpret_steal( \ + fn(derived().ptr(), other.derived().ptr())); \ + if (!result.ptr()) \ + throw error_already_set(); \ + return result; \ + } + +PYBIND11_MATH_OPERATOR_UNARY (operator~, PyNumber_Invert) +PYBIND11_MATH_OPERATOR_UNARY (operator-, PyNumber_Negative) +PYBIND11_MATH_OPERATOR_BINARY(operator+, PyNumber_Add) +PYBIND11_MATH_OPERATOR_BINARY(operator+=, PyNumber_InPlaceAdd) +PYBIND11_MATH_OPERATOR_BINARY(operator-, PyNumber_Subtract) +PYBIND11_MATH_OPERATOR_BINARY(operator-=, 
PyNumber_InPlaceSubtract) +PYBIND11_MATH_OPERATOR_BINARY(operator*, PyNumber_Multiply) +PYBIND11_MATH_OPERATOR_BINARY(operator*=, PyNumber_InPlaceMultiply) +PYBIND11_MATH_OPERATOR_BINARY(operator/, PyNumber_TrueDivide) +PYBIND11_MATH_OPERATOR_BINARY(operator/=, PyNumber_InPlaceTrueDivide) +PYBIND11_MATH_OPERATOR_BINARY(operator|, PyNumber_Or) +PYBIND11_MATH_OPERATOR_BINARY(operator|=, PyNumber_InPlaceOr) +PYBIND11_MATH_OPERATOR_BINARY(operator&, PyNumber_And) +PYBIND11_MATH_OPERATOR_BINARY(operator&=, PyNumber_InPlaceAnd) +PYBIND11_MATH_OPERATOR_BINARY(operator^, PyNumber_Xor) +PYBIND11_MATH_OPERATOR_BINARY(operator^=, PyNumber_InPlaceXor) +PYBIND11_MATH_OPERATOR_BINARY(operator<<, PyNumber_Lshift) +PYBIND11_MATH_OPERATOR_BINARY(operator<<=, PyNumber_InPlaceLshift) +PYBIND11_MATH_OPERATOR_BINARY(operator>>, PyNumber_Rshift) +PYBIND11_MATH_OPERATOR_BINARY(operator>>=, PyNumber_InPlaceRshift) + +#undef PYBIND11_MATH_OPERATOR_UNARY +#undef PYBIND11_MATH_OPERATOR_BINARY + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/include/pybind11/stl.h b/diffvg/pybind11/include/pybind11/stl.h new file mode 100644 index 0000000000000000000000000000000000000000..6c2bebda87f1c703888307c5b4bac277655b52d6 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/stl.h @@ -0,0 +1,388 @@ +/* + pybind11/stl.h: Transparent conversion for STL data types + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "pybind11.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable: 4127) // warning C4127: Conditional expression is constant +#endif + +#ifdef __has_include +// std::optional (but including it in c++14 mode isn't allowed) +# if defined(PYBIND11_CPP17) && __has_include() +# include +# define PYBIND11_HAS_OPTIONAL 1 +# endif +// std::experimental::optional (but not allowed in c++11 mode) +# if defined(PYBIND11_CPP14) && (__has_include() && \ + !__has_include()) +# include +# define PYBIND11_HAS_EXP_OPTIONAL 1 +# endif +// std::variant +# if defined(PYBIND11_CPP17) && __has_include() +# include +# define PYBIND11_HAS_VARIANT 1 +# endif +#elif defined(_MSC_VER) && defined(PYBIND11_CPP17) +# include +# include +# define PYBIND11_HAS_OPTIONAL 1 +# define PYBIND11_HAS_VARIANT 1 +#endif + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +/// Extracts an const lvalue reference or rvalue reference for U based on the type of T (e.g. for +/// forwarding a container element). Typically used indirect via forwarded_type(), below. +template +using forwarded_type = conditional_t< + std::is_lvalue_reference::value, remove_reference_t &, remove_reference_t &&>; + +/// Forwards a value U as rvalue or lvalue according to whether T is rvalue or lvalue; typically +/// used for forwarding a container's elements. 
+template +forwarded_type forward_like(U &&u) { + return std::forward>(std::forward(u)); +} + +template struct set_caster { + using type = Type; + using key_conv = make_caster; + + bool load(handle src, bool convert) { + if (!isinstance(src)) + return false; + auto s = reinterpret_borrow(src); + value.clear(); + for (auto entry : s) { + key_conv conv; + if (!conv.load(entry, convert)) + return false; + value.insert(cast_op(std::move(conv))); + } + return true; + } + + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + if (!std::is_lvalue_reference::value) + policy = return_value_policy_override::policy(policy); + pybind11::set s; + for (auto &&value : src) { + auto value_ = reinterpret_steal(key_conv::cast(forward_like(value), policy, parent)); + if (!value_ || !s.add(value_)) + return handle(); + } + return s.release(); + } + + PYBIND11_TYPE_CASTER(type, _("Set[") + key_conv::name + _("]")); +}; + +template struct map_caster { + using key_conv = make_caster; + using value_conv = make_caster; + + bool load(handle src, bool convert) { + if (!isinstance(src)) + return false; + auto d = reinterpret_borrow(src); + value.clear(); + for (auto it : d) { + key_conv kconv; + value_conv vconv; + if (!kconv.load(it.first.ptr(), convert) || + !vconv.load(it.second.ptr(), convert)) + return false; + value.emplace(cast_op(std::move(kconv)), cast_op(std::move(vconv))); + } + return true; + } + + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + dict d; + return_value_policy policy_key = policy; + return_value_policy policy_value = policy; + if (!std::is_lvalue_reference::value) { + policy_key = return_value_policy_override::policy(policy_key); + policy_value = return_value_policy_override::policy(policy_value); + } + for (auto &&kv : src) { + auto key = reinterpret_steal(key_conv::cast(forward_like(kv.first), policy_key, parent)); + auto value = reinterpret_steal(value_conv::cast(forward_like(kv.second), 
policy_value, parent)); + if (!key || !value) + return handle(); + d[key] = value; + } + return d.release(); + } + + PYBIND11_TYPE_CASTER(Type, _("Dict[") + key_conv::name + _(", ") + value_conv::name + _("]")); +}; + +template struct list_caster { + using value_conv = make_caster; + + bool load(handle src, bool convert) { + if (!isinstance(src) || isinstance(src)) + return false; + auto s = reinterpret_borrow(src); + value.clear(); + reserve_maybe(s, &value); + for (auto it : s) { + value_conv conv; + if (!conv.load(it, convert)) + return false; + value.push_back(cast_op(std::move(conv))); + } + return true; + } + +private: + template ().reserve(0)), void>::value, int> = 0> + void reserve_maybe(sequence s, Type *) { value.reserve(s.size()); } + void reserve_maybe(sequence, void *) { } + +public: + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + if (!std::is_lvalue_reference::value) + policy = return_value_policy_override::policy(policy); + list l(src.size()); + size_t index = 0; + for (auto &&value : src) { + auto value_ = reinterpret_steal(value_conv::cast(forward_like(value), policy, parent)); + if (!value_) + return handle(); + PyList_SET_ITEM(l.ptr(), (ssize_t) index++, value_.release().ptr()); // steals a reference + } + return l.release(); + } + + PYBIND11_TYPE_CASTER(Type, _("List[") + value_conv::name + _("]")); +}; + +template struct type_caster> + : list_caster, Type> { }; + +template struct type_caster> + : list_caster, Type> { }; + +template struct type_caster> + : list_caster, Type> { }; + +template struct array_caster { + using value_conv = make_caster; + +private: + template + bool require_size(enable_if_t size) { + if (value.size() != size) + value.resize(size); + return true; + } + template + bool require_size(enable_if_t size) { + return size == Size; + } + +public: + bool load(handle src, bool convert) { + if (!isinstance(src)) + return false; + auto l = reinterpret_borrow(src); + if 
(!require_size(l.size())) + return false; + size_t ctr = 0; + for (auto it : l) { + value_conv conv; + if (!conv.load(it, convert)) + return false; + value[ctr++] = cast_op(std::move(conv)); + } + return true; + } + + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + list l(src.size()); + size_t index = 0; + for (auto &&value : src) { + auto value_ = reinterpret_steal(value_conv::cast(forward_like(value), policy, parent)); + if (!value_) + return handle(); + PyList_SET_ITEM(l.ptr(), (ssize_t) index++, value_.release().ptr()); // steals a reference + } + return l.release(); + } + + PYBIND11_TYPE_CASTER(ArrayType, _("List[") + value_conv::name + _(_(""), _("[") + _() + _("]")) + _("]")); +}; + +template struct type_caster> + : array_caster, Type, false, Size> { }; + +template struct type_caster> + : array_caster, Type, true> { }; + +template struct type_caster> + : set_caster, Key> { }; + +template struct type_caster> + : set_caster, Key> { }; + +template struct type_caster> + : map_caster, Key, Value> { }; + +template struct type_caster> + : map_caster, Key, Value> { }; + +// This type caster is intended to be used for std::optional and std::experimental::optional +template struct optional_caster { + using value_conv = make_caster; + + template + static handle cast(T_ &&src, return_value_policy policy, handle parent) { + if (!src) + return none().inc_ref(); + if (!std::is_lvalue_reference::value) { + policy = return_value_policy_override::policy(policy); + } + return value_conv::cast(*std::forward(src), policy, parent); + } + + bool load(handle src, bool convert) { + if (!src) { + return false; + } else if (src.is_none()) { + return true; // default-constructed value is already empty + } + value_conv inner_caster; + if (!inner_caster.load(src, convert)) + return false; + + value.emplace(cast_op(std::move(inner_caster))); + return true; + } + + PYBIND11_TYPE_CASTER(T, _("Optional[") + value_conv::name + _("]")); +}; + +#if 
PYBIND11_HAS_OPTIONAL +template struct type_caster> + : public optional_caster> {}; + +template<> struct type_caster + : public void_caster {}; +#endif + +#if PYBIND11_HAS_EXP_OPTIONAL +template struct type_caster> + : public optional_caster> {}; + +template<> struct type_caster + : public void_caster {}; +#endif + +/// Visit a variant and cast any found type to Python +struct variant_caster_visitor { + return_value_policy policy; + handle parent; + + using result_type = handle; // required by boost::variant in C++11 + + template + result_type operator()(T &&src) const { + return make_caster::cast(std::forward(src), policy, parent); + } +}; + +/// Helper class which abstracts away variant's `visit` function. `std::variant` and similar +/// `namespace::variant` types which provide a `namespace::visit()` function are handled here +/// automatically using argument-dependent lookup. Users can provide specializations for other +/// variant-like classes, e.g. `boost::variant` and `boost::apply_visitor`. +template class Variant> +struct visit_helper { + template + static auto call(Args &&...args) -> decltype(visit(std::forward(args)...)) { + return visit(std::forward(args)...); + } +}; + +/// Generic variant caster +template struct variant_caster; + +template class V, typename... Ts> +struct variant_caster> { + static_assert(sizeof...(Ts) > 0, "Variant must consist of at least one alternative."); + + template + bool load_alternative(handle src, bool convert, type_list) { + auto caster = make_caster(); + if (caster.load(src, convert)) { + value = cast_op(caster); + return true; + } + return load_alternative(src, convert, type_list{}); + } + + bool load_alternative(handle, bool, type_list<>) { return false; } + + bool load(handle src, bool convert) { + // Do a first pass without conversions to improve constructor resolution. + // E.g. `py::int_(1).cast>()` needs to fill the `int` + // slot of the variant. 
Without two-pass loading `double` would be filled + // because it appears first and a conversion is possible. + if (convert && load_alternative(src, false, type_list{})) + return true; + return load_alternative(src, convert, type_list{}); + } + + template + static handle cast(Variant &&src, return_value_policy policy, handle parent) { + return visit_helper::call(variant_caster_visitor{policy, parent}, + std::forward(src)); + } + + using Type = V; + PYBIND11_TYPE_CASTER(Type, _("Union[") + detail::concat(make_caster::name...) + _("]")); +}; + +#if PYBIND11_HAS_VARIANT +template +struct type_caster> : variant_caster> { }; +#endif + +PYBIND11_NAMESPACE_END(detail) + +inline std::ostream &operator<<(std::ostream &os, const handle &obj) { + os << (std::string) str(obj); + return os; +} + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif diff --git a/diffvg/pybind11/include/pybind11/stl_bind.h b/diffvg/pybind11/include/pybind11/stl_bind.h new file mode 100644 index 0000000000000000000000000000000000000000..47368f0280154db9ab5c64ac88a1a1fa655752e6 --- /dev/null +++ b/diffvg/pybind11/include/pybind11/stl_bind.h @@ -0,0 +1,661 @@ +/* + pybind11/std_bind.h: Binding generators for STL data types + + Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "detail/common.h" +#include "operators.h" + +#include +#include + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) +PYBIND11_NAMESPACE_BEGIN(detail) + +/* SFINAE helper class used by 'is_comparable */ +template struct container_traits { + template static std::true_type test_comparable(decltype(std::declval() == std::declval())*); + template static std::false_type test_comparable(...); + template static std::true_type test_value(typename T2::value_type *); + template static std::false_type test_value(...); + template static std::true_type test_pair(typename T2::first_type *, typename T2::second_type *); + template static std::false_type test_pair(...); + + static constexpr const bool is_comparable = std::is_same(nullptr))>::value; + static constexpr const bool is_pair = std::is_same(nullptr, nullptr))>::value; + static constexpr const bool is_vector = std::is_same(nullptr))>::value; + static constexpr const bool is_element = !is_pair && !is_vector; +}; + +/* Default: is_comparable -> std::false_type */ +template +struct is_comparable : std::false_type { }; + +/* For non-map data structures, check whether operator== can be instantiated */ +template +struct is_comparable< + T, enable_if_t::is_element && + container_traits::is_comparable>> + : std::true_type { }; + +/* For a vector/map data structure, recursively check the value type (which is std::pair for maps) */ +template +struct is_comparable::is_vector>> { + static constexpr const bool value = + is_comparable::value; +}; + +/* For pairs, recursively check the two data types */ +template +struct is_comparable::is_pair>> { + static constexpr const bool value = + is_comparable::value && + is_comparable::value; +}; + +/* Fallback functions */ +template void vector_if_copy_constructible(const Args &...) { } +template void vector_if_equal_operator(const Args &...) { } +template void vector_if_insertion_operator(const Args &...) { } +template void vector_modifiers(const Args &...) 
{ } + +template +void vector_if_copy_constructible(enable_if_t::value, Class_> &cl) { + cl.def(init(), "Copy constructor"); +} + +template +void vector_if_equal_operator(enable_if_t::value, Class_> &cl) { + using T = typename Vector::value_type; + + cl.def(self == self); + cl.def(self != self); + + cl.def("count", + [](const Vector &v, const T &x) { + return std::count(v.begin(), v.end(), x); + }, + arg("x"), + "Return the number of times ``x`` appears in the list" + ); + + cl.def("remove", [](Vector &v, const T &x) { + auto p = std::find(v.begin(), v.end(), x); + if (p != v.end()) + v.erase(p); + else + throw value_error(); + }, + arg("x"), + "Remove the first item from the list whose value is x. " + "It is an error if there is no such item." + ); + + cl.def("__contains__", + [](const Vector &v, const T &x) { + return std::find(v.begin(), v.end(), x) != v.end(); + }, + arg("x"), + "Return true the container contains ``x``" + ); +} + +// Vector modifiers -- requires a copyable vector_type: +// (Technically, some of these (pop and __delitem__) don't actually require copyability, but it seems +// silly to allow deletion but not insertion, so include them here too.) 
+template +void vector_modifiers(enable_if_t::value, Class_> &cl) { + using T = typename Vector::value_type; + using SizeType = typename Vector::size_type; + using DiffType = typename Vector::difference_type; + + auto wrap_i = [](DiffType i, SizeType n) { + if (i < 0) + i += n; + if (i < 0 || (SizeType)i >= n) + throw index_error(); + return i; + }; + + cl.def("append", + [](Vector &v, const T &value) { v.push_back(value); }, + arg("x"), + "Add an item to the end of the list"); + + cl.def(init([](iterable it) { + auto v = std::unique_ptr(new Vector()); + v->reserve(len_hint(it)); + for (handle h : it) + v->push_back(h.cast()); + return v.release(); + })); + + cl.def("clear", + [](Vector &v) { + v.clear(); + }, + "Clear the contents" + ); + + cl.def("extend", + [](Vector &v, const Vector &src) { + v.insert(v.end(), src.begin(), src.end()); + }, + arg("L"), + "Extend the list by appending all the items in the given list" + ); + + cl.def("extend", + [](Vector &v, iterable it) { + const size_t old_size = v.size(); + v.reserve(old_size + len_hint(it)); + try { + for (handle h : it) { + v.push_back(h.cast()); + } + } catch (const cast_error &) { + v.erase(v.begin() + static_cast(old_size), v.end()); + try { + v.shrink_to_fit(); + } catch (const std::exception &) { + // Do nothing + } + throw; + } + }, + arg("L"), + "Extend the list by appending all the items in the given list" + ); + + cl.def("insert", + [](Vector &v, DiffType i, const T &x) { + // Can't use wrap_i; i == v.size() is OK + if (i < 0) + i += v.size(); + if (i < 0 || (SizeType)i > v.size()) + throw index_error(); + v.insert(v.begin() + i, x); + }, + arg("i") , arg("x"), + "Insert an item at a given position." 
+ ); + + cl.def("pop", + [](Vector &v) { + if (v.empty()) + throw index_error(); + T t = v.back(); + v.pop_back(); + return t; + }, + "Remove and return the last item" + ); + + cl.def("pop", + [wrap_i](Vector &v, DiffType i) { + i = wrap_i(i, v.size()); + T t = v[(SizeType) i]; + v.erase(v.begin() + i); + return t; + }, + arg("i"), + "Remove and return the item at index ``i``" + ); + + cl.def("__setitem__", + [wrap_i](Vector &v, DiffType i, const T &t) { + i = wrap_i(i, v.size()); + v[(SizeType)i] = t; + } + ); + + /// Slicing protocol + cl.def("__getitem__", + [](const Vector &v, slice slice) -> Vector * { + size_t start, stop, step, slicelength; + + if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) + throw error_already_set(); + + Vector *seq = new Vector(); + seq->reserve((size_t) slicelength); + + for (size_t i=0; ipush_back(v[start]); + start += step; + } + return seq; + }, + arg("s"), + "Retrieve list elements using a slice object" + ); + + cl.def("__setitem__", + [](Vector &v, slice slice, const Vector &value) { + size_t start, stop, step, slicelength; + if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) + throw error_already_set(); + + if (slicelength != value.size()) + throw std::runtime_error("Left and right hand size of slice assignment have different sizes!"); + + for (size_t i=0; i), +// we have to access by copying; otherwise we return by reference. 
+template using vector_needs_copy = negation< + std::is_same()[typename Vector::size_type()]), typename Vector::value_type &>>; + +// The usual case: access and iterate by reference +template +void vector_accessor(enable_if_t::value, Class_> &cl) { + using T = typename Vector::value_type; + using SizeType = typename Vector::size_type; + using DiffType = typename Vector::difference_type; + using ItType = typename Vector::iterator; + + auto wrap_i = [](DiffType i, SizeType n) { + if (i < 0) + i += n; + if (i < 0 || (SizeType)i >= n) + throw index_error(); + return i; + }; + + cl.def("__getitem__", + [wrap_i](Vector &v, DiffType i) -> T & { + i = wrap_i(i, v.size()); + return v[(SizeType)i]; + }, + return_value_policy::reference_internal // ref + keepalive + ); + + cl.def("__iter__", + [](Vector &v) { + return make_iterator< + return_value_policy::reference_internal, ItType, ItType, T&>( + v.begin(), v.end()); + }, + keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ + ); +} + +// The case for special objects, like std::vector, that have to be returned-by-copy: +template +void vector_accessor(enable_if_t::value, Class_> &cl) { + using T = typename Vector::value_type; + using SizeType = typename Vector::size_type; + using DiffType = typename Vector::difference_type; + using ItType = typename Vector::iterator; + cl.def("__getitem__", + [](const Vector &v, DiffType i) -> T { + if (i < 0 && (i += v.size()) < 0) + throw index_error(); + if ((SizeType)i >= v.size()) + throw index_error(); + return v[(SizeType)i]; + } + ); + + cl.def("__iter__", + [](Vector &v) { + return make_iterator< + return_value_policy::copy, ItType, ItType, T>( + v.begin(), v.end()); + }, + keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ + ); +} + +template auto vector_if_insertion_operator(Class_ &cl, std::string const &name) + -> decltype(std::declval() << std::declval(), void()) { + using size_type = typename Vector::size_type; + + cl.def("__repr__", 
+ [name](Vector &v) { + std::ostringstream s; + s << name << '['; + for (size_type i=0; i < v.size(); ++i) { + s << v[i]; + if (i != v.size() - 1) + s << ", "; + } + s << ']'; + return s.str(); + }, + "Return the canonical string representation of this list." + ); +} + +// Provide the buffer interface for vectors if we have data() and we have a format for it +// GCC seems to have "void std::vector::data()" - doing SFINAE on the existence of data() is insufficient, we need to check it returns an appropriate pointer +template +struct vector_has_data_and_format : std::false_type {}; +template +struct vector_has_data_and_format::format(), std::declval().data()), typename Vector::value_type*>::value>> : std::true_type {}; + +// Add the buffer interface to a vector +template +enable_if_t...>::value> +vector_buffer(Class_& cl) { + using T = typename Vector::value_type; + + static_assert(vector_has_data_and_format::value, "There is not an appropriate format descriptor for this vector"); + + // numpy.h declares this for arbitrary types, but it may raise an exception and crash hard at runtime if PYBIND11_NUMPY_DTYPE hasn't been called, so check here + format_descriptor::format(); + + cl.def_buffer([](Vector& v) -> buffer_info { + return buffer_info(v.data(), static_cast(sizeof(T)), format_descriptor::format(), 1, {v.size()}, {sizeof(T)}); + }); + + cl.def(init([](buffer buf) { + auto info = buf.request(); + if (info.ndim != 1 || info.strides[0] % static_cast(sizeof(T))) + throw type_error("Only valid 1D buffers can be copied to a vector"); + if (!detail::compare_buffer_info::compare(info) || (ssize_t) sizeof(T) != info.itemsize) + throw type_error("Format mismatch (Python: " + info.format + " C++: " + format_descriptor::format() + ")"); + + T *p = static_cast(info.ptr); + ssize_t step = info.strides[0] / static_cast(sizeof(T)); + T *end = p + info.shape[0] * step; + if (step == 1) { + return Vector(p, end); + } + else { + Vector vec; + vec.reserve((size_t) info.shape[0]); + 
for (; p != end; p += step) + vec.push_back(*p); + return vec; + } + })); + + return; +} + +template +enable_if_t...>::value> vector_buffer(Class_&) {} + +PYBIND11_NAMESPACE_END(detail) + +// +// std::vector +// +template , typename... Args> +class_ bind_vector(handle scope, std::string const &name, Args&&... args) { + using Class_ = class_; + + // If the value_type is unregistered (e.g. a converting type) or is itself registered + // module-local then make the vector binding module-local as well: + using vtype = typename Vector::value_type; + auto vtype_info = detail::get_type_info(typeid(vtype)); + bool local = !vtype_info || vtype_info->module_local; + + Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward(args)...); + + // Declare the buffer interface if a buffer_protocol() is passed in + detail::vector_buffer(cl); + + cl.def(init<>()); + + // Register copy constructor (if possible) + detail::vector_if_copy_constructible(cl); + + // Register comparison-related operators and functions (if possible) + detail::vector_if_equal_operator(cl); + + // Register stream insertion operator (if possible) + detail::vector_if_insertion_operator(cl, name); + + // Modifiers require copyable vector value type + detail::vector_modifiers(cl); + + // Accessor and iterator; return by value if copyable, otherwise we return by ref + keep-alive + detail::vector_accessor(cl); + + cl.def("__bool__", + [](const Vector &v) -> bool { + return !v.empty(); + }, + "Check whether the list is nonempty" + ); + + cl.def("__len__", &Vector::size); + + + + +#if 0 + // C++ style functions deprecated, leaving it here as an example + cl.def(init()); + + cl.def("resize", + (void (Vector::*) (size_type count)) & Vector::resize, + "changes the number of elements stored"); + + cl.def("erase", + [](Vector &v, SizeType i) { + if (i >= v.size()) + throw index_error(); + v.erase(v.begin() + i); + }, "erases element at index ``i``"); + + cl.def("empty", &Vector::empty, "checks whether the 
container is empty"); + cl.def("size", &Vector::size, "returns the number of elements"); + cl.def("push_back", (void (Vector::*)(const T&)) &Vector::push_back, "adds an element to the end"); + cl.def("pop_back", &Vector::pop_back, "removes the last element"); + + cl.def("max_size", &Vector::max_size, "returns the maximum possible number of elements"); + cl.def("reserve", &Vector::reserve, "reserves storage"); + cl.def("capacity", &Vector::capacity, "returns the number of elements that can be held in currently allocated storage"); + cl.def("shrink_to_fit", &Vector::shrink_to_fit, "reduces memory usage by freeing unused memory"); + + cl.def("clear", &Vector::clear, "clears the contents"); + cl.def("swap", &Vector::swap, "swaps the contents"); + + cl.def("front", [](Vector &v) { + if (v.size()) return v.front(); + else throw index_error(); + }, "access the first element"); + + cl.def("back", [](Vector &v) { + if (v.size()) return v.back(); + else throw index_error(); + }, "access the last element "); + +#endif + + return cl; +} + + + +// +// std::map, std::unordered_map +// + +PYBIND11_NAMESPACE_BEGIN(detail) + +/* Fallback functions */ +template void map_if_insertion_operator(const Args &...) { } +template void map_assignment(const Args &...) 
{ } + +// Map assignment when copy-assignable: just copy the value +template +void map_assignment(enable_if_t::value, Class_> &cl) { + using KeyType = typename Map::key_type; + using MappedType = typename Map::mapped_type; + + cl.def("__setitem__", + [](Map &m, const KeyType &k, const MappedType &v) { + auto it = m.find(k); + if (it != m.end()) it->second = v; + else m.emplace(k, v); + } + ); +} + +// Not copy-assignable, but still copy-constructible: we can update the value by erasing and reinserting +template +void map_assignment(enable_if_t< + !is_copy_assignable::value && + is_copy_constructible::value, + Class_> &cl) { + using KeyType = typename Map::key_type; + using MappedType = typename Map::mapped_type; + + cl.def("__setitem__", + [](Map &m, const KeyType &k, const MappedType &v) { + // We can't use m[k] = v; because value type might not be default constructable + auto r = m.emplace(k, v); + if (!r.second) { + // value type is not copy assignable so the only way to insert it is to erase it first... + m.erase(r.first); + m.emplace(k, v); + } + } + ); +} + + +template auto map_if_insertion_operator(Class_ &cl, std::string const &name) +-> decltype(std::declval() << std::declval() << std::declval(), void()) { + + cl.def("__repr__", + [name](Map &m) { + std::ostringstream s; + s << name << '{'; + bool f = false; + for (auto const &kv : m) { + if (f) + s << ", "; + s << kv.first << ": " << kv.second; + f = true; + } + s << '}'; + return s.str(); + }, + "Return the canonical string representation of this map." + ); +} + + +PYBIND11_NAMESPACE_END(detail) + +template , typename... Args> +class_ bind_map(handle scope, const std::string &name, Args&&... args) { + using KeyType = typename Map::key_type; + using MappedType = typename Map::mapped_type; + using Class_ = class_; + + // If either type is a non-module-local bound type then make the map binding non-local as well; + // otherwise (e.g. 
both types are either module-local or converting) the map will be + // module-local. + auto tinfo = detail::get_type_info(typeid(MappedType)); + bool local = !tinfo || tinfo->module_local; + if (local) { + tinfo = detail::get_type_info(typeid(KeyType)); + local = !tinfo || tinfo->module_local; + } + + Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward(args)...); + + cl.def(init<>()); + + // Register stream insertion operator (if possible) + detail::map_if_insertion_operator(cl, name); + + cl.def("__bool__", + [](const Map &m) -> bool { return !m.empty(); }, + "Check whether the map is nonempty" + ); + + cl.def("__iter__", + [](Map &m) { return make_key_iterator(m.begin(), m.end()); }, + keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ + ); + + cl.def("items", + [](Map &m) { return make_iterator(m.begin(), m.end()); }, + keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ + ); + + cl.def("__getitem__", + [](Map &m, const KeyType &k) -> MappedType & { + auto it = m.find(k); + if (it == m.end()) + throw key_error(); + return it->second; + }, + return_value_policy::reference_internal // ref + keepalive + ); + + cl.def("__contains__", + [](Map &m, const KeyType &k) -> bool { + auto it = m.find(k); + if (it == m.end()) + return false; + return true; + } + ); + + // Assignment provided only if the type is copyable + detail::map_assignment(cl); + + cl.def("__delitem__", + [](Map &m, const KeyType &k) { + auto it = m.find(k); + if (it == m.end()) + throw key_error(); + m.erase(it); + } + ); + + cl.def("__len__", &Map::size); + + return cl; +} + +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/diffvg/pybind11/pybind11/__init__.py b/diffvg/pybind11/pybind11/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5b2f83d5cd93c073ad130cc113bab25a1d03255b --- /dev/null +++ b/diffvg/pybind11/pybind11/__init__.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +from ._version import 
version_info, __version__ # noqa: F401 imported but unused + + +def get_include(user=False): + import os + d = os.path.dirname(__file__) + if os.path.exists(os.path.join(d, "include")): + # Package is installed + return os.path.join(d, "include") + else: + # Package is from a source directory + return os.path.join(os.path.dirname(d), "include") diff --git a/diffvg/pybind11/pybind11/__main__.py b/diffvg/pybind11/pybind11/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..5e393cc8f103dc42531d2967d5c05a1edcf2cfa1 --- /dev/null +++ b/diffvg/pybind11/pybind11/__main__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function + +import argparse +import sys +import sysconfig + +from . import get_include + + +def print_includes(): + dirs = [sysconfig.get_path('include'), + sysconfig.get_path('platinclude'), + get_include()] + + # Make unique but preserve order + unique_dirs = [] + for d in dirs: + if d not in unique_dirs: + unique_dirs.append(d) + + print(' '.join('-I' + d for d in unique_dirs)) + + +def main(): + parser = argparse.ArgumentParser(prog='python -m pybind11') + parser.add_argument('--includes', action='store_true', + help='Include flags for both pybind11 and Python headers.') + args = parser.parse_args() + if not sys.argv[1:]: + parser.print_help() + if args.includes: + print_includes() + + +if __name__ == '__main__': + main() diff --git a/diffvg/pybind11/pybind11/_version.py b/diffvg/pybind11/pybind11/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..1f2f254ce5e262fa7fb4770e1b770935ea46ecc0 --- /dev/null +++ b/diffvg/pybind11/pybind11/_version.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +version_info = (2, 5, 'dev1') +__version__ = '.'.join(map(str, version_info)) diff --git a/diffvg/pybind11/setup.cfg b/diffvg/pybind11/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..002f38d10e46472657ff8228139e0f92b0d5bc10 --- /dev/null +++ 
b/diffvg/pybind11/setup.cfg @@ -0,0 +1,12 @@ +[bdist_wheel] +universal=1 + +[flake8] +max-line-length = 99 +show_source = True +exclude = .git, __pycache__, build, dist, docs, tools, venv +ignore = + # required for pretty matrix formatting: multiple spaces after `,` and `[` + E201, E241, W504, + # camelcase 'cPickle' imported as lowercase 'pickle' + N813 diff --git a/diffvg/pybind11/setup.py b/diffvg/pybind11/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..577a6b6c37c9d284b0d5b7453de62aaa71c50869 --- /dev/null +++ b/diffvg/pybind11/setup.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Setup script for PyPI; use CMakeFile.txt to build extension modules + +from setuptools import setup +from distutils.command.install_headers import install_headers +from distutils.command.build_py import build_py +from pybind11 import __version__ +import os + +package_data = [ + 'include/pybind11/detail/class.h', + 'include/pybind11/detail/common.h', + 'include/pybind11/detail/descr.h', + 'include/pybind11/detail/init.h', + 'include/pybind11/detail/internals.h', + 'include/pybind11/detail/typeid.h', + 'include/pybind11/attr.h', + 'include/pybind11/buffer_info.h', + 'include/pybind11/cast.h', + 'include/pybind11/chrono.h', + 'include/pybind11/common.h', + 'include/pybind11/complex.h', + 'include/pybind11/eigen.h', + 'include/pybind11/embed.h', + 'include/pybind11/eval.h', + 'include/pybind11/functional.h', + 'include/pybind11/iostream.h', + 'include/pybind11/numpy.h', + 'include/pybind11/operators.h', + 'include/pybind11/options.h', + 'include/pybind11/pybind11.h', + 'include/pybind11/pytypes.h', + 'include/pybind11/stl.h', + 'include/pybind11/stl_bind.h', +] + +# Prevent installation of pybind11 headers by setting +# PYBIND11_USE_CMAKE. 
+if os.environ.get('PYBIND11_USE_CMAKE'): + headers = [] +else: + headers = package_data + + +class InstallHeaders(install_headers): + """Use custom header installer because the default one flattens subdirectories""" + def run(self): + if not self.distribution.headers: + return + + for header in self.distribution.headers: + subdir = os.path.dirname(os.path.relpath(header, 'include/pybind11')) + install_dir = os.path.join(self.install_dir, subdir) + self.mkpath(install_dir) + + (out, _) = self.copy_file(header, install_dir) + self.outfiles.append(out) + + +# Install the headers inside the package as well +class BuildPy(build_py): + def build_package_data(self): + build_py.build_package_data(self) + for header in package_data: + target = os.path.join(self.build_lib, 'pybind11', header) + self.mkpath(os.path.dirname(target)) + self.copy_file(header, target, preserve_mode=False) + + def get_outputs(self, include_bytecode=1): + outputs = build_py.get_outputs(self, include_bytecode=include_bytecode) + for header in package_data: + target = os.path.join(self.build_lib, 'pybind11', header) + outputs.append(target) + return outputs + + +setup( + name='pybind11', + version=__version__, + description='Seamless operability between C++11 and Python', + author='Wenzel Jakob', + author_email='wenzel.jakob@epfl.ch', + url='https://github.com/pybind/pybind11', + download_url='https://github.com/pybind/pybind11/tarball/v' + __version__, + packages=['pybind11'], + license='BSD', + headers=headers, + zip_safe=False, + cmdclass=dict(install_headers=InstallHeaders, build_py=BuildPy), + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Topic :: Software Development :: Libraries :: Python Modules', + 'Topic :: Utilities', + 'Programming Language :: C++', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', + 'Programming 
Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'License :: OSI Approved :: BSD License' + ], + keywords='C++11, Python bindings', + long_description="""pybind11 is a lightweight header-only library that +exposes C++ types in Python and vice versa, mainly to create Python bindings of +existing C++ code. Its goals and syntax are similar to the excellent +Boost.Python by David Abrahams: to minimize boilerplate code in traditional +extension modules by inferring type information using compile-time +introspection. + +The main issue with Boost.Python-and the reason for creating such a similar +project-is Boost. Boost is an enormously large and complex suite of utility +libraries that works with almost every C++ compiler in existence. This +compatibility has its cost: arcane template tricks and workarounds are +necessary to support the oldest and buggiest of compiler specimens. Now that +C++11-compatible compilers are widely available, this heavy machinery has +become an excessively large and unnecessary dependency. + +Think of this library as a tiny self-contained version of Boost.Python with +everything stripped away that isn't relevant for binding generation. Without +comments, the core header files only require ~4K lines of code and depend on +Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This +compact implementation was possible thanks to some of the new C++11 language +features (specifically: tuples, lambda functions and variadic templates). 
Since +its creation, this library has grown beyond Boost.Python in many ways, leading +to dramatically simpler binding code in many common situations.""") diff --git a/diffvg/pybind11/tests/CMakeLists.txt b/diffvg/pybind11/tests/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..72de21018a85ff3d5e443628252255334073b914 --- /dev/null +++ b/diffvg/pybind11/tests/CMakeLists.txt @@ -0,0 +1,361 @@ +# CMakeLists.txt -- Build system for the pybind11 test suite +# +# Copyright (c) 2015 Wenzel Jakob +# +# All rights reserved. Use of this source code is governed by a +# BSD-style license that can be found in the LICENSE file. + +cmake_minimum_required(VERSION 3.4) + +# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with +# some versions of VS that have a patched CMake 3.11. This forces us to emulate +# the behavior using the following workaround: +if(${CMAKE_VERSION} VERSION_LESS 3.18) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +else() + cmake_policy(VERSION 3.18) +endif() + +# New Python support +if(DEFINED Python_EXECUTABLE) + set(PYTHON_EXECUTABLE "${Python_EXECUTABLE}") + set(PYTHON_VERSION "${Python_VERSION}") +endif() + +# There's no harm in including a project in a project +project(pybind11_tests CXX) + +# Access FindCatch and more +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/../tools") + +option(PYBIND11_WERROR "Report all warnings as errors" OFF) +option(DOWNLOAD_EIGEN "Download EIGEN (requires CMake 3.11+)" OFF) +set(PYBIND11_TEST_OVERRIDE + "" + CACHE STRING "Tests from ;-separated list of *.cpp files will be built instead of all tests") + +if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR) + # We're being loaded directly, i.e. 
not via add_subdirectory, so make this + # work as its own project and load the pybind11Config to get the tools we need + find_package(pybind11 REQUIRED CONFIG) +endif() + +if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) + message(STATUS "Setting tests build type to MinSizeRel as none was specified") + set(CMAKE_BUILD_TYPE + MinSizeRel + CACHE STRING "Choose the type of build." FORCE) + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" + "RelWithDebInfo") +endif() + +# Full set of test files (you can override these; see below) +set(PYBIND11_TEST_FILES + test_async.cpp + test_buffers.cpp + test_builtin_casters.cpp + test_call_policies.cpp + test_callbacks.cpp + test_chrono.cpp + test_class.cpp + test_constants_and_functions.cpp + test_copy_move.cpp + test_custom_type_casters.cpp + test_docstring_options.cpp + test_eigen.cpp + test_enum.cpp + test_eval.cpp + test_exceptions.cpp + test_factory_constructors.cpp + test_gil_scoped.cpp + test_iostream.cpp + test_kwargs_and_defaults.cpp + test_local_bindings.cpp + test_methods_and_attributes.cpp + test_modules.cpp + test_multiple_inheritance.cpp + test_numpy_array.cpp + test_numpy_dtypes.cpp + test_numpy_vectorize.cpp + test_opaque_types.cpp + test_operator_overloading.cpp + test_pickling.cpp + test_pytypes.cpp + test_sequences_and_iterators.cpp + test_smart_ptr.cpp + test_stl.cpp + test_stl_binders.cpp + test_tagbased_polymorphic.cpp + test_union.cpp + test_virtual_functions.cpp) + +# Invoking cmake with something like: +# cmake -DPYBIND11_TEST_OVERRIDE="test_callbacks.cpp;test_pickling.cpp" .. +# lets you override the tests that get compiled and run. You can restore to all tests with: +# cmake -DPYBIND11_TEST_OVERRIDE= .. 
+if(PYBIND11_TEST_OVERRIDE) + set(PYBIND11_TEST_FILES ${PYBIND11_TEST_OVERRIDE}) +endif() + +# Skip test_async for Python < 3.5 +list(FIND PYBIND11_TEST_FILES test_async.cpp PYBIND11_TEST_FILES_ASYNC_I) +if((PYBIND11_TEST_FILES_ASYNC_I GREATER -1) AND (PYTHON_VERSION VERSION_LESS 3.5)) + message(STATUS "Skipping test_async because Python version ${PYTHON_VERSION} < 3.5") + list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_ASYNC_I}) +endif() + +string(REPLACE ".cpp" ".py" PYBIND11_PYTEST_FILES "${PYBIND11_TEST_FILES}") + +# Contains the set of test files that require pybind11_cross_module_tests to be +# built; if none of these are built (i.e. because TEST_OVERRIDE is used and +# doesn't include them) the second module doesn't get built. +set(PYBIND11_CROSS_MODULE_TESTS test_exceptions.py test_local_bindings.py test_stl.py + test_stl_binders.py) + +set(PYBIND11_CROSS_MODULE_GIL_TESTS test_gil_scoped.py) + +# Check if Eigen is available; if not, remove from PYBIND11_TEST_FILES (but +# keep it in PYBIND11_PYTEST_FILES, so that we get the "eigen is not installed" +# skip message). +list(FIND PYBIND11_TEST_FILES test_eigen.cpp PYBIND11_TEST_FILES_EIGEN_I) +if(PYBIND11_TEST_FILES_EIGEN_I GREATER -1) + # Try loading via newer Eigen's Eigen3Config first (bypassing tools/FindEigen3.cmake). + # Eigen 3.3.1+ exports a cmake 3.0+ target for handling dependency requirements, but also + # produces a fatal error if loaded from a pre-3.0 cmake. 
+ if(DOWNLOAD_EIGEN) + if(CMAKE_VERSION VERSION_LESS 3.11) + message(FATAL_ERROR "CMake 3.11+ required when using DOWNLOAD_EIGEN") + endif() + + set(EIGEN3_VERSION_STRING "3.3.7") + + include(FetchContent) + FetchContent_Declare( + eigen + GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git + GIT_TAG ${EIGEN3_VERSION_STRING}) + + FetchContent_GetProperties(eigen) + if(NOT eigen_POPULATED) + message(STATUS "Downloading Eigen") + FetchContent_Populate(eigen) + endif() + + set(EIGEN3_INCLUDE_DIR ${eigen_SOURCE_DIR}) + set(EIGEN3_FOUND TRUE) + + else() + find_package(Eigen3 3.2.7 QUIET CONFIG) + + if(NOT EIGEN3_FOUND) + # Couldn't load via target, so fall back to allowing module mode finding, which will pick up + # tools/FindEigen3.cmake + find_package(Eigen3 3.2.7 QUIET) + endif() + endif() + + if(EIGEN3_FOUND) + if(NOT TARGET Eigen3::Eigen) + add_library(Eigen3::Eigen IMPORTED INTERFACE) + set_property(TARGET Eigen3::Eigen PROPERTY INTERFACE_INCLUDE_DIRECTORIES + "${EIGEN3_INCLUDE_DIR}") + endif() + + # Eigen 3.3.1+ cmake sets EIGEN3_VERSION_STRING (and hard codes the version when installed + # rather than looking it up in the cmake script); older versions, and the + # tools/FindEigen3.cmake, set EIGEN3_VERSION instead. 
+ if(NOT EIGEN3_VERSION AND EIGEN3_VERSION_STRING) + set(EIGEN3_VERSION ${EIGEN3_VERSION_STRING}) + endif() + message(STATUS "Building tests with Eigen v${EIGEN3_VERSION}") + else() + list(REMOVE_AT PYBIND11_TEST_FILES ${PYBIND11_TEST_FILES_EIGEN_I}) + message(STATUS "Building tests WITHOUT Eigen, use -DDOWNLOAD_EIGEN on CMake 3.11+ to download") + endif() +endif() + +# Optional dependency for some tests (boost::variant is only supported with version >= 1.56) +find_package(Boost 1.56) + +if(Boost_FOUND) + if(NOT TARGET Boost::headers) + if(TARGET Boost::boost) + # Classic FindBoost + add_library(Boost::headers ALIAS Boost::boost) + else() + # Very old FindBoost, or newer Boost than CMake in older CMakes + add_library(Boost::headers IMPORTED INTERFACE) + set_property(TARGET Boost::headers PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${Boost_INCLUDE_DIRS}) + endif() + endif() +endif() + +# Compile with compiler warnings turned on +function(pybind11_enable_warnings target_name) + if(MSVC) + target_compile_options(${target_name} PRIVATE /W4) + elseif(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Intel|Clang)") + target_compile_options(${target_name} PRIVATE -Wall -Wextra -Wconversion -Wcast-qual + -Wdeprecated) + endif() + + if(PYBIND11_WERROR) + if(MSVC) + target_compile_options(${target_name} PRIVATE /WX) + elseif(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Intel|Clang)") + target_compile_options(${target_name} PRIVATE -Werror) + endif() + endif() + + # Needs to be readded since the ordering requires these to be after the ones above + if(CMAKE_CXX_STANDARD + AND CMAKE_CXX_COMPILER_ID MATCHES "Clang" + AND PYTHON_VERSION VERSION_LESS 3.0) + if(CMAKE_CXX_STANDARD LESS 17) + target_compile_options(${target_name} PUBLIC -Wno-deprecated-register) + else() + target_compile_options(${target_name} PUBLIC -Wno-register) + endif() + endif() +endfunction() + +set(test_targets pybind11_tests) + +# Build pybind11_cross_module_tests if any test_whatever.py are being built that require it +foreach(t 
${PYBIND11_CROSS_MODULE_TESTS}) + list(FIND PYBIND11_PYTEST_FILES ${t} i) + if(i GREATER -1) + list(APPEND test_targets pybind11_cross_module_tests) + break() + endif() +endforeach() + +foreach(t ${PYBIND11_CROSS_MODULE_GIL_TESTS}) + list(FIND PYBIND11_PYTEST_FILES ${t} i) + if(i GREATER -1) + list(APPEND test_targets cross_module_gil_utils) + break() + endif() +endforeach() + +foreach(target ${test_targets}) + set(test_files ${PYBIND11_TEST_FILES}) + if(NOT "${target}" STREQUAL "pybind11_tests") + set(test_files "") + endif() + + # Create the binding library + pybind11_add_module(${target} THIN_LTO ${target}.cpp ${test_files} ${PYBIND11_HEADERS}) + pybind11_enable_warnings(${target}) + + if(NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR) + get_property( + suffix + TARGET ${target} + PROPERTY SUFFIX) + set(source_output "${CMAKE_CURRENT_SOURCE_DIR}/${target}${suffix}") + if(suffix AND EXISTS "${source_output}") + message(WARNING "Output file also in source directory; " + "please remove to avoid confusion: ${source_output}") + endif() + endif() + + if(MSVC) + target_compile_options(${target} PRIVATE /utf-8) + endif() + + if(EIGEN3_FOUND) + target_link_libraries(${target} PRIVATE Eigen3::Eigen) + target_compile_definitions(${target} PRIVATE -DPYBIND11_TEST_EIGEN) + endif() + + if(Boost_FOUND) + target_link_libraries(${target} PRIVATE Boost::headers) + target_compile_definitions(${target} PRIVATE -DPYBIND11_TEST_BOOST) + endif() + + # Always write the output file directly into the 'tests' directory (even on MSVC) + if(NOT CMAKE_LIBRARY_OUTPUT_DIRECTORY) + set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_DIRECTORY + "${CMAKE_CURRENT_BINARY_DIR}") + foreach(config ${CMAKE_CONFIGURATION_TYPES}) + string(TOUPPER ${config} config) + set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_DIRECTORY_${config} + "${CMAKE_CURRENT_BINARY_DIR}") + endforeach() + endif() +endforeach() + +# Make sure pytest is found or produce a fatal error +if(NOT 
PYBIND11_PYTEST_FOUND)
+  execute_process(
+    COMMAND ${PYTHON_EXECUTABLE} -c "import pytest; print(pytest.__version__)"
+    RESULT_VARIABLE pytest_not_found
+    OUTPUT_VARIABLE pytest_version
+    ERROR_QUIET)
+  if(pytest_not_found)
+    message(FATAL_ERROR "Running the tests requires pytest. Please install it manually"
+                        " (try: ${PYTHON_EXECUTABLE} -m pip install pytest)")
+  elseif(pytest_version VERSION_LESS 3.1)
+    message(FATAL_ERROR "Running the tests requires pytest >= 3.1. Found: ${pytest_version}"
+                        "Please update it (try: ${PYTHON_EXECUTABLE} -m pip install -U pytest)")
+  endif()
+  set(PYBIND11_PYTEST_FOUND
+      TRUE
+      CACHE INTERNAL "")
+endif()
+
+if(NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR)
+  # This is not used later in the build, so it's okay to regenerate each time.
+  configure_file("${CMAKE_CURRENT_SOURCE_DIR}/pytest.ini" "${CMAKE_CURRENT_BINARY_DIR}/pytest.ini"
+                 COPYONLY)
+  file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/pytest.ini"
+       "\ntestpaths = \"${CMAKE_CURRENT_SOURCE_DIR}\"")
+
+endif()
+
+# cmake 3.12 added list(transform prepend
+# but we can't use it yet
+string(REPLACE "test_" "${CMAKE_CURRENT_BINARY_DIR}/test_" PYBIND11_BINARY_TEST_FILES
+               "${PYBIND11_PYTEST_FILES}")
+
+# A single command to compile and run the tests
+add_custom_target(
+  pytest
+  COMMAND ${PYTHON_EXECUTABLE} -m pytest ${PYBIND11_BINARY_TEST_FILES}
+  DEPENDS ${test_targets}
+  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  USES_TERMINAL)
+
+if(PYBIND11_TEST_OVERRIDE)
+  add_custom_command(
+    TARGET pytest
+    POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E echo
+            "Note: not all tests run: -DPYBIND11_TEST_OVERRIDE is in effect")
+endif()
+
+# Add a check target to run all the tests, starting with pytest (we add dependencies to this below)
+add_custom_target(check DEPENDS pytest)
+
+# The remaining tests only apply when being built as part of the pybind11 project, but not if the
+# tests are being built independently.
+if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
+  return()
+endif()
+
+# Add a post-build comment to show the primary test suite .so size and, if a previous size, compare it:
+add_custom_command(
+  TARGET pybind11_tests
+  POST_BUILD
+  COMMAND
+    ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/../tools/libsize.py
+    $<TARGET_FILE:pybind11_tests>
+    ${CMAKE_CURRENT_BINARY_DIR}/sosize-$<TARGET_FILE_NAME:pybind11_tests>.txt)
+
+# Test embedding the interpreter. Provides the `cpptest` target.
+add_subdirectory(test_embed)
+
+# Test CMake build using functions and targets from subdirectory or installed location
+add_subdirectory(test_cmake_build)
diff --git a/diffvg/pybind11/tests/conftest.py b/diffvg/pybind11/tests/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2350d041f5d3d57dede9ff23c3177eae2914048
--- /dev/null
+++ b/diffvg/pybind11/tests/conftest.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+"""pytest configuration
+
+Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
+Adds docstring and exceptions message sanitizers: ignore Python 2 vs 3 differences.
+""" + +import contextlib +import difflib +import gc +import re +import textwrap + +import pytest + +import env + +# Early diagnostic for failed imports +import pybind11_tests # noqa: F401 + +_unicode_marker = re.compile(r'u(\'[^\']*\')') +_long_marker = re.compile(r'([0-9])L') +_hexadecimal = re.compile(r'0x[0-9a-fA-F]+') + +# Avoid collecting Python3 only files +collect_ignore = [] +if env.PY2: + collect_ignore.append("test_async.py") + + +def _strip_and_dedent(s): + """For triple-quote strings""" + return textwrap.dedent(s.lstrip('\n').rstrip()) + + +def _split_and_sort(s): + """For output which does not require specific line order""" + return sorted(_strip_and_dedent(s).splitlines()) + + +def _make_explanation(a, b): + """Explanation for a failed assert -- the a and b arguments are List[str]""" + return ["--- actual / +++ expected"] + [line.strip('\n') for line in difflib.ndiff(a, b)] + + +class Output(object): + """Basic output post-processing and comparison""" + def __init__(self, string): + self.string = string + self.explanation = [] + + def __str__(self): + return self.string + + def __eq__(self, other): + # Ignore constructor/destructor output which is prefixed with "###" + a = [line for line in self.string.strip().splitlines() if not line.startswith("###")] + b = _strip_and_dedent(other).splitlines() + if a == b: + return True + else: + self.explanation = _make_explanation(a, b) + return False + + +class Unordered(Output): + """Custom comparison for output without strict line ordering""" + def __eq__(self, other): + a = _split_and_sort(self.string) + b = _split_and_sort(other) + if a == b: + return True + else: + self.explanation = _make_explanation(a, b) + return False + + +class Capture(object): + def __init__(self, capfd): + self.capfd = capfd + self.out = "" + self.err = "" + + def __enter__(self): + self.capfd.readouterr() + return self + + def __exit__(self, *args): + self.out, self.err = self.capfd.readouterr() + + def __eq__(self, other): + a = 
Output(self.out) + b = other + if a == b: + return True + else: + self.explanation = a.explanation + return False + + def __str__(self): + return self.out + + def __contains__(self, item): + return item in self.out + + @property + def unordered(self): + return Unordered(self.out) + + @property + def stderr(self): + return Output(self.err) + + +@pytest.fixture +def capture(capsys): + """Extended `capsys` with context manager and custom equality operators""" + return Capture(capsys) + + +class SanitizedString(object): + def __init__(self, sanitizer): + self.sanitizer = sanitizer + self.string = "" + self.explanation = [] + + def __call__(self, thing): + self.string = self.sanitizer(thing) + return self + + def __eq__(self, other): + a = self.string + b = _strip_and_dedent(other) + if a == b: + return True + else: + self.explanation = _make_explanation(a.splitlines(), b.splitlines()) + return False + + +def _sanitize_general(s): + s = s.strip() + s = s.replace("pybind11_tests.", "m.") + s = s.replace("unicode", "str") + s = _long_marker.sub(r"\1", s) + s = _unicode_marker.sub(r"\1", s) + return s + + +def _sanitize_docstring(thing): + s = thing.__doc__ + s = _sanitize_general(s) + return s + + +@pytest.fixture +def doc(): + """Sanitize docstrings and add custom failure explanation""" + return SanitizedString(_sanitize_docstring) + + +def _sanitize_message(thing): + s = str(thing) + s = _sanitize_general(s) + s = _hexadecimal.sub("0", s) + return s + + +@pytest.fixture +def msg(): + """Sanitize messages and add custom failure explanation""" + return SanitizedString(_sanitize_message) + + +# noinspection PyUnusedLocal +def pytest_assertrepr_compare(op, left, right): + """Hook to insert custom failure explanation""" + if hasattr(left, 'explanation'): + return left.explanation + + +@contextlib.contextmanager +def suppress(exception): + """Suppress the desired exception""" + try: + yield + except exception: + pass + + +def gc_collect(): + ''' Run the garbage collector 
twice (needed when running + reference counting tests with PyPy) ''' + gc.collect() + gc.collect() + + +def pytest_configure(): + pytest.suppress = suppress + pytest.gc_collect = gc_collect diff --git a/diffvg/pybind11/tests/constructor_stats.h b/diffvg/pybind11/tests/constructor_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..abfaf9161406798eeaa79a0d6c22e023de893495 --- /dev/null +++ b/diffvg/pybind11/tests/constructor_stats.h @@ -0,0 +1,275 @@ +#pragma once +/* + tests/constructor_stats.h -- framework for printing and tracking object + instance lifetimes in example/test code. + + Copyright (c) 2016 Jason Rhinelander + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. + +This header provides a few useful tools for writing examples or tests that want to check and/or +display object instance lifetimes. It requires that you include this header and add the following +function calls to constructors: + + class MyClass { + MyClass() { ...; print_default_created(this); } + ~MyClass() { ...; print_destroyed(this); } + MyClass(const MyClass &c) { ...; print_copy_created(this); } + MyClass(MyClass &&c) { ...; print_move_created(this); } + MyClass(int a, int b) { ...; print_created(this, a, b); } + MyClass &operator=(const MyClass &c) { ...; print_copy_assigned(this); } + MyClass &operator=(MyClass &&c) { ...; print_move_assigned(this); } + + ... + } + +You can find various examples of these in several of the existing testing .cpp files. (Of course +you don't need to add any of the above constructors/operators that you don't actually have, except +for the destructor). 
+ +Each of these will print an appropriate message such as: + + ### MyClass @ 0x2801910 created via default constructor + ### MyClass @ 0x27fa780 created 100 200 + ### MyClass @ 0x2801910 destroyed + ### MyClass @ 0x27fa780 destroyed + +You can also include extra arguments (such as the 100, 200 in the output above, coming from the +value constructor) for all of the above methods which will be included in the output. + +For testing, each of these also keeps track the created instances and allows you to check how many +of the various constructors have been invoked from the Python side via code such as: + + from pybind11_tests import ConstructorStats + cstats = ConstructorStats.get(MyClass) + print(cstats.alive()) + print(cstats.default_constructions) + +Note that `.alive()` should usually be the first thing you call as it invokes Python's garbage +collector to actually destroy objects that aren't yet referenced. + +For everything except copy and move constructors and destructors, any extra values given to the +print_...() function is stored in a class-specific values list which you can retrieve and inspect +from the ConstructorStats instance `.values()` method. + +In some cases, when you need to track instances of a C++ class not registered with pybind11, you +need to add a function returning the ConstructorStats for the C++ class; this can be done with: + + m.def("get_special_cstats", &ConstructorStats::get, py::return_value_policy::reference) + +Finally, you can suppress the output messages, but keep the constructor tracking (for +inspection/testing in python) by using the functions with `print_` replaced with `track_` (e.g. +`track_copy_created(this)`). + +*/ + +#include "pybind11_tests.h" +#include +#include +#include +#include + +class ConstructorStats { +protected: + std::unordered_map _instances; // Need a map rather than set because members can shared address with parents + std::list _values; // Used to track values (e.g. 
of value constructors) +public: + int default_constructions = 0; + int copy_constructions = 0; + int move_constructions = 0; + int copy_assignments = 0; + int move_assignments = 0; + + void copy_created(void *inst) { + created(inst); + copy_constructions++; + } + + void move_created(void *inst) { + created(inst); + move_constructions++; + } + + void default_created(void *inst) { + created(inst); + default_constructions++; + } + + void created(void *inst) { + ++_instances[inst]; + } + + void destroyed(void *inst) { + if (--_instances[inst] < 0) + throw std::runtime_error("cstats.destroyed() called with unknown " + "instance; potential double-destruction " + "or a missing cstats.created()"); + } + + static void gc() { + // Force garbage collection to ensure any pending destructors are invoked: +#if defined(PYPY_VERSION) + PyObject *globals = PyEval_GetGlobals(); + PyObject *result = PyRun_String( + "import gc\n" + "for i in range(2):" + " gc.collect()\n", + Py_file_input, globals, globals); + if (result == nullptr) + throw py::error_already_set(); + Py_DECREF(result); +#else + py::module::import("gc").attr("collect")(); +#endif + } + + int alive() { + gc(); + int total = 0; + for (const auto &p : _instances) + if (p.second > 0) + total += p.second; + return total; + } + + void value() {} // Recursion terminator + // Takes one or more values, converts them to strings, then stores them. 
+ template void value(const T &v, Tmore &&...args) { + std::ostringstream oss; + oss << v; + _values.push_back(oss.str()); + value(std::forward(args)...); + } + + // Move out stored values + py::list values() { + py::list l; + for (const auto &v : _values) l.append(py::cast(v)); + _values.clear(); + return l; + } + + // Gets constructor stats from a C++ type index + static ConstructorStats& get(std::type_index type) { + static std::unordered_map all_cstats; + return all_cstats[type]; + } + + // Gets constructor stats from a C++ type + template static ConstructorStats& get() { +#if defined(PYPY_VERSION) + gc(); +#endif + return get(typeid(T)); + } + + // Gets constructor stats from a Python class + static ConstructorStats& get(py::object class_) { + auto &internals = py::detail::get_internals(); + const std::type_index *t1 = nullptr, *t2 = nullptr; + try { + auto *type_info = internals.registered_types_py.at((PyTypeObject *) class_.ptr()).at(0); + for (auto &p : internals.registered_types_cpp) { + if (p.second == type_info) { + if (t1) { + t2 = &p.first; + break; + } + t1 = &p.first; + } + } + } + catch (const std::out_of_range&) {} + if (!t1) throw std::runtime_error("Unknown class passed to ConstructorStats::get()"); + auto &cs1 = get(*t1); + // If we have both a t1 and t2 match, one is probably the trampoline class; return whichever + // has more constructions (typically one or the other will be 0) + if (t2) { + auto &cs2 = get(*t2); + int cs1_total = cs1.default_constructions + cs1.copy_constructions + cs1.move_constructions + (int) cs1._values.size(); + int cs2_total = cs2.default_constructions + cs2.copy_constructions + cs2.move_constructions + (int) cs2._values.size(); + if (cs2_total > cs1_total) return cs2; + } + return cs1; + } +}; + +// To track construction/destruction, you need to call these methods from the various +// constructors/operators. 
The ones that take extra values record the given values in the +// constructor stats values for later inspection. +template void track_copy_created(T *inst) { ConstructorStats::get().copy_created(inst); } +template void track_move_created(T *inst) { ConstructorStats::get().move_created(inst); } +template void track_copy_assigned(T *, Values &&...values) { + auto &cst = ConstructorStats::get(); + cst.copy_assignments++; + cst.value(std::forward(values)...); +} +template void track_move_assigned(T *, Values &&...values) { + auto &cst = ConstructorStats::get(); + cst.move_assignments++; + cst.value(std::forward(values)...); +} +template void track_default_created(T *inst, Values &&...values) { + auto &cst = ConstructorStats::get(); + cst.default_created(inst); + cst.value(std::forward(values)...); +} +template void track_created(T *inst, Values &&...values) { + auto &cst = ConstructorStats::get(); + cst.created(inst); + cst.value(std::forward(values)...); +} +template void track_destroyed(T *inst) { + ConstructorStats::get().destroyed(inst); +} +template void track_values(T *, Values &&...values) { + ConstructorStats::get().value(std::forward(values)...); +} + +/// Don't cast pointers to Python, print them as strings +inline const char *format_ptrs(const char *p) { return p; } +template +py::str format_ptrs(T *p) { return "{:#x}"_s.format(reinterpret_cast(p)); } +template +auto format_ptrs(T &&x) -> decltype(std::forward(x)) { return std::forward(x); } + +template +void print_constr_details(T *inst, const std::string &action, Output &&...output) { + py::print("###", py::type_id(), "@", format_ptrs(inst), action, + format_ptrs(std::forward(output))...); +} + +// Verbose versions of the above: +template void print_copy_created(T *inst, Values &&...values) { // NB: this prints, but doesn't store, given values + print_constr_details(inst, "created via copy constructor", values...); + track_copy_created(inst); +} +template void print_move_created(T *inst, Values 
&&...values) { // NB: this prints, but doesn't store, given values + print_constr_details(inst, "created via move constructor", values...); + track_move_created(inst); +} +template void print_copy_assigned(T *inst, Values &&...values) { + print_constr_details(inst, "assigned via copy assignment", values...); + track_copy_assigned(inst, values...); +} +template void print_move_assigned(T *inst, Values &&...values) { + print_constr_details(inst, "assigned via move assignment", values...); + track_move_assigned(inst, values...); +} +template void print_default_created(T *inst, Values &&...values) { + print_constr_details(inst, "created via default constructor", values...); + track_default_created(inst, values...); +} +template void print_created(T *inst, Values &&...values) { + print_constr_details(inst, "created", values...); + track_created(inst, values...); +} +template void print_destroyed(T *inst, Values &&...values) { // Prints but doesn't store given values + print_constr_details(inst, "destroyed", values...); + track_destroyed(inst); +} +template void print_values(T *inst, Values &&...values) { + print_constr_details(inst, ":", values...); + track_values(inst, values...); +} diff --git a/diffvg/pybind11/tests/cross_module_gil_utils.cpp b/diffvg/pybind11/tests/cross_module_gil_utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..07db9f6e48a10dfd2d4370c3daff6e793d6675d2 --- /dev/null +++ b/diffvg/pybind11/tests/cross_module_gil_utils.cpp @@ -0,0 +1,73 @@ +/* + tests/cross_module_gil_utils.cpp -- tools for acquiring GIL from a different module + + Copyright (c) 2019 Google LLC + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ +#include +#include + +// This file mimics a DSO that makes pybind11 calls but does not define a +// PYBIND11_MODULE. 
The purpose is to test that such a DSO can create a +// py::gil_scoped_acquire when the running thread is in a GIL-released state. +// +// Note that we define a Python module here for convenience, but in general +// this need not be the case. The typical scenario would be a DSO that implements +// shared logic used internally by multiple pybind11 modules. + +namespace { + +namespace py = pybind11; +void gil_acquire() { py::gil_scoped_acquire gil; } + +constexpr char kModuleName[] = "cross_module_gil_utils"; + +#if PY_MAJOR_VERSION >= 3 +struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + kModuleName, + NULL, + 0, + NULL, + NULL, + NULL, + NULL, + NULL +}; +#else +PyMethodDef module_methods[] = { + {NULL, NULL, 0, NULL} +}; +#endif + +} // namespace + +extern "C" PYBIND11_EXPORT +#if PY_MAJOR_VERSION >= 3 +PyObject* PyInit_cross_module_gil_utils() +#else +void initcross_module_gil_utils() +#endif +{ + + PyObject* m = +#if PY_MAJOR_VERSION >= 3 + PyModule_Create(&moduledef); +#else + Py_InitModule(kModuleName, module_methods); +#endif + + if (m != NULL) { + static_assert( + sizeof(&gil_acquire) == sizeof(void*), + "Function pointer must have the same size as void*"); + PyModule_AddObject(m, "gil_acquire_funcaddr", + PyLong_FromVoidPtr(reinterpret_cast(&gil_acquire))); + } + +#if PY_MAJOR_VERSION >= 3 + return m; +#endif +} diff --git a/diffvg/pybind11/tests/env.py b/diffvg/pybind11/tests/env.py new file mode 100644 index 0000000000000000000000000000000000000000..5cded441271c61af72fc0be0de79332dc6279d72 --- /dev/null +++ b/diffvg/pybind11/tests/env.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +import platform +import sys + +LINUX = sys.platform.startswith("linux") +MACOS = sys.platform.startswith("darwin") +WIN = sys.platform.startswith("win32") or sys.platform.startswith("cygwin") + +CPYTHON = platform.python_implementation() == "CPython" +PYPY = platform.python_implementation() == "PyPy" + +PY2 = sys.version_info.major == 2 + +PY = sys.version_info diff 
--git a/diffvg/pybind11/tests/local_bindings.h b/diffvg/pybind11/tests/local_bindings.h new file mode 100644 index 0000000000000000000000000000000000000000..b6afb808664de1fdbde011a9bf7c38d3a8794127 --- /dev/null +++ b/diffvg/pybind11/tests/local_bindings.h @@ -0,0 +1,64 @@ +#pragma once +#include "pybind11_tests.h" + +/// Simple class used to test py::local: +template class LocalBase { +public: + LocalBase(int i) : i(i) { } + int i = -1; +}; + +/// Registered with py::module_local in both main and secondary modules: +using LocalType = LocalBase<0>; +/// Registered without py::module_local in both modules: +using NonLocalType = LocalBase<1>; +/// A second non-local type (for stl_bind tests): +using NonLocal2 = LocalBase<2>; +/// Tests within-module, different-compilation-unit local definition conflict: +using LocalExternal = LocalBase<3>; +/// Mixed: registered local first, then global +using MixedLocalGlobal = LocalBase<4>; +/// Mixed: global first, then local +using MixedGlobalLocal = LocalBase<5>; + +/// Registered with py::module_local only in the secondary module: +using ExternalType1 = LocalBase<6>; +using ExternalType2 = LocalBase<7>; + +using LocalVec = std::vector; +using LocalVec2 = std::vector; +using LocalMap = std::unordered_map; +using NonLocalVec = std::vector; +using NonLocalVec2 = std::vector; +using NonLocalMap = std::unordered_map; +using NonLocalMap2 = std::unordered_map; + +PYBIND11_MAKE_OPAQUE(LocalVec); +PYBIND11_MAKE_OPAQUE(LocalVec2); +PYBIND11_MAKE_OPAQUE(LocalMap); +PYBIND11_MAKE_OPAQUE(NonLocalVec); +//PYBIND11_MAKE_OPAQUE(NonLocalVec2); // same type as LocalVec2 +PYBIND11_MAKE_OPAQUE(NonLocalMap); +PYBIND11_MAKE_OPAQUE(NonLocalMap2); + + +// Simple bindings (used with the above): +template +py::class_ bind_local(Args && ...args) { + return py::class_(std::forward(args)...) 
+ .def(py::init()) + .def("get", [](T &i) { return i.i + Adjust; }); +}; + +// Simulate a foreign library base class (to match the example in the docs): +namespace pets { +class Pet { +public: + Pet(std::string name) : name_(name) {} + std::string name_; + const std::string &name() { return name_; } +}; +} + +struct MixGL { int i; MixGL(int i) : i{i} {} }; +struct MixGL2 { int i; MixGL2(int i) : i{i} {} }; diff --git a/diffvg/pybind11/tests/object.h b/diffvg/pybind11/tests/object.h new file mode 100644 index 0000000000000000000000000000000000000000..9235f19c20bff3afb59c6880a84c809205eff6ea --- /dev/null +++ b/diffvg/pybind11/tests/object.h @@ -0,0 +1,175 @@ +#if !defined(__OBJECT_H) +#define __OBJECT_H + +#include +#include "constructor_stats.h" + +/// Reference counted object base class +class Object { +public: + /// Default constructor + Object() { print_default_created(this); } + + /// Copy constructor + Object(const Object &) : m_refCount(0) { print_copy_created(this); } + + /// Return the current reference count + int getRefCount() const { return m_refCount; }; + + /// Increase the object's reference count by one + void incRef() const { ++m_refCount; } + + /** \brief Decrease the reference count of + * the object and possibly deallocate it. + * + * The object will automatically be deallocated once + * the reference count reaches zero. + */ + void decRef(bool dealloc = true) const { + --m_refCount; + if (m_refCount == 0 && dealloc) + delete this; + else if (m_refCount < 0) + throw std::runtime_error("Internal error: reference count < 0!"); + } + + virtual std::string toString() const = 0; +protected: + /** \brief Virtual protected deconstructor. + * (Will only be called by \ref ref) + */ + virtual ~Object() { print_destroyed(this); } +private: + mutable std::atomic m_refCount { 0 }; +}; + +// Tag class used to track constructions of ref objects. When we track constructors, below, we +// track and print out the actual class (e.g. 
ref), and *also* add a fake tracker for +// ref_tag. This lets us check that the total number of ref constructors/destructors is +// correct without having to check each individual ref type individually. +class ref_tag {}; + +/** + * \brief Reference counting helper + * + * The \a ref refeference template is a simple wrapper to store a + * pointer to an object. It takes care of increasing and decreasing + * the reference count of the object. When the last reference goes + * out of scope, the associated object will be deallocated. + * + * \ingroup libcore + */ +template class ref { +public: + /// Create a nullptr reference + ref() : m_ptr(nullptr) { print_default_created(this); track_default_created((ref_tag*) this); } + + /// Construct a reference from a pointer + ref(T *ptr) : m_ptr(ptr) { + if (m_ptr) ((Object *) m_ptr)->incRef(); + + print_created(this, "from pointer", m_ptr); track_created((ref_tag*) this, "from pointer"); + + } + + /// Copy constructor + ref(const ref &r) : m_ptr(r.m_ptr) { + if (m_ptr) + ((Object *) m_ptr)->incRef(); + + print_copy_created(this, "with pointer", m_ptr); track_copy_created((ref_tag*) this); + } + + /// Move constructor + ref(ref &&r) : m_ptr(r.m_ptr) { + r.m_ptr = nullptr; + + print_move_created(this, "with pointer", m_ptr); track_move_created((ref_tag*) this); + } + + /// Destroy this reference + ~ref() { + if (m_ptr) + ((Object *) m_ptr)->decRef(); + + print_destroyed(this); track_destroyed((ref_tag*) this); + } + + /// Move another reference into the current one + ref& operator=(ref&& r) { + print_move_assigned(this, "pointer", r.m_ptr); track_move_assigned((ref_tag*) this); + + if (*this == r) + return *this; + if (m_ptr) + ((Object *) m_ptr)->decRef(); + m_ptr = r.m_ptr; + r.m_ptr = nullptr; + return *this; + } + + /// Overwrite this reference with another reference + ref& operator=(const ref& r) { + print_copy_assigned(this, "pointer", r.m_ptr); track_copy_assigned((ref_tag*) this); + + if (m_ptr == r.m_ptr) + return 
*this; + if (m_ptr) + ((Object *) m_ptr)->decRef(); + m_ptr = r.m_ptr; + if (m_ptr) + ((Object *) m_ptr)->incRef(); + return *this; + } + + /// Overwrite this reference with a pointer to another object + ref& operator=(T *ptr) { + print_values(this, "assigned pointer"); track_values((ref_tag*) this, "assigned pointer"); + + if (m_ptr == ptr) + return *this; + if (m_ptr) + ((Object *) m_ptr)->decRef(); + m_ptr = ptr; + if (m_ptr) + ((Object *) m_ptr)->incRef(); + return *this; + } + + /// Compare this reference with another reference + bool operator==(const ref &r) const { return m_ptr == r.m_ptr; } + + /// Compare this reference with another reference + bool operator!=(const ref &r) const { return m_ptr != r.m_ptr; } + + /// Compare this reference with a pointer + bool operator==(const T* ptr) const { return m_ptr == ptr; } + + /// Compare this reference with a pointer + bool operator!=(const T* ptr) const { return m_ptr != ptr; } + + /// Access the object referenced by this reference + T* operator->() { return m_ptr; } + + /// Access the object referenced by this reference + const T* operator->() const { return m_ptr; } + + /// Return a C++ reference to the referenced object + T& operator*() { return *m_ptr; } + + /// Return a const C++ reference to the referenced object + const T& operator*() const { return *m_ptr; } + + /// Return a pointer to the referenced object + operator T* () { return m_ptr; } + + /// Return a const pointer to the referenced object + T* get_ptr() { return m_ptr; } + + /// Return a pointer to the referenced object + const T* get_ptr() const { return m_ptr; } +private: + T *m_ptr; +}; + +#endif /* __OBJECT_H */ diff --git a/diffvg/pybind11/tests/pybind11_cross_module_tests.cpp b/diffvg/pybind11/tests/pybind11_cross_module_tests.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f705e310611619dff319f9b5d53b71e6fd54aec5 --- /dev/null +++ b/diffvg/pybind11/tests/pybind11_cross_module_tests.cpp @@ -0,0 +1,123 @@ +/* + 
tests/pybind11_cross_module_tests.cpp -- contains tests that require multiple modules + + Copyright (c) 2017 Jason Rhinelander + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include "local_bindings.h" +#include +#include + +PYBIND11_MODULE(pybind11_cross_module_tests, m) { + m.doc() = "pybind11 cross-module test module"; + + // test_local_bindings.py tests: + // + // Definitions here are tested by importing both this module and the + // relevant pybind11_tests submodule from a test_whatever.py + + // test_load_external + bind_local(m, "ExternalType1", py::module_local()); + bind_local(m, "ExternalType2", py::module_local()); + + // test_exceptions.py + m.def("raise_runtime_error", []() { PyErr_SetString(PyExc_RuntimeError, "My runtime error"); throw py::error_already_set(); }); + m.def("raise_value_error", []() { PyErr_SetString(PyExc_ValueError, "My value error"); throw py::error_already_set(); }); + m.def("throw_pybind_value_error", []() { throw py::value_error("pybind11 value error"); }); + m.def("throw_pybind_type_error", []() { throw py::type_error("pybind11 type error"); }); + m.def("throw_stop_iteration", []() { throw py::stop_iteration(); }); + + // test_local_bindings.py + // Local to both: + bind_local(m, "LocalType", py::module_local()) + .def("get2", [](LocalType &t) { return t.i + 2; }) + ; + + // Can only be called with our python type: + m.def("local_value", [](LocalType &l) { return l.i; }); + + // test_nonlocal_failure + // This registration will fail (global registration when LocalFail is already registered + // globally in the main test module): + m.def("register_nonlocal", [m]() { + bind_local(m, "NonLocalType"); + }); + + // test_stl_bind_local + // stl_bind.h binders defaults to py::module_local if the types are local or converting: + py::bind_vector(m, "LocalVec"); + py::bind_map(m, "LocalMap"); + + // test_stl_bind_global + // 
and global if the type (or one of the types, for the map) is global (so these will fail, + // assuming pybind11_tests is already loaded): + m.def("register_nonlocal_vec", [m]() { + py::bind_vector(m, "NonLocalVec"); + }); + m.def("register_nonlocal_map", [m]() { + py::bind_map(m, "NonLocalMap"); + }); + // The default can, however, be overridden to global using `py::module_local()` or + // `py::module_local(false)`. + // Explicitly made local: + py::bind_vector(m, "NonLocalVec2", py::module_local()); + // Explicitly made global (and so will fail to bind): + m.def("register_nonlocal_map2", [m]() { + py::bind_map(m, "NonLocalMap2", py::module_local(false)); + }); + + // test_mixed_local_global + // We try this both with the global type registered first and vice versa (the order shouldn't + // matter). + m.def("register_mixed_global_local", [m]() { + bind_local(m, "MixedGlobalLocal", py::module_local()); + }); + m.def("register_mixed_local_global", [m]() { + bind_local(m, "MixedLocalGlobal", py::module_local(false)); + }); + m.def("get_mixed_gl", [](int i) { return MixedGlobalLocal(i); }); + m.def("get_mixed_lg", [](int i) { return MixedLocalGlobal(i); }); + + // test_internal_locals_differ + m.def("local_cpp_types_addr", []() { return (uintptr_t) &py::detail::registered_local_types_cpp(); }); + + // test_stl_caster_vs_stl_bind + py::bind_vector>(m, "VectorInt"); + + m.def("load_vector_via_binding", [](std::vector &v) { + return std::accumulate(v.begin(), v.end(), 0); + }); + + // test_cross_module_calls + m.def("return_self", [](LocalVec *v) { return v; }); + m.def("return_copy", [](const LocalVec &v) { return LocalVec(v); }); + + class Dog : public pets::Pet { public: Dog(std::string name) : Pet(name) {}; }; + py::class_(m, "Pet", py::module_local()) + .def("name", &pets::Pet::name); + // Binding for local extending class: + py::class_(m, "Dog") + .def(py::init()); + m.def("pet_name", [](pets::Pet &p) { return p.name(); }); + + py::class_(m, "MixGL", 
py::module_local()).def(py::init()); + m.def("get_gl_value", [](MixGL &o) { return o.i + 100; }); + + py::class_(m, "MixGL2", py::module_local()).def(py::init()); + + // test_vector_bool + // We can't test both stl.h and stl_bind.h conversions of `std::vector` within + // the same module (it would be an ODR violation). Therefore `bind_vector` of `bool` + // is defined here and tested in `test_stl_binders.py`. + py::bind_vector>(m, "VectorBool"); + + // test_missing_header_message + // The main module already includes stl.h, but we need to test the error message + // which appears when this header is missing. + m.def("missing_header_arg", [](std::vector) { }); + m.def("missing_header_return", []() { return std::vector(); }); +} diff --git a/diffvg/pybind11/tests/pybind11_tests.cpp b/diffvg/pybind11/tests/pybind11_tests.cpp new file mode 100644 index 0000000000000000000000000000000000000000..76e0298e83a1088a439441580bc3866c664dcaea --- /dev/null +++ b/diffvg/pybind11/tests/pybind11_tests.cpp @@ -0,0 +1,91 @@ +/* + tests/pybind11_tests.cpp -- pybind example plugin + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" + +#include +#include + +/* +For testing purposes, we define a static global variable here in a function that each individual +test .cpp calls with its initialization lambda. It's convenient here because we can just not +compile some test files to disable/ignore some of the test code. + +It is NOT recommended as a way to use pybind11 in practice, however: the initialization order will +be essentially random, which is okay for our test scripts (there are no dependencies between the +individual pybind11 test .cpp files), but most likely not what you want when using pybind11 +productively. + +Instead, see the "How can I reduce the build time?" 
question in the "Frequently asked questions" +section of the documentation for good practice on splitting binding code over multiple files. +*/ +std::list> &initializers() { + static std::list> inits; + return inits; +} + +test_initializer::test_initializer(Initializer init) { + initializers().push_back(init); +} + +test_initializer::test_initializer(const char *submodule_name, Initializer init) { + initializers().push_back([=](py::module &parent) { + auto m = parent.def_submodule(submodule_name); + init(m); + }); +} + +void bind_ConstructorStats(py::module &m) { + py::class_(m, "ConstructorStats") + .def("alive", &ConstructorStats::alive) + .def("values", &ConstructorStats::values) + .def_readwrite("default_constructions", &ConstructorStats::default_constructions) + .def_readwrite("copy_assignments", &ConstructorStats::copy_assignments) + .def_readwrite("move_assignments", &ConstructorStats::move_assignments) + .def_readwrite("copy_constructions", &ConstructorStats::copy_constructions) + .def_readwrite("move_constructions", &ConstructorStats::move_constructions) + .def_static("get", (ConstructorStats &(*)(py::object)) &ConstructorStats::get, py::return_value_policy::reference_internal) + + // Not exactly ConstructorStats, but related: expose the internal pybind number of registered instances + // to allow instance cleanup checks (invokes a GC first) + .def_static("detail_reg_inst", []() { + ConstructorStats::gc(); + return py::detail::get_internals().registered_instances.size(); + }) + ; +} + +PYBIND11_MODULE(pybind11_tests, m) { + m.doc() = "pybind11 test module"; + + bind_ConstructorStats(m); + +#if !defined(NDEBUG) + m.attr("debug_enabled") = true; +#else + m.attr("debug_enabled") = false; +#endif + + py::class_(m, "UserType", "A `py::class_` type for testing") + .def(py::init<>()) + .def(py::init()) + .def("get_value", &UserType::value, "Get value using a method") + .def("set_value", &UserType::set, "Set value using a method") + .def_property("value", 
&UserType::value, &UserType::set, "Get/set value using a property") + .def("__repr__", [](const UserType& u) { return "UserType({})"_s.format(u.value()); }); + + py::class_(m, "IncType") + .def(py::init<>()) + .def(py::init()) + .def("__repr__", [](const IncType& u) { return "IncType({})"_s.format(u.value()); }); + + for (const auto &initializer : initializers()) + initializer(m); +} diff --git a/diffvg/pybind11/tests/pybind11_tests.h b/diffvg/pybind11/tests/pybind11_tests.h new file mode 100644 index 0000000000000000000000000000000000000000..1e47416270ff1c81dd01f4725b896341ae4dcd0f --- /dev/null +++ b/diffvg/pybind11/tests/pybind11_tests.h @@ -0,0 +1,65 @@ +#pragma once +#include + +#if defined(_MSC_VER) && _MSC_VER < 1910 +// We get some really long type names here which causes MSVC 2015 to emit warnings +# pragma warning(disable: 4503) // warning C4503: decorated name length exceeded, name was truncated +#endif + +namespace py = pybind11; +using namespace pybind11::literals; + +class test_initializer { + using Initializer = void (*)(py::module &); + +public: + test_initializer(Initializer init); + test_initializer(const char *submodule_name, Initializer init); +}; + +#define TEST_SUBMODULE(name, variable) \ + void test_submodule_##name(py::module &); \ + test_initializer name(#name, test_submodule_##name); \ + void test_submodule_##name(py::module &variable) + + +/// Dummy type which is not exported anywhere -- something to trigger a conversion error +struct UnregisteredType { }; + +/// A user-defined type which is exported and can be used by any test +class UserType { +public: + UserType() = default; + UserType(int i) : i(i) { } + + int value() const { return i; } + void set(int set) { i = set; } + +private: + int i = -1; +}; + +/// Like UserType, but increments `value` on copy for quick reference vs. 
copy tests +class IncType : public UserType { +public: + using UserType::UserType; + IncType() = default; + IncType(const IncType &other) : IncType(other.value() + 1) { } + IncType(IncType &&) = delete; + IncType &operator=(const IncType &) = delete; + IncType &operator=(IncType &&) = delete; +}; + +/// Custom cast-only type that casts to a string "rvalue" or "lvalue" depending on the cast context. +/// Used to test recursive casters (e.g. std::tuple, stl containers). +struct RValueCaster {}; +PYBIND11_NAMESPACE_BEGIN(pybind11) +PYBIND11_NAMESPACE_BEGIN(detail) +template<> class type_caster { +public: + PYBIND11_TYPE_CASTER(RValueCaster, _("RValueCaster")); + static handle cast(RValueCaster &&, return_value_policy, handle) { return py::str("rvalue").release(); } + static handle cast(const RValueCaster &, return_value_policy, handle) { return py::str("lvalue").release(); } +}; +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(pybind11) diff --git a/diffvg/pybind11/tests/pytest.ini b/diffvg/pybind11/tests/pytest.ini new file mode 100644 index 0000000000000000000000000000000000000000..6d758ea6ac8d7315804875fba8d4e33cf1752e77 --- /dev/null +++ b/diffvg/pybind11/tests/pytest.ini @@ -0,0 +1,19 @@ +[pytest] +minversion = 3.1 +norecursedirs = test_cmake_build test_embed +xfail_strict = True +addopts = + # show summary of skipped tests + -rs + # capture only Python print and C++ py::print, but not C output (low-level Python errors) + --capture=sys + # enable all warnings + -Wa +filterwarnings = + # make warnings into errors but ignore certain third-party extension issues + error + # importing scipy submodules on some version of Python + ignore::ImportWarning + # bogus numpy ABI warning (see numpy/#432) + ignore:.*numpy.dtype size changed.*:RuntimeWarning + ignore:.*numpy.ufunc size changed.*:RuntimeWarning diff --git a/diffvg/pybind11/tests/requirements.txt b/diffvg/pybind11/tests/requirements.txt new file mode 100644 index 
0000000000000000000000000000000000000000..39bd57a1c7860bfdee5c6206ebf79426f5a49abc --- /dev/null +++ b/diffvg/pybind11/tests/requirements.txt @@ -0,0 +1,8 @@ +--extra-index-url https://antocuni.github.io/pypy-wheels/manylinux2010/ +numpy==1.16.6; python_version<"3.6" +numpy==1.18.0; platform_python_implementation=="PyPy" and sys_platform=="darwin" and python_version>="3.6" +numpy==1.19.1; (platform_python_implementation!="PyPy" or sys_platform!="darwin") and python_version>="3.6" and python_version<"3.9" +pytest==4.6.9; python_version<"3.5" +pytest==5.4.3; python_version>="3.5" +scipy==1.2.3; (platform_python_implementation!="PyPy" or sys_platform!="darwin") and python_version<"3.6" +scipy==1.5.2; (platform_python_implementation!="PyPy" or sys_platform!="darwin") and python_version>="3.6" and python_version<"3.9" diff --git a/diffvg/pybind11/tests/test_async.cpp b/diffvg/pybind11/tests/test_async.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f0ad0d535048fbb825b444e743193c743551cdd4 --- /dev/null +++ b/diffvg/pybind11/tests/test_async.cpp @@ -0,0 +1,26 @@ +/* + tests/test_async.cpp -- __await__ support + + Copyright (c) 2019 Google Inc. + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" + +TEST_SUBMODULE(async_module, m) { + struct DoesNotSupportAsync {}; + py::class_(m, "DoesNotSupportAsync") + .def(py::init<>()); + struct SupportsAsync {}; + py::class_(m, "SupportsAsync") + .def(py::init<>()) + .def("__await__", [](const SupportsAsync& self) -> py::object { + static_cast(self); + py::object loop = py::module::import("asyncio.events").attr("get_event_loop")(); + py::object f = loop.attr("create_future")(); + f.attr("set_result")(5); + return f.attr("__await__")(); + }); +} diff --git a/diffvg/pybind11/tests/test_async.py b/diffvg/pybind11/tests/test_async.py new file mode 100644 index 0000000000000000000000000000000000000000..df4489c499e88f190764dd17cef44b54b4516202 --- /dev/null +++ b/diffvg/pybind11/tests/test_async.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +import pytest + +asyncio = pytest.importorskip("asyncio") +m = pytest.importorskip("pybind11_tests.async_module") + + +@pytest.fixture +def event_loop(): + loop = asyncio.new_event_loop() + yield loop + loop.close() + + +async def get_await_result(x): + return await x + + +def test_await(event_loop): + assert 5 == event_loop.run_until_complete(get_await_result(m.SupportsAsync())) + + +def test_await_missing(event_loop): + with pytest.raises(TypeError): + event_loop.run_until_complete(get_await_result(m.DoesNotSupportAsync())) diff --git a/diffvg/pybind11/tests/test_buffers.cpp b/diffvg/pybind11/tests/test_buffers.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1bc67ff7b66e86d7bf94de845e5737261f2a1280 --- /dev/null +++ b/diffvg/pybind11/tests/test_buffers.cpp @@ -0,0 +1,195 @@ +/* + tests/test_buffers.cpp -- supporting Pythons' buffer protocol + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" + +TEST_SUBMODULE(buffers, m) { + // test_from_python / test_to_python: + class Matrix { + public: + Matrix(ssize_t rows, ssize_t cols) : m_rows(rows), m_cols(cols) { + print_created(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix"); + m_data = new float[(size_t) (rows*cols)]; + memset(m_data, 0, sizeof(float) * (size_t) (rows * cols)); + } + + Matrix(const Matrix &s) : m_rows(s.m_rows), m_cols(s.m_cols) { + print_copy_created(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix"); + m_data = new float[(size_t) (m_rows * m_cols)]; + memcpy(m_data, s.m_data, sizeof(float) * (size_t) (m_rows * m_cols)); + } + + Matrix(Matrix &&s) : m_rows(s.m_rows), m_cols(s.m_cols), m_data(s.m_data) { + print_move_created(this); + s.m_rows = 0; + s.m_cols = 0; + s.m_data = nullptr; + } + + ~Matrix() { + print_destroyed(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix"); + delete[] m_data; + } + + Matrix &operator=(const Matrix &s) { + print_copy_assigned(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix"); + delete[] m_data; + m_rows = s.m_rows; + m_cols = s.m_cols; + m_data = new float[(size_t) (m_rows * m_cols)]; + memcpy(m_data, s.m_data, sizeof(float) * (size_t) (m_rows * m_cols)); + return *this; + } + + Matrix &operator=(Matrix &&s) { + print_move_assigned(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix"); + if (&s != this) { + delete[] m_data; + m_rows = s.m_rows; m_cols = s.m_cols; m_data = s.m_data; + s.m_rows = 0; s.m_cols = 0; s.m_data = nullptr; + } + return *this; + } + + float operator()(ssize_t i, ssize_t j) const { + return m_data[(size_t) (i*m_cols + j)]; + } + + float &operator()(ssize_t i, ssize_t j) { + return m_data[(size_t) (i*m_cols + j)]; + } + + float *data() { return m_data; } + + ssize_t rows() const { return m_rows; } + ssize_t cols() const { return m_cols; } + private: + ssize_t 
m_rows; + ssize_t m_cols; + float *m_data; + }; + py::class_(m, "Matrix", py::buffer_protocol()) + .def(py::init()) + /// Construct from a buffer + .def(py::init([](py::buffer const b) { + py::buffer_info info = b.request(); + if (info.format != py::format_descriptor::format() || info.ndim != 2) + throw std::runtime_error("Incompatible buffer format!"); + + auto v = new Matrix(info.shape[0], info.shape[1]); + memcpy(v->data(), info.ptr, sizeof(float) * (size_t) (v->rows() * v->cols())); + return v; + })) + + .def("rows", &Matrix::rows) + .def("cols", &Matrix::cols) + + /// Bare bones interface + .def("__getitem__", [](const Matrix &m, std::pair i) { + if (i.first >= m.rows() || i.second >= m.cols()) + throw py::index_error(); + return m(i.first, i.second); + }) + .def("__setitem__", [](Matrix &m, std::pair i, float v) { + if (i.first >= m.rows() || i.second >= m.cols()) + throw py::index_error(); + m(i.first, i.second) = v; + }) + /// Provide buffer access + .def_buffer([](Matrix &m) -> py::buffer_info { + return py::buffer_info( + m.data(), /* Pointer to buffer */ + { m.rows(), m.cols() }, /* Buffer dimensions */ + { sizeof(float) * size_t(m.cols()), /* Strides (in bytes) for each index */ + sizeof(float) } + ); + }) + ; + + + // test_inherited_protocol + class SquareMatrix : public Matrix { + public: + SquareMatrix(ssize_t n) : Matrix(n, n) { } + }; + // Derived classes inherit the buffer protocol and the buffer access function + py::class_(m, "SquareMatrix") + .def(py::init()); + + + // test_pointer_to_member_fn + // Tests that passing a pointer to member to the base class works in + // the derived class. 
+ struct Buffer { + int32_t value = 0; + + py::buffer_info get_buffer_info() { + return py::buffer_info(&value, sizeof(value), + py::format_descriptor::format(), 1); + } + }; + py::class_(m, "Buffer", py::buffer_protocol()) + .def(py::init<>()) + .def_readwrite("value", &Buffer::value) + .def_buffer(&Buffer::get_buffer_info); + + + class ConstBuffer { + std::unique_ptr value; + + public: + int32_t get_value() const { return *value; } + void set_value(int32_t v) { *value = v; } + + py::buffer_info get_buffer_info() const { + return py::buffer_info(value.get(), sizeof(*value), + py::format_descriptor::format(), 1); + } + + ConstBuffer() : value(new int32_t{0}) { }; + }; + py::class_(m, "ConstBuffer", py::buffer_protocol()) + .def(py::init<>()) + .def_property("value", &ConstBuffer::get_value, &ConstBuffer::set_value) + .def_buffer(&ConstBuffer::get_buffer_info); + + struct DerivedBuffer : public Buffer { }; + py::class_(m, "DerivedBuffer", py::buffer_protocol()) + .def(py::init<>()) + .def_readwrite("value", (int32_t DerivedBuffer::*) &DerivedBuffer::value) + .def_buffer(&DerivedBuffer::get_buffer_info); + + struct BufferReadOnly { + const uint8_t value = 0; + BufferReadOnly(uint8_t value): value(value) {} + + py::buffer_info get_buffer_info() { + return py::buffer_info(&value, 1); + } + }; + py::class_(m, "BufferReadOnly", py::buffer_protocol()) + .def(py::init()) + .def_buffer(&BufferReadOnly::get_buffer_info); + + struct BufferReadOnlySelect { + uint8_t value = 0; + bool readonly = false; + + py::buffer_info get_buffer_info() { + return py::buffer_info(&value, 1, readonly); + } + }; + py::class_(m, "BufferReadOnlySelect", py::buffer_protocol()) + .def(py::init<>()) + .def_readwrite("value", &BufferReadOnlySelect::value) + .def_readwrite("readonly", &BufferReadOnlySelect::readonly) + .def_buffer(&BufferReadOnlySelect::get_buffer_info); + +} diff --git a/diffvg/pybind11/tests/test_buffers.py b/diffvg/pybind11/tests/test_buffers.py new file mode 100644 index 
0000000000000000000000000000000000000000..d6adaf1f5eee00f93e2b0ba7e3838c1107297080 --- /dev/null +++ b/diffvg/pybind11/tests/test_buffers.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +import io +import struct + +import pytest + +import env # noqa: F401 + +from pybind11_tests import buffers as m +from pybind11_tests import ConstructorStats + +np = pytest.importorskip("numpy") + + +def test_from_python(): + with pytest.raises(RuntimeError) as excinfo: + m.Matrix(np.array([1, 2, 3])) # trying to assign a 1D array + assert str(excinfo.value) == "Incompatible buffer format!" + + m3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) + m4 = m.Matrix(m3) + + for i in range(m4.rows()): + for j in range(m4.cols()): + assert m3[i, j] == m4[i, j] + + cstats = ConstructorStats.get(m.Matrix) + assert cstats.alive() == 1 + del m3, m4 + assert cstats.alive() == 0 + assert cstats.values() == ["2x3 matrix"] + assert cstats.copy_constructions == 0 + # assert cstats.move_constructions >= 0 # Don't invoke any + assert cstats.copy_assignments == 0 + assert cstats.move_assignments == 0 + + +# https://foss.heptapod.net/pypy/pypy/-/issues/2444 +def test_to_python(): + mat = m.Matrix(5, 4) + assert memoryview(mat).shape == (5, 4) + + assert mat[2, 3] == 0 + mat[2, 3] = 4.0 + mat[3, 2] = 7.0 + assert mat[2, 3] == 4 + assert mat[3, 2] == 7 + assert struct.unpack_from('f', mat, (3 * 4 + 2) * 4) == (7, ) + assert struct.unpack_from('f', mat, (2 * 4 + 3) * 4) == (4, ) + + mat2 = np.array(mat, copy=False) + assert mat2.shape == (5, 4) + assert abs(mat2).sum() == 11 + assert mat2[2, 3] == 4 and mat2[3, 2] == 7 + mat2[2, 3] = 5 + assert mat2[2, 3] == 5 + + cstats = ConstructorStats.get(m.Matrix) + assert cstats.alive() == 1 + del mat + pytest.gc_collect() + assert cstats.alive() == 1 + del mat2 # holds a mat reference + pytest.gc_collect() + assert cstats.alive() == 0 + assert cstats.values() == ["5x4 matrix"] + assert cstats.copy_constructions == 0 + # assert cstats.move_constructions >= 0 # 
Don't invoke any + assert cstats.copy_assignments == 0 + assert cstats.move_assignments == 0 + + +def test_inherited_protocol(): + """SquareMatrix is derived from Matrix and inherits the buffer protocol""" + + matrix = m.SquareMatrix(5) + assert memoryview(matrix).shape == (5, 5) + assert np.asarray(matrix).shape == (5, 5) + + +def test_pointer_to_member_fn(): + for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]: + buf = cls() + buf.value = 0x12345678 + value = struct.unpack('i', bytearray(buf))[0] + assert value == 0x12345678 + + +def test_readonly_buffer(): + buf = m.BufferReadOnly(0x64) + view = memoryview(buf) + assert view[0] == b'd' if env.PY2 else 0x64 + assert view.readonly + + +def test_selective_readonly_buffer(): + buf = m.BufferReadOnlySelect() + + memoryview(buf)[0] = b'd' if env.PY2 else 0x64 + assert buf.value == 0x64 + + io.BytesIO(b'A').readinto(buf) + assert buf.value == ord(b'A') + + buf.readonly = True + with pytest.raises(TypeError): + memoryview(buf)[0] = b'\0' if env.PY2 else 0 + with pytest.raises(TypeError): + io.BytesIO(b'1').readinto(buf) diff --git a/diffvg/pybind11/tests/test_builtin_casters.cpp b/diffvg/pybind11/tests/test_builtin_casters.cpp new file mode 100644 index 0000000000000000000000000000000000000000..acc9f8fb368899cde8b25702d4410c0d591fb5ee --- /dev/null +++ b/diffvg/pybind11/tests/test_builtin_casters.cpp @@ -0,0 +1,192 @@ +/* + tests/test_builtin_casters.cpp -- Casters available without any additional headers + + Copyright (c) 2017 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant +#endif + +TEST_SUBMODULE(builtin_casters, m) { + // test_simple_string + m.def("string_roundtrip", [](const char *s) { return s; }); + + // test_unicode_conversion + // Some test characters in utf16 and utf32 encodings. The last one (the 𝐀) contains a null byte + char32_t a32 = 0x61 /*a*/, z32 = 0x7a /*z*/, ib32 = 0x203d /*β€½*/, cake32 = 0x1f382 /*πŸŽ‚*/, mathbfA32 = 0x1d400 /*𝐀*/; + char16_t b16 = 0x62 /*b*/, z16 = 0x7a, ib16 = 0x203d, cake16_1 = 0xd83c, cake16_2 = 0xdf82, mathbfA16_1 = 0xd835, mathbfA16_2 = 0xdc00; + std::wstring wstr; + wstr.push_back(0x61); // a + wstr.push_back(0x2e18); // ⸘ + if (sizeof(wchar_t) == 2) { wstr.push_back(mathbfA16_1); wstr.push_back(mathbfA16_2); } // 𝐀, utf16 + else { wstr.push_back((wchar_t) mathbfA32); } // 𝐀, utf32 + wstr.push_back(0x7a); // z + + m.def("good_utf8_string", []() { return std::string((const char*)u8"Say utf8\u203d \U0001f382 \U0001d400"); }); // Say utf8β€½ πŸŽ‚ 𝐀 + m.def("good_utf16_string", [=]() { return std::u16string({ b16, ib16, cake16_1, cake16_2, mathbfA16_1, mathbfA16_2, z16 }); }); // bβ€½πŸŽ‚π€z + m.def("good_utf32_string", [=]() { return std::u32string({ a32, mathbfA32, cake32, ib32, z32 }); }); // aπ€πŸŽ‚β€½z + m.def("good_wchar_string", [=]() { return wstr; }); // a‽𝐀z + m.def("bad_utf8_string", []() { return std::string("abc\xd0" "def"); }); + m.def("bad_utf16_string", [=]() { return std::u16string({ b16, char16_t(0xd800), z16 }); }); + // Under Python 2.7, invalid unicode UTF-32 characters don't appear to trigger UnicodeDecodeError + if (PY_MAJOR_VERSION >= 3) + m.def("bad_utf32_string", [=]() { return std::u32string({ a32, char32_t(0xd800), z32 }); }); + if (PY_MAJOR_VERSION >= 3 || sizeof(wchar_t) == 2) + m.def("bad_wchar_string", [=]() { return std::wstring({ wchar_t(0x61), wchar_t(0xd800) }); }); + 
m.def("u8_Z", []() -> char { return 'Z'; }); + m.def("u8_eacute", []() -> char { return '\xe9'; }); + m.def("u16_ibang", [=]() -> char16_t { return ib16; }); + m.def("u32_mathbfA", [=]() -> char32_t { return mathbfA32; }); + m.def("wchar_heart", []() -> wchar_t { return 0x2665; }); + + // test_single_char_arguments + m.attr("wchar_size") = py::cast(sizeof(wchar_t)); + m.def("ord_char", [](char c) -> int { return static_cast(c); }); + m.def("ord_char_lv", [](char &c) -> int { return static_cast(c); }); + m.def("ord_char16", [](char16_t c) -> uint16_t { return c; }); + m.def("ord_char16_lv", [](char16_t &c) -> uint16_t { return c; }); + m.def("ord_char32", [](char32_t c) -> uint32_t { return c; }); + m.def("ord_wchar", [](wchar_t c) -> int { return c; }); + + // test_bytes_to_string + m.def("strlen", [](char *s) { return strlen(s); }); + m.def("string_length", [](std::string s) { return s.length(); }); + +#ifdef PYBIND11_HAS_U8STRING + m.attr("has_u8string") = true; + m.def("good_utf8_u8string", []() { return std::u8string(u8"Say utf8\u203d \U0001f382 \U0001d400"); }); // Say utf8β€½ πŸŽ‚ 𝐀 + m.def("bad_utf8_u8string", []() { return std::u8string((const char8_t*)"abc\xd0" "def"); }); + + m.def("u8_char8_Z", []() -> char8_t { return u8'Z'; }); + + // test_single_char_arguments + m.def("ord_char8", [](char8_t c) -> int { return static_cast(c); }); + m.def("ord_char8_lv", [](char8_t &c) -> int { return static_cast(c); }); +#endif + + // test_string_view +#ifdef PYBIND11_HAS_STRING_VIEW + m.attr("has_string_view") = true; + m.def("string_view_print", [](std::string_view s) { py::print(s, s.size()); }); + m.def("string_view16_print", [](std::u16string_view s) { py::print(s, s.size()); }); + m.def("string_view32_print", [](std::u32string_view s) { py::print(s, s.size()); }); + m.def("string_view_chars", [](std::string_view s) { py::list l; for (auto c : s) l.append((std::uint8_t) c); return l; }); + m.def("string_view16_chars", [](std::u16string_view s) { py::list l; for 
(auto c : s) l.append((int) c); return l; }); + m.def("string_view32_chars", [](std::u32string_view s) { py::list l; for (auto c : s) l.append((int) c); return l; }); + m.def("string_view_return", []() { return std::string_view((const char*)u8"utf8 secret \U0001f382"); }); + m.def("string_view16_return", []() { return std::u16string_view(u"utf16 secret \U0001f382"); }); + m.def("string_view32_return", []() { return std::u32string_view(U"utf32 secret \U0001f382"); }); + +# ifdef PYBIND11_HAS_U8STRING + m.def("string_view8_print", [](std::u8string_view s) { py::print(s, s.size()); }); + m.def("string_view8_chars", [](std::u8string_view s) { py::list l; for (auto c : s) l.append((std::uint8_t) c); return l; }); + m.def("string_view8_return", []() { return std::u8string_view(u8"utf8 secret \U0001f382"); }); +# endif +#endif + + // test_integer_casting + m.def("i32_str", [](std::int32_t v) { return std::to_string(v); }); + m.def("u32_str", [](std::uint32_t v) { return std::to_string(v); }); + m.def("i64_str", [](std::int64_t v) { return std::to_string(v); }); + m.def("u64_str", [](std::uint64_t v) { return std::to_string(v); }); + + // test_tuple + m.def("pair_passthrough", [](std::pair input) { + return std::make_pair(input.second, input.first); + }, "Return a pair in reversed order"); + m.def("tuple_passthrough", [](std::tuple input) { + return std::make_tuple(std::get<2>(input), std::get<1>(input), std::get<0>(input)); + }, "Return a triple in reversed order"); + m.def("empty_tuple", []() { return std::tuple<>(); }); + static std::pair lvpair; + static std::tuple lvtuple; + static std::pair>> lvnested; + m.def("rvalue_pair", []() { return std::make_pair(RValueCaster{}, RValueCaster{}); }); + m.def("lvalue_pair", []() -> const decltype(lvpair) & { return lvpair; }); + m.def("rvalue_tuple", []() { return std::make_tuple(RValueCaster{}, RValueCaster{}, RValueCaster{}); }); + m.def("lvalue_tuple", []() -> const decltype(lvtuple) & { return lvtuple; }); + 
m.def("rvalue_nested", []() { + return std::make_pair(RValueCaster{}, std::make_tuple(RValueCaster{}, std::make_pair(RValueCaster{}, RValueCaster{}))); }); + m.def("lvalue_nested", []() -> const decltype(lvnested) & { return lvnested; }); + + static std::pair int_string_pair{2, "items"}; + m.def("int_string_pair", []() { return &int_string_pair; }); + + // test_builtins_cast_return_none + m.def("return_none_string", []() -> std::string * { return nullptr; }); + m.def("return_none_char", []() -> const char * { return nullptr; }); + m.def("return_none_bool", []() -> bool * { return nullptr; }); + m.def("return_none_int", []() -> int * { return nullptr; }); + m.def("return_none_float", []() -> float * { return nullptr; }); + m.def("return_none_pair", []() -> std::pair * { return nullptr; }); + + // test_none_deferred + m.def("defer_none_cstring", [](char *) { return false; }); + m.def("defer_none_cstring", [](py::none) { return true; }); + m.def("defer_none_custom", [](UserType *) { return false; }); + m.def("defer_none_custom", [](py::none) { return true; }); + m.def("nodefer_none_void", [](void *) { return true; }); + m.def("nodefer_none_void", [](py::none) { return false; }); + + // test_void_caster + m.def("load_nullptr_t", [](std::nullptr_t) {}); // not useful, but it should still compile + m.def("cast_nullptr_t", []() { return std::nullptr_t{}; }); + + // test_bool_caster + m.def("bool_passthrough", [](bool arg) { return arg; }); + m.def("bool_passthrough_noconvert", [](bool arg) { return arg; }, py::arg().noconvert()); + + // test_reference_wrapper + m.def("refwrap_builtin", [](std::reference_wrapper p) { return 10 * p.get(); }); + m.def("refwrap_usertype", [](std::reference_wrapper p) { return p.get().value(); }); + // Not currently supported (std::pair caster has return-by-value cast operator); + // triggers static_assert failure. 
+ //m.def("refwrap_pair", [](std::reference_wrapper>) { }); + + m.def("refwrap_list", [](bool copy) { + static IncType x1(1), x2(2); + py::list l; + for (auto &f : {std::ref(x1), std::ref(x2)}) { + l.append(py::cast(f, copy ? py::return_value_policy::copy + : py::return_value_policy::reference)); + } + return l; + }, "copy"_a); + + m.def("refwrap_iiw", [](const IncType &w) { return w.value(); }); + m.def("refwrap_call_iiw", [](IncType &w, py::function f) { + py::list l; + l.append(f(std::ref(w))); + l.append(f(std::cref(w))); + IncType x(w.value()); + l.append(f(std::ref(x))); + IncType y(w.value()); + auto r3 = std::ref(y); + l.append(f(r3)); + return l; + }); + + // test_complex + m.def("complex_cast", [](float x) { return "{}"_s.format(x); }); + m.def("complex_cast", [](std::complex x) { return "({}, {})"_s.format(x.real(), x.imag()); }); + + // test int vs. long (Python 2) + m.def("int_cast", []() {return (int) 42;}); + m.def("long_cast", []() {return (long) 42;}); + m.def("longlong_cast", []() {return ULLONG_MAX;}); + + /// test void* cast operator + m.def("test_void_caster", []() -> bool { + void *v = (void *) 0xabcd; + py::object o = py::cast(v); + return py::cast(o) == v; + }); +} diff --git a/diffvg/pybind11/tests/test_builtin_casters.py b/diffvg/pybind11/tests/test_builtin_casters.py new file mode 100644 index 0000000000000000000000000000000000000000..08d38bc1546f194021f6f47360e5a544a4267437 --- /dev/null +++ b/diffvg/pybind11/tests/test_builtin_casters.py @@ -0,0 +1,392 @@ +# -*- coding: utf-8 -*- +import pytest + +import env # noqa: F401 + +from pybind11_tests import builtin_casters as m +from pybind11_tests import UserType, IncType + + +def test_simple_string(): + assert m.string_roundtrip("const char *") == "const char *" + + +def test_unicode_conversion(): + """Tests unicode conversion and error reporting.""" + assert m.good_utf8_string() == u"Say utf8β€½ πŸŽ‚ 𝐀" + assert m.good_utf16_string() == u"bβ€½πŸŽ‚π€z" + assert m.good_utf32_string() == 
u"aπ€πŸŽ‚β€½z" + assert m.good_wchar_string() == u"aβΈ˜π€z" + if hasattr(m, "has_u8string"): + assert m.good_utf8_u8string() == u"Say utf8β€½ πŸŽ‚ 𝐀" + + with pytest.raises(UnicodeDecodeError): + m.bad_utf8_string() + + with pytest.raises(UnicodeDecodeError): + m.bad_utf16_string() + + # These are provided only if they actually fail (they don't when 32-bit and under Python 2.7) + if hasattr(m, "bad_utf32_string"): + with pytest.raises(UnicodeDecodeError): + m.bad_utf32_string() + if hasattr(m, "bad_wchar_string"): + with pytest.raises(UnicodeDecodeError): + m.bad_wchar_string() + if hasattr(m, "has_u8string"): + with pytest.raises(UnicodeDecodeError): + m.bad_utf8_u8string() + + assert m.u8_Z() == 'Z' + assert m.u8_eacute() == u'Γ©' + assert m.u16_ibang() == u'β€½' + assert m.u32_mathbfA() == u'𝐀' + assert m.wchar_heart() == u'β™₯' + if hasattr(m, "has_u8string"): + assert m.u8_char8_Z() == 'Z' + + +def test_single_char_arguments(): + """Tests failures for passing invalid inputs to char-accepting functions""" + def toobig_message(r): + return "Character code point not in range({0:#x})".format(r) + toolong_message = "Expected a character, but multi-character string found" + + assert m.ord_char(u'a') == 0x61 # simple ASCII + assert m.ord_char_lv(u'b') == 0x62 + assert m.ord_char(u'Γ©') == 0xE9 # requires 2 bytes in utf-8, but can be stuffed in a char + with pytest.raises(ValueError) as excinfo: + assert m.ord_char(u'Δ€') == 0x100 # requires 2 bytes, doesn't fit in a char + assert str(excinfo.value) == toobig_message(0x100) + with pytest.raises(ValueError) as excinfo: + assert m.ord_char(u'ab') + assert str(excinfo.value) == toolong_message + + assert m.ord_char16(u'a') == 0x61 + assert m.ord_char16(u'Γ©') == 0xE9 + assert m.ord_char16_lv(u'Γͺ') == 0xEA + assert m.ord_char16(u'Δ€') == 0x100 + assert m.ord_char16(u'β€½') == 0x203d + assert m.ord_char16(u'β™₯') == 0x2665 + assert m.ord_char16_lv(u'β™‘') == 0x2661 + with pytest.raises(ValueError) as excinfo: + assert 
m.ord_char16(u'πŸŽ‚') == 0x1F382 # requires surrogate pair + assert str(excinfo.value) == toobig_message(0x10000) + with pytest.raises(ValueError) as excinfo: + assert m.ord_char16(u'aa') + assert str(excinfo.value) == toolong_message + + assert m.ord_char32(u'a') == 0x61 + assert m.ord_char32(u'Γ©') == 0xE9 + assert m.ord_char32(u'Δ€') == 0x100 + assert m.ord_char32(u'β€½') == 0x203d + assert m.ord_char32(u'β™₯') == 0x2665 + assert m.ord_char32(u'πŸŽ‚') == 0x1F382 + with pytest.raises(ValueError) as excinfo: + assert m.ord_char32(u'aa') + assert str(excinfo.value) == toolong_message + + assert m.ord_wchar(u'a') == 0x61 + assert m.ord_wchar(u'Γ©') == 0xE9 + assert m.ord_wchar(u'Δ€') == 0x100 + assert m.ord_wchar(u'β€½') == 0x203d + assert m.ord_wchar(u'β™₯') == 0x2665 + if m.wchar_size == 2: + with pytest.raises(ValueError) as excinfo: + assert m.ord_wchar(u'πŸŽ‚') == 0x1F382 # requires surrogate pair + assert str(excinfo.value) == toobig_message(0x10000) + else: + assert m.ord_wchar(u'πŸŽ‚') == 0x1F382 + with pytest.raises(ValueError) as excinfo: + assert m.ord_wchar(u'aa') + assert str(excinfo.value) == toolong_message + + if hasattr(m, "has_u8string"): + assert m.ord_char8(u'a') == 0x61 # simple ASCII + assert m.ord_char8_lv(u'b') == 0x62 + assert m.ord_char8(u'Γ©') == 0xE9 # requires 2 bytes in utf-8, but can be stuffed in a char + with pytest.raises(ValueError) as excinfo: + assert m.ord_char8(u'Δ€') == 0x100 # requires 2 bytes, doesn't fit in a char + assert str(excinfo.value) == toobig_message(0x100) + with pytest.raises(ValueError) as excinfo: + assert m.ord_char8(u'ab') + assert str(excinfo.value) == toolong_message + + +def test_bytes_to_string(): + """Tests the ability to pass bytes to C++ string-accepting functions. 
Note that this is + one-way: the only way to return bytes to Python is via the pybind11::bytes class.""" + # Issue #816 + + def to_bytes(s): + b = s if env.PY2 else s.encode("utf8") + assert isinstance(b, bytes) + return b + + assert m.strlen(to_bytes("hi")) == 2 + assert m.string_length(to_bytes("world")) == 5 + assert m.string_length(to_bytes("a\x00b")) == 3 + assert m.strlen(to_bytes("a\x00b")) == 1 # C-string limitation + + # passing in a utf8 encoded string should work + assert m.string_length(u'πŸ’©'.encode("utf8")) == 4 + + +@pytest.mark.skipif(not hasattr(m, "has_string_view"), reason="no ") +def test_string_view(capture): + """Tests support for C++17 string_view arguments and return values""" + assert m.string_view_chars("Hi") == [72, 105] + assert m.string_view_chars("Hi πŸŽ‚") == [72, 105, 32, 0xf0, 0x9f, 0x8e, 0x82] + assert m.string_view16_chars(u"Hi πŸŽ‚") == [72, 105, 32, 0xd83c, 0xdf82] + assert m.string_view32_chars(u"Hi πŸŽ‚") == [72, 105, 32, 127874] + if hasattr(m, "has_u8string"): + assert m.string_view8_chars("Hi") == [72, 105] + assert m.string_view8_chars(u"Hi πŸŽ‚") == [72, 105, 32, 0xf0, 0x9f, 0x8e, 0x82] + + assert m.string_view_return() == u"utf8 secret πŸŽ‚" + assert m.string_view16_return() == u"utf16 secret πŸŽ‚" + assert m.string_view32_return() == u"utf32 secret πŸŽ‚" + if hasattr(m, "has_u8string"): + assert m.string_view8_return() == u"utf8 secret πŸŽ‚" + + with capture: + m.string_view_print("Hi") + m.string_view_print("utf8 πŸŽ‚") + m.string_view16_print(u"utf16 πŸŽ‚") + m.string_view32_print(u"utf32 πŸŽ‚") + assert capture == u""" + Hi 2 + utf8 πŸŽ‚ 9 + utf16 πŸŽ‚ 8 + utf32 πŸŽ‚ 7 + """ + if hasattr(m, "has_u8string"): + with capture: + m.string_view8_print("Hi") + m.string_view8_print(u"utf8 πŸŽ‚") + assert capture == u""" + Hi 2 + utf8 πŸŽ‚ 9 + """ + + with capture: + m.string_view_print("Hi, ascii") + m.string_view_print("Hi, utf8 πŸŽ‚") + m.string_view16_print(u"Hi, utf16 πŸŽ‚") + m.string_view32_print(u"Hi, utf32 πŸŽ‚") + 
assert capture == u""" + Hi, ascii 9 + Hi, utf8 πŸŽ‚ 13 + Hi, utf16 πŸŽ‚ 12 + Hi, utf32 πŸŽ‚ 11 + """ + if hasattr(m, "has_u8string"): + with capture: + m.string_view8_print("Hi, ascii") + m.string_view8_print(u"Hi, utf8 πŸŽ‚") + assert capture == u""" + Hi, ascii 9 + Hi, utf8 πŸŽ‚ 13 + """ + + +def test_integer_casting(): + """Issue #929 - out-of-range integer values shouldn't be accepted""" + assert m.i32_str(-1) == "-1" + assert m.i64_str(-1) == "-1" + assert m.i32_str(2000000000) == "2000000000" + assert m.u32_str(2000000000) == "2000000000" + if env.PY2: + assert m.i32_str(long(-1)) == "-1" # noqa: F821 undefined name 'long' + assert m.i64_str(long(-1)) == "-1" # noqa: F821 undefined name 'long' + assert m.i64_str(long(-999999999999)) == "-999999999999" # noqa: F821 undefined name + assert m.u64_str(long(999999999999)) == "999999999999" # noqa: F821 undefined name 'long' + else: + assert m.i64_str(-999999999999) == "-999999999999" + assert m.u64_str(999999999999) == "999999999999" + + with pytest.raises(TypeError) as excinfo: + m.u32_str(-1) + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.u64_str(-1) + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.i32_str(-3000000000) + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.i32_str(3000000000) + assert "incompatible function arguments" in str(excinfo.value) + + if env.PY2: + with pytest.raises(TypeError) as excinfo: + m.u32_str(long(-1)) # noqa: F821 undefined name 'long' + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.u64_str(long(-1)) # noqa: F821 undefined name 'long' + assert "incompatible function arguments" in str(excinfo.value) + + +def test_tuple(doc): + """std::pair <-> tuple & std::tuple <-> tuple""" + assert m.pair_passthrough((True, "test")) == 
("test", True) + assert m.tuple_passthrough((True, "test", 5)) == (5, "test", True) + # Any sequence can be cast to a std::pair or std::tuple + assert m.pair_passthrough([True, "test"]) == ("test", True) + assert m.tuple_passthrough([True, "test", 5]) == (5, "test", True) + assert m.empty_tuple() == () + + assert doc(m.pair_passthrough) == """ + pair_passthrough(arg0: Tuple[bool, str]) -> Tuple[str, bool] + + Return a pair in reversed order + """ + assert doc(m.tuple_passthrough) == """ + tuple_passthrough(arg0: Tuple[bool, str, int]) -> Tuple[int, str, bool] + + Return a triple in reversed order + """ + + assert m.rvalue_pair() == ("rvalue", "rvalue") + assert m.lvalue_pair() == ("lvalue", "lvalue") + assert m.rvalue_tuple() == ("rvalue", "rvalue", "rvalue") + assert m.lvalue_tuple() == ("lvalue", "lvalue", "lvalue") + assert m.rvalue_nested() == ("rvalue", ("rvalue", ("rvalue", "rvalue"))) + assert m.lvalue_nested() == ("lvalue", ("lvalue", ("lvalue", "lvalue"))) + + assert m.int_string_pair() == (2, "items") + + +def test_builtins_cast_return_none(): + """Casters produced with PYBIND11_TYPE_CASTER() should convert nullptr to None""" + assert m.return_none_string() is None + assert m.return_none_char() is None + assert m.return_none_bool() is None + assert m.return_none_int() is None + assert m.return_none_float() is None + assert m.return_none_pair() is None + + +def test_none_deferred(): + """None passed as various argument types should defer to other overloads""" + assert not m.defer_none_cstring("abc") + assert m.defer_none_cstring(None) + assert not m.defer_none_custom(UserType()) + assert m.defer_none_custom(None) + assert m.nodefer_none_void(None) + + +def test_void_caster(): + assert m.load_nullptr_t(None) is None + assert m.cast_nullptr_t() is None + + +def test_reference_wrapper(): + """std::reference_wrapper for builtin and user types""" + assert m.refwrap_builtin(42) == 420 + assert m.refwrap_usertype(UserType(42)) == 42 + + with 
pytest.raises(TypeError) as excinfo: + m.refwrap_builtin(None) + assert "incompatible function arguments" in str(excinfo.value) + + with pytest.raises(TypeError) as excinfo: + m.refwrap_usertype(None) + assert "incompatible function arguments" in str(excinfo.value) + + a1 = m.refwrap_list(copy=True) + a2 = m.refwrap_list(copy=True) + assert [x.value for x in a1] == [2, 3] + assert [x.value for x in a2] == [2, 3] + assert not a1[0] is a2[0] and not a1[1] is a2[1] + + b1 = m.refwrap_list(copy=False) + b2 = m.refwrap_list(copy=False) + assert [x.value for x in b1] == [1, 2] + assert [x.value for x in b2] == [1, 2] + assert b1[0] is b2[0] and b1[1] is b2[1] + + assert m.refwrap_iiw(IncType(5)) == 5 + assert m.refwrap_call_iiw(IncType(10), m.refwrap_iiw) == [10, 10, 10, 10] + + +def test_complex_cast(): + """std::complex casts""" + assert m.complex_cast(1) == "1.0" + assert m.complex_cast(2j) == "(0.0, 2.0)" + + +def test_bool_caster(): + """Test bool caster implicit conversions.""" + convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert + + def require_implicit(v): + pytest.raises(TypeError, noconvert, v) + + def cant_convert(v): + pytest.raises(TypeError, convert, v) + + # straight up bool + assert convert(True) is True + assert convert(False) is False + assert noconvert(True) is True + assert noconvert(False) is False + + # None requires implicit conversion + require_implicit(None) + assert convert(None) is False + + class A(object): + def __init__(self, x): + self.x = x + + def __nonzero__(self): + return self.x + + def __bool__(self): + return self.x + + class B(object): + pass + + # Arbitrary objects are not accepted + cant_convert(object()) + cant_convert(B()) + + # Objects with __nonzero__ / __bool__ defined can be converted + require_implicit(A(True)) + assert convert(A(True)) is True + assert convert(A(False)) is False + + +def test_numpy_bool(): + np = pytest.importorskip("numpy") + + convert, noconvert = m.bool_passthrough, 
m.bool_passthrough_noconvert + + def cant_convert(v): + pytest.raises(TypeError, convert, v) + + # np.bool_ is not considered implicit + assert convert(np.bool_(True)) is True + assert convert(np.bool_(False)) is False + assert noconvert(np.bool_(True)) is True + assert noconvert(np.bool_(False)) is False + cant_convert(np.zeros(2, dtype='int')) + + +def test_int_long(): + """In Python 2, a C++ int should return a Python int rather than long + if possible: longs are not always accepted where ints are used (such + as the argument to sys.exit()). A C++ long long is always a Python + long.""" + + import sys + must_be_long = type(getattr(sys, 'maxint', 1) + 1) + assert isinstance(m.int_cast(), int) + assert isinstance(m.long_cast(), int) + assert isinstance(m.longlong_cast(), must_be_long) + + +def test_void_caster_2(): + assert m.test_void_caster() diff --git a/diffvg/pybind11/tests/test_call_policies.cpp b/diffvg/pybind11/tests/test_call_policies.cpp new file mode 100644 index 0000000000000000000000000000000000000000..26c83f81b0ed370365d48279a4b8f3d4d23b5487 --- /dev/null +++ b/diffvg/pybind11/tests/test_call_policies.cpp @@ -0,0 +1,101 @@ +/* + tests/test_call_policies.cpp -- keep_alive and call_guard + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" + +struct CustomGuard { + static bool enabled; + + CustomGuard() { enabled = true; } + ~CustomGuard() { enabled = false; } + + static const char *report_status() { return enabled ? "guarded" : "unguarded"; } +}; +bool CustomGuard::enabled = false; + +struct DependentGuard { + static bool enabled; + + DependentGuard() { enabled = CustomGuard::enabled; } + ~DependentGuard() { enabled = false; } + + static const char *report_status() { return enabled ? 
"guarded" : "unguarded"; } +}; +bool DependentGuard::enabled = false; + +TEST_SUBMODULE(call_policies, m) { + // Parent/Child are used in: + // test_keep_alive_argument, test_keep_alive_return_value, test_alive_gc_derived, + // test_alive_gc_multi_derived, test_return_none, test_keep_alive_constructor + class Child { + public: + Child() { py::print("Allocating child."); } + Child(const Child &) = default; + Child(Child &&) = default; + ~Child() { py::print("Releasing child."); } + }; + py::class_(m, "Child") + .def(py::init<>()); + + class Parent { + public: + Parent() { py::print("Allocating parent."); } + Parent(const Parent& parent) = default; + ~Parent() { py::print("Releasing parent."); } + void addChild(Child *) { } + Child *returnChild() { return new Child(); } + Child *returnNullChild() { return nullptr; } + }; + py::class_(m, "Parent") + .def(py::init<>()) + .def(py::init([](Child *) { return new Parent(); }), py::keep_alive<1, 2>()) + .def("addChild", &Parent::addChild) + .def("addChildKeepAlive", &Parent::addChild, py::keep_alive<1, 2>()) + .def("returnChild", &Parent::returnChild) + .def("returnChildKeepAlive", &Parent::returnChild, py::keep_alive<1, 0>()) + .def("returnNullChildKeepAliveChild", &Parent::returnNullChild, py::keep_alive<1, 0>()) + .def("returnNullChildKeepAliveParent", &Parent::returnNullChild, py::keep_alive<0, 1>()); + +#if !defined(PYPY_VERSION) + // test_alive_gc + class ParentGC : public Parent { + public: + using Parent::Parent; + }; + py::class_(m, "ParentGC", py::dynamic_attr()) + .def(py::init<>()); +#endif + + // test_call_guard + m.def("unguarded_call", &CustomGuard::report_status); + m.def("guarded_call", &CustomGuard::report_status, py::call_guard()); + + m.def("multiple_guards_correct_order", []() { + return CustomGuard::report_status() + std::string(" & ") + DependentGuard::report_status(); + }, py::call_guard()); + + m.def("multiple_guards_wrong_order", []() { + return DependentGuard::report_status() + std::string(" & ") 
+ CustomGuard::report_status(); + }, py::call_guard()); + +#if defined(WITH_THREAD) && !defined(PYPY_VERSION) + // `py::call_guard()` should work in PyPy as well, + // but it's unclear how to test it without `PyGILState_GetThisThreadState`. + auto report_gil_status = []() { + auto is_gil_held = false; + if (auto tstate = py::detail::get_thread_state_unchecked()) + is_gil_held = (tstate == PyGILState_GetThisThreadState()); + + return is_gil_held ? "GIL held" : "GIL released"; + }; + + m.def("with_gil", report_gil_status); + m.def("without_gil", report_gil_status, py::call_guard()); +#endif +} diff --git a/diffvg/pybind11/tests/test_call_policies.py b/diffvg/pybind11/tests/test_call_policies.py new file mode 100644 index 0000000000000000000000000000000000000000..ec005c132f9c172fda1570073ada46342e38a2ea --- /dev/null +++ b/diffvg/pybind11/tests/test_call_policies.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +import pytest + +import env # noqa: F401 + +from pybind11_tests import call_policies as m +from pybind11_tests import ConstructorStats + + +@pytest.mark.xfail("env.PYPY", reason="sometimes comes out 1 off on PyPy", strict=False) +def test_keep_alive_argument(capture): + n_inst = ConstructorStats.detail_reg_inst() + with capture: + p = m.Parent() + assert capture == "Allocating parent." + with capture: + p.addChild(m.Child()) + assert ConstructorStats.detail_reg_inst() == n_inst + 1 + assert capture == """ + Allocating child. + Releasing child. + """ + with capture: + del p + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == "Releasing parent." + + with capture: + p = m.Parent() + assert capture == "Allocating parent." + with capture: + p.addChildKeepAlive(m.Child()) + assert ConstructorStats.detail_reg_inst() == n_inst + 2 + assert capture == "Allocating child." + with capture: + del p + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == """ + Releasing parent. + Releasing child. 
+ """ + + +def test_keep_alive_return_value(capture): + n_inst = ConstructorStats.detail_reg_inst() + with capture: + p = m.Parent() + assert capture == "Allocating parent." + with capture: + p.returnChild() + assert ConstructorStats.detail_reg_inst() == n_inst + 1 + assert capture == """ + Allocating child. + Releasing child. + """ + with capture: + del p + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == "Releasing parent." + + with capture: + p = m.Parent() + assert capture == "Allocating parent." + with capture: + p.returnChildKeepAlive() + assert ConstructorStats.detail_reg_inst() == n_inst + 2 + assert capture == "Allocating child." + with capture: + del p + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == """ + Releasing parent. + Releasing child. + """ + + +# https://foss.heptapod.net/pypy/pypy/-/issues/2447 +@pytest.mark.xfail("env.PYPY", reason="_PyObject_GetDictPtr is unimplemented") +def test_alive_gc(capture): + n_inst = ConstructorStats.detail_reg_inst() + p = m.ParentGC() + p.addChildKeepAlive(m.Child()) + assert ConstructorStats.detail_reg_inst() == n_inst + 2 + lst = [p] + lst.append(lst) # creates a circular reference + with capture: + del p, lst + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == """ + Releasing parent. + Releasing child. + """ + + +def test_alive_gc_derived(capture): + class Derived(m.Parent): + pass + + n_inst = ConstructorStats.detail_reg_inst() + p = Derived() + p.addChildKeepAlive(m.Child()) + assert ConstructorStats.detail_reg_inst() == n_inst + 2 + lst = [p] + lst.append(lst) # creates a circular reference + with capture: + del p, lst + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == """ + Releasing parent. + Releasing child. 
+ """ + + +def test_alive_gc_multi_derived(capture): + class Derived(m.Parent, m.Child): + def __init__(self): + m.Parent.__init__(self) + m.Child.__init__(self) + + n_inst = ConstructorStats.detail_reg_inst() + p = Derived() + p.addChildKeepAlive(m.Child()) + # +3 rather than +2 because Derived corresponds to two registered instances + assert ConstructorStats.detail_reg_inst() == n_inst + 3 + lst = [p] + lst.append(lst) # creates a circular reference + with capture: + del p, lst + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == """ + Releasing parent. + Releasing child. + Releasing child. + """ + + +def test_return_none(capture): + n_inst = ConstructorStats.detail_reg_inst() + with capture: + p = m.Parent() + assert capture == "Allocating parent." + with capture: + p.returnNullChildKeepAliveChild() + assert ConstructorStats.detail_reg_inst() == n_inst + 1 + assert capture == "" + with capture: + del p + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == "Releasing parent." + + with capture: + p = m.Parent() + assert capture == "Allocating parent." + with capture: + p.returnNullChildKeepAliveParent() + assert ConstructorStats.detail_reg_inst() == n_inst + 1 + assert capture == "" + with capture: + del p + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == "Releasing parent." + + +def test_keep_alive_constructor(capture): + n_inst = ConstructorStats.detail_reg_inst() + + with capture: + p = m.Parent(m.Child()) + assert ConstructorStats.detail_reg_inst() == n_inst + 2 + assert capture == """ + Allocating child. + Allocating parent. + """ + with capture: + del p + assert ConstructorStats.detail_reg_inst() == n_inst + assert capture == """ + Releasing parent. + Releasing child. 
+ """ + + +def test_call_guard(): + assert m.unguarded_call() == "unguarded" + assert m.guarded_call() == "guarded" + + assert m.multiple_guards_correct_order() == "guarded & guarded" + assert m.multiple_guards_wrong_order() == "unguarded & guarded" + + if hasattr(m, "with_gil"): + assert m.with_gil() == "GIL held" + assert m.without_gil() == "GIL released" diff --git a/diffvg/pybind11/tests/test_callbacks.cpp b/diffvg/pybind11/tests/test_callbacks.cpp new file mode 100644 index 0000000000000000000000000000000000000000..71b88c44c7650a7e7b3f37cee19359e15bbb0270 --- /dev/null +++ b/diffvg/pybind11/tests/test_callbacks.cpp @@ -0,0 +1,168 @@ +/* + tests/test_callbacks.cpp -- callbacks + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" +#include +#include + + +int dummy_function(int i) { return i + 1; } + +TEST_SUBMODULE(callbacks, m) { + // test_callbacks, test_function_signatures + m.def("test_callback1", [](py::object func) { return func(); }); + m.def("test_callback2", [](py::object func) { return func("Hello", 'x', true, 5); }); + m.def("test_callback3", [](const std::function &func) { + return "func(43) = " + std::to_string(func(43)); }); + m.def("test_callback4", []() -> std::function { return [](int i) { return i+1; }; }); + m.def("test_callback5", []() { + return py::cpp_function([](int i) { return i+1; }, py::arg("number")); + }); + + // test_keyword_args_and_generalized_unpacking + m.def("test_tuple_unpacking", [](py::function f) { + auto t1 = py::make_tuple(2, 3); + auto t2 = py::make_tuple(5, 6); + return f("positional", 1, *t1, 4, *t2); + }); + + m.def("test_dict_unpacking", [](py::function f) { + auto d1 = py::dict("key"_a="value", "a"_a=1); + auto d2 = py::dict(); + auto d3 = py::dict("b"_a=2); + return f("positional", 1, **d1, **d2, **d3); + }); + + m.def("test_keyword_args", 
[](py::function f) { + return f("x"_a=10, "y"_a=20); + }); + + m.def("test_unpacking_and_keywords1", [](py::function f) { + auto args = py::make_tuple(2); + auto kwargs = py::dict("d"_a=4); + return f(1, *args, "c"_a=3, **kwargs); + }); + + m.def("test_unpacking_and_keywords2", [](py::function f) { + auto kwargs1 = py::dict("a"_a=1); + auto kwargs2 = py::dict("c"_a=3, "d"_a=4); + return f("positional", *py::make_tuple(1), 2, *py::make_tuple(3, 4), 5, + "key"_a="value", **kwargs1, "b"_a=2, **kwargs2, "e"_a=5); + }); + + m.def("test_unpacking_error1", [](py::function f) { + auto kwargs = py::dict("x"_a=3); + return f("x"_a=1, "y"_a=2, **kwargs); // duplicate ** after keyword + }); + + m.def("test_unpacking_error2", [](py::function f) { + auto kwargs = py::dict("x"_a=3); + return f(**kwargs, "x"_a=1); // duplicate keyword after ** + }); + + m.def("test_arg_conversion_error1", [](py::function f) { + f(234, UnregisteredType(), "kw"_a=567); + }); + + m.def("test_arg_conversion_error2", [](py::function f) { + f(234, "expected_name"_a=UnregisteredType(), "kw"_a=567); + }); + + // test_lambda_closure_cleanup + struct Payload { + Payload() { print_default_created(this); } + ~Payload() { print_destroyed(this); } + Payload(const Payload &) { print_copy_created(this); } + Payload(Payload &&) { print_move_created(this); } + }; + // Export the payload constructor statistics for testing purposes: + m.def("payload_cstats", &ConstructorStats::get); + /* Test cleanup of lambda closure */ + m.def("test_cleanup", []() -> std::function { + Payload p; + + return [p]() { + /* p should be cleaned up when the returned function is garbage collected */ + (void) p; + }; + }); + + // test_cpp_function_roundtrip + /* Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer */ + m.def("dummy_function", &dummy_function); + m.def("dummy_function2", [](int i, int j) { return i + j; }); + m.def("roundtrip", [](std::function f, bool expect_none = false) { + if 
(expect_none && f) + throw std::runtime_error("Expected None to be converted to empty std::function"); + return f; + }, py::arg("f"), py::arg("expect_none")=false); + m.def("test_dummy_function", [](const std::function &f) -> std::string { + using fn_type = int (*)(int); + auto result = f.target(); + if (!result) { + auto r = f(1); + return "can't convert to function pointer: eval(1) = " + std::to_string(r); + } else if (*result == dummy_function) { + auto r = (*result)(1); + return "matches dummy_function: eval(1) = " + std::to_string(r); + } else { + return "argument does NOT match dummy_function. This should never happen!"; + } + }); + + class AbstractBase { public: virtual unsigned int func() = 0; }; + m.def("func_accepting_func_accepting_base", [](std::function) { }); + + struct MovableObject { + bool valid = true; + + MovableObject() = default; + MovableObject(const MovableObject &) = default; + MovableObject &operator=(const MovableObject &) = default; + MovableObject(MovableObject &&o) : valid(o.valid) { o.valid = false; } + MovableObject &operator=(MovableObject &&o) { + valid = o.valid; + o.valid = false; + return *this; + } + }; + py::class_(m, "MovableObject"); + + // test_movable_object + m.def("callback_with_movable", [](std::function f) { + auto x = MovableObject(); + f(x); // lvalue reference shouldn't move out object + return x.valid; // must still return `true` + }); + + // test_bound_method_callback + struct CppBoundMethodTest {}; + py::class_(m, "CppBoundMethodTest") + .def(py::init<>()) + .def("triple", [](CppBoundMethodTest &, int val) { return 3 * val; }); + + // test async Python callbacks + using callback_f = std::function; + m.def("test_async_callback", [](callback_f f, py::list work) { + // make detached thread that calls `f` with piece of work after a little delay + auto start_f = [f](int j) { + auto invoke_f = [f, j] { + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + f(j); + }; + auto t = std::thread(std::move(invoke_f)); 
+ t.detach(); + }; + + // spawn worker threads + for (auto i : work) + start_f(py::cast(i)); + }); +} diff --git a/diffvg/pybind11/tests/test_callbacks.py b/diffvg/pybind11/tests/test_callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..d5d0e045d224aab7381549bdcfb1d2102cdd0eb7 --- /dev/null +++ b/diffvg/pybind11/tests/test_callbacks.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import callbacks as m +from threading import Thread + + +def test_callbacks(): + from functools import partial + + def func1(): + return "func1" + + def func2(a, b, c, d): + return "func2", a, b, c, d + + def func3(a): + return "func3({})".format(a) + + assert m.test_callback1(func1) == "func1" + assert m.test_callback2(func2) == ("func2", "Hello", "x", True, 5) + assert m.test_callback1(partial(func2, 1, 2, 3, 4)) == ("func2", 1, 2, 3, 4) + assert m.test_callback1(partial(func3, "partial")) == "func3(partial)" + assert m.test_callback3(lambda i: i + 1) == "func(43) = 44" + + f = m.test_callback4() + assert f(43) == 44 + f = m.test_callback5() + assert f(number=43) == 44 + + +def test_bound_method_callback(): + # Bound Python method: + class MyClass: + def double(self, val): + return 2 * val + + z = MyClass() + assert m.test_callback3(z.double) == "func(43) = 86" + + z = m.CppBoundMethodTest() + assert m.test_callback3(z.triple) == "func(43) = 129" + + +def test_keyword_args_and_generalized_unpacking(): + + def f(*args, **kwargs): + return args, kwargs + + assert m.test_tuple_unpacking(f) == (("positional", 1, 2, 3, 4, 5, 6), {}) + assert m.test_dict_unpacking(f) == (("positional", 1), {"key": "value", "a": 1, "b": 2}) + assert m.test_keyword_args(f) == ((), {"x": 10, "y": 20}) + assert m.test_unpacking_and_keywords1(f) == ((1, 2), {"c": 3, "d": 4}) + assert m.test_unpacking_and_keywords2(f) == ( + ("positional", 1, 2, 3, 4, 5), + {"key": "value", "a": 1, "b": 2, "c": 3, "d": 4, "e": 5} + ) + + with pytest.raises(TypeError) as 
excinfo: + m.test_unpacking_error1(f) + assert "Got multiple values for keyword argument" in str(excinfo.value) + + with pytest.raises(TypeError) as excinfo: + m.test_unpacking_error2(f) + assert "Got multiple values for keyword argument" in str(excinfo.value) + + with pytest.raises(RuntimeError) as excinfo: + m.test_arg_conversion_error1(f) + assert "Unable to convert call argument" in str(excinfo.value) + + with pytest.raises(RuntimeError) as excinfo: + m.test_arg_conversion_error2(f) + assert "Unable to convert call argument" in str(excinfo.value) + + +def test_lambda_closure_cleanup(): + m.test_cleanup() + cstats = m.payload_cstats() + assert cstats.alive() == 0 + assert cstats.copy_constructions == 1 + assert cstats.move_constructions >= 1 + + +def test_cpp_function_roundtrip(): + """Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer""" + + assert m.test_dummy_function(m.dummy_function) == "matches dummy_function: eval(1) = 2" + assert (m.test_dummy_function(m.roundtrip(m.dummy_function)) == + "matches dummy_function: eval(1) = 2") + assert m.roundtrip(None, expect_none=True) is None + assert (m.test_dummy_function(lambda x: x + 2) == + "can't convert to function pointer: eval(1) = 3") + + with pytest.raises(TypeError) as excinfo: + m.test_dummy_function(m.dummy_function2) + assert "incompatible function arguments" in str(excinfo.value) + + with pytest.raises(TypeError) as excinfo: + m.test_dummy_function(lambda x, y: x + y) + assert any(s in str(excinfo.value) for s in ("missing 1 required positional argument", + "takes exactly 2 arguments")) + + +def test_function_signatures(doc): + assert doc(m.test_callback3) == "test_callback3(arg0: Callable[[int], int]) -> str" + assert doc(m.test_callback4) == "test_callback4() -> Callable[[int], int]" + + +def test_movable_object(): + assert m.callback_with_movable(lambda _: None) is True + + +def test_async_callbacks(): + # serves as state for async callback + class Item: + def 
__init__(self, value): + self.value = value + + res = [] + + # generate stateful lambda that will store result in `res` + def gen_f(): + s = Item(3) + return lambda j: res.append(s.value + j) + + # do some work async + work = [1, 2, 3, 4] + m.test_async_callback(gen_f(), work) + # wait until work is done + from time import sleep + sleep(0.5) + assert sum(res) == sum([x + 3 for x in work]) + + +def test_async_async_callbacks(): + t = Thread(target=test_async_callbacks) + t.start() + t.join() diff --git a/diffvg/pybind11/tests/test_chrono.cpp b/diffvg/pybind11/tests/test_chrono.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1d79d4b6ca96ecf1c2faa8bd8002a2eb38f39124 --- /dev/null +++ b/diffvg/pybind11/tests/test_chrono.cpp @@ -0,0 +1,56 @@ +/* + tests/test_chrono.cpp -- test conversions to/from std::chrono types + + Copyright (c) 2016 Trent Houliston and + Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include +#include + +TEST_SUBMODULE(chrono, m) { + using system_time = std::chrono::system_clock::time_point; + using steady_time = std::chrono::steady_clock::time_point; + + using timespan = std::chrono::duration; + using timestamp = std::chrono::time_point; + + // test_chrono_system_clock + // Return the current time off the wall clock + m.def("test_chrono1", []() { return std::chrono::system_clock::now(); }); + + // test_chrono_system_clock_roundtrip + // Round trip the passed in system clock time + m.def("test_chrono2", [](system_time t) { return t; }); + + // test_chrono_duration_roundtrip + // Round trip the passed in duration + m.def("test_chrono3", [](std::chrono::system_clock::duration d) { return d; }); + + // test_chrono_duration_subtraction_equivalence + // Difference between two passed in time_points + m.def("test_chrono4", [](system_time a, system_time b) { return a - b; }); + + // test_chrono_steady_clock + // Return the current time off the steady_clock + m.def("test_chrono5", []() { return std::chrono::steady_clock::now(); }); + + // test_chrono_steady_clock_roundtrip + // Round trip a steady clock timepoint + m.def("test_chrono6", [](steady_time t) { return t; }); + + // test_floating_point_duration + // Roundtrip a duration in microseconds from a float argument + m.def("test_chrono7", [](std::chrono::microseconds t) { return t; }); + // Float durations (issue #719) + m.def("test_chrono_float_diff", [](std::chrono::duration a, std::chrono::duration b) { + return a - b; }); + + m.def("test_nano_timepoint", [](timestamp start, timespan delta) -> timestamp { + return start + delta; + }); +} diff --git a/diffvg/pybind11/tests/test_chrono.py b/diffvg/pybind11/tests/test_chrono.py new file mode 100644 index 0000000000000000000000000000000000000000..76783905a3bc9b60e5b58afdbdf592e88afb4f74 --- /dev/null +++ b/diffvg/pybind11/tests/test_chrono.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +from pybind11_tests import 
chrono as m +import datetime +import pytest + +import env # noqa: F401 + + +def test_chrono_system_clock(): + + # Get the time from both c++ and datetime + date0 = datetime.datetime.today() + date1 = m.test_chrono1() + date2 = datetime.datetime.today() + + # The returned value should be a datetime + assert isinstance(date1, datetime.datetime) + + # The numbers should vary by a very small amount (time it took to execute) + diff_python = abs(date2 - date0) + diff = abs(date1 - date2) + + # There should never be a days difference + assert diff.days == 0 + + # Since datetime.datetime.today() calls time.time(), and on some platforms + # that has 1 second accuracy, we compare this way + assert diff.seconds <= diff_python.seconds + + +def test_chrono_system_clock_roundtrip(): + date1 = datetime.datetime.today() + + # Roundtrip the time + date2 = m.test_chrono2(date1) + + # The returned value should be a datetime + assert isinstance(date2, datetime.datetime) + + # They should be identical (no information lost on roundtrip) + diff = abs(date1 - date2) + assert diff.days == 0 + assert diff.seconds == 0 + assert diff.microseconds == 0 + + +def test_chrono_system_clock_roundtrip_date(): + date1 = datetime.date.today() + + # Roundtrip the time + datetime2 = m.test_chrono2(date1) + date2 = datetime2.date() + time2 = datetime2.time() + + # The returned value should be a datetime + assert isinstance(datetime2, datetime.datetime) + assert isinstance(date2, datetime.date) + assert isinstance(time2, datetime.time) + + # They should be identical (no information lost on roundtrip) + diff = abs(date1 - date2) + assert diff.days == 0 + assert diff.seconds == 0 + assert diff.microseconds == 0 + + # Year, Month & Day should be the same after the round trip + assert date1.year == date2.year + assert date1.month == date2.month + assert date1.day == date2.day + + # There should be no time information + assert time2.hour == 0 + assert time2.minute == 0 + assert time2.second == 0 + assert 
time2.microsecond == 0 + + +SKIP_TZ_ENV_ON_WIN = pytest.mark.skipif( + "env.WIN", reason="TZ environment variable only supported on POSIX" +) + + +@pytest.mark.parametrize("time1", [ + datetime.datetime.today().time(), + datetime.time(0, 0, 0), + datetime.time(0, 0, 0, 1), + datetime.time(0, 28, 45, 109827), + datetime.time(0, 59, 59, 999999), + datetime.time(1, 0, 0), + datetime.time(5, 59, 59, 0), + datetime.time(5, 59, 59, 1), +]) +@pytest.mark.parametrize("tz", [ + None, + pytest.param("Europe/Brussels", marks=SKIP_TZ_ENV_ON_WIN), + pytest.param("Asia/Pyongyang", marks=SKIP_TZ_ENV_ON_WIN), + pytest.param("America/New_York", marks=SKIP_TZ_ENV_ON_WIN), +]) +def test_chrono_system_clock_roundtrip_time(time1, tz, monkeypatch): + if tz is not None: + monkeypatch.setenv("TZ", "/usr/share/zoneinfo/{}".format(tz)) + + # Roundtrip the time + datetime2 = m.test_chrono2(time1) + date2 = datetime2.date() + time2 = datetime2.time() + + # The returned value should be a datetime + assert isinstance(datetime2, datetime.datetime) + assert isinstance(date2, datetime.date) + assert isinstance(time2, datetime.time) + + # Hour, Minute, Second & Microsecond should be the same after the round trip + assert time1.hour == time2.hour + assert time1.minute == time2.minute + assert time1.second == time2.second + assert time1.microsecond == time2.microsecond + + # There should be no date information (i.e. 
date = python base date) + assert date2.year == 1970 + assert date2.month == 1 + assert date2.day == 1 + + +def test_chrono_duration_roundtrip(): + + # Get the difference between two times (a timedelta) + date1 = datetime.datetime.today() + date2 = datetime.datetime.today() + diff = date2 - date1 + + # Make sure this is a timedelta + assert isinstance(diff, datetime.timedelta) + + cpp_diff = m.test_chrono3(diff) + + assert cpp_diff.days == diff.days + assert cpp_diff.seconds == diff.seconds + assert cpp_diff.microseconds == diff.microseconds + + +def test_chrono_duration_subtraction_equivalence(): + + date1 = datetime.datetime.today() + date2 = datetime.datetime.today() + + diff = date2 - date1 + cpp_diff = m.test_chrono4(date2, date1) + + assert cpp_diff.days == diff.days + assert cpp_diff.seconds == diff.seconds + assert cpp_diff.microseconds == diff.microseconds + + +def test_chrono_duration_subtraction_equivalence_date(): + + date1 = datetime.date.today() + date2 = datetime.date.today() + + diff = date2 - date1 + cpp_diff = m.test_chrono4(date2, date1) + + assert cpp_diff.days == diff.days + assert cpp_diff.seconds == diff.seconds + assert cpp_diff.microseconds == diff.microseconds + + +def test_chrono_steady_clock(): + time1 = m.test_chrono5() + assert isinstance(time1, datetime.timedelta) + + +def test_chrono_steady_clock_roundtrip(): + time1 = datetime.timedelta(days=10, seconds=10, microseconds=100) + time2 = m.test_chrono6(time1) + + assert isinstance(time2, datetime.timedelta) + + # They should be identical (no information lost on roundtrip) + assert time1.days == time2.days + assert time1.seconds == time2.seconds + assert time1.microseconds == time2.microseconds + + +def test_floating_point_duration(): + # Test using a floating point number in seconds + time = m.test_chrono7(35.525123) + + assert isinstance(time, datetime.timedelta) + + assert time.seconds == 35 + assert 525122 <= time.microseconds <= 525123 + + diff = m.test_chrono_float_diff(43.789012, 
1.123456) + assert diff.seconds == 42 + assert 665556 <= diff.microseconds <= 665557 + + +def test_nano_timepoint(): + time = datetime.datetime.now() + time1 = m.test_nano_timepoint(time, datetime.timedelta(seconds=60)) + assert(time1 == time + datetime.timedelta(seconds=60)) diff --git a/diffvg/pybind11/tests/test_class.cpp b/diffvg/pybind11/tests/test_class.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5369cb064cc9fee76546529398787980f9c4c76e --- /dev/null +++ b/diffvg/pybind11/tests/test_class.cpp @@ -0,0 +1,449 @@ +/* + tests/test_class.cpp -- test py::class_ definitions and basic functionality + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" +#include "local_bindings.h" +#include + +#if defined(_MSC_VER) +# pragma warning(disable: 4324) // warning C4324: structure was padded due to alignment specifier +#endif + +// test_brace_initialization +struct NoBraceInitialization { + NoBraceInitialization(std::vector v) : vec{std::move(v)} {} + template + NoBraceInitialization(std::initializer_list l) : vec(l) {} + + std::vector vec; +}; + +TEST_SUBMODULE(class_, m) { + // test_instance + struct NoConstructor { + NoConstructor() = default; + NoConstructor(const NoConstructor &) = default; + NoConstructor(NoConstructor &&) = default; + static NoConstructor *new_instance() { + auto *ptr = new NoConstructor(); + print_created(ptr, "via new_instance"); + return ptr; + } + ~NoConstructor() { print_destroyed(this); } + }; + + py::class_(m, "NoConstructor") + .def_static("new_instance", &NoConstructor::new_instance, "Return an instance"); + + // test_inheritance + class Pet { + public: + Pet(const std::string &name, const std::string &species) + : m_name(name), m_species(species) {} + std::string name() const { return m_name; } + std::string species() const { return 
m_species; } + private: + std::string m_name; + std::string m_species; + }; + + class Dog : public Pet { + public: + Dog(const std::string &name) : Pet(name, "dog") {} + std::string bark() const { return "Woof!"; } + }; + + class Rabbit : public Pet { + public: + Rabbit(const std::string &name) : Pet(name, "parrot") {} + }; + + class Hamster : public Pet { + public: + Hamster(const std::string &name) : Pet(name, "rodent") {} + }; + + class Chimera : public Pet { + Chimera() : Pet("Kimmy", "chimera") {} + }; + + py::class_ pet_class(m, "Pet"); + pet_class + .def(py::init()) + .def("name", &Pet::name) + .def("species", &Pet::species); + + /* One way of declaring a subclass relationship: reference parent's class_ object */ + py::class_(m, "Dog", pet_class) + .def(py::init()); + + /* Another way of declaring a subclass relationship: reference parent's C++ type */ + py::class_(m, "Rabbit") + .def(py::init()); + + /* And another: list parent in class template arguments */ + py::class_(m, "Hamster") + .def(py::init()); + + /* Constructors are not inherited by default */ + py::class_(m, "Chimera"); + + m.def("pet_name_species", [](const Pet &pet) { return pet.name() + " is a " + pet.species(); }); + m.def("dog_bark", [](const Dog &dog) { return dog.bark(); }); + + // test_automatic_upcasting + struct BaseClass { + BaseClass() = default; + BaseClass(const BaseClass &) = default; + BaseClass(BaseClass &&) = default; + virtual ~BaseClass() {} + }; + struct DerivedClass1 : BaseClass { }; + struct DerivedClass2 : BaseClass { }; + + py::class_(m, "BaseClass").def(py::init<>()); + py::class_(m, "DerivedClass1").def(py::init<>()); + py::class_(m, "DerivedClass2").def(py::init<>()); + + m.def("return_class_1", []() -> BaseClass* { return new DerivedClass1(); }); + m.def("return_class_2", []() -> BaseClass* { return new DerivedClass2(); }); + m.def("return_class_n", [](int n) -> BaseClass* { + if (n == 1) return new DerivedClass1(); + if (n == 2) return new DerivedClass2(); + return 
new BaseClass(); + }); + m.def("return_none", []() -> BaseClass* { return nullptr; }); + + // test_isinstance + m.def("check_instances", [](py::list l) { + return py::make_tuple( + py::isinstance(l[0]), + py::isinstance(l[1]), + py::isinstance(l[2]), + py::isinstance(l[3]), + py::isinstance(l[4]), + py::isinstance(l[5]), + py::isinstance(l[6]) + ); + }); + + // test_mismatched_holder + struct MismatchBase1 { }; + struct MismatchDerived1 : MismatchBase1 { }; + + struct MismatchBase2 { }; + struct MismatchDerived2 : MismatchBase2 { }; + + m.def("mismatched_holder_1", []() { + auto mod = py::module::import("__main__"); + py::class_>(mod, "MismatchBase1"); + py::class_(mod, "MismatchDerived1"); + }); + m.def("mismatched_holder_2", []() { + auto mod = py::module::import("__main__"); + py::class_(mod, "MismatchBase2"); + py::class_, + MismatchBase2>(mod, "MismatchDerived2"); + }); + + // test_override_static + // #511: problem with inheritance + overwritten def_static + struct MyBase { + static std::unique_ptr make() { + return std::unique_ptr(new MyBase()); + } + }; + + struct MyDerived : MyBase { + static std::unique_ptr make() { + return std::unique_ptr(new MyDerived()); + } + }; + + py::class_(m, "MyBase") + .def_static("make", &MyBase::make); + + py::class_(m, "MyDerived") + .def_static("make", &MyDerived::make) + .def_static("make2", &MyDerived::make); + + // test_implicit_conversion_life_support + struct ConvertibleFromUserType { + int i; + + ConvertibleFromUserType(UserType u) : i(u.value()) { } + }; + + py::class_(m, "AcceptsUserType") + .def(py::init()); + py::implicitly_convertible(); + + m.def("implicitly_convert_argument", [](const ConvertibleFromUserType &r) { return r.i; }); + m.def("implicitly_convert_variable", [](py::object o) { + // `o` is `UserType` and `r` is a reference to a temporary created by implicit + // conversion. 
This is valid when called inside a bound function because the temp + // object is attached to the same life support system as the arguments. + const auto &r = o.cast(); + return r.i; + }); + m.add_object("implicitly_convert_variable_fail", [&] { + auto f = [](PyObject *, PyObject *args) -> PyObject * { + auto o = py::reinterpret_borrow(args)[0]; + try { // It should fail here because there is no life support. + o.cast(); + } catch (const py::cast_error &e) { + return py::str(e.what()).release().ptr(); + } + return py::str().release().ptr(); + }; + + auto def = new PyMethodDef{"f", f, METH_VARARGS, nullptr}; + return py::reinterpret_steal(PyCFunction_NewEx(def, nullptr, m.ptr())); + }()); + + // test_operator_new_delete + struct HasOpNewDel { + std::uint64_t i; + static void *operator new(size_t s) { py::print("A new", s); return ::operator new(s); } + static void *operator new(size_t s, void *ptr) { py::print("A placement-new", s); return ptr; } + static void operator delete(void *p) { py::print("A delete"); return ::operator delete(p); } + }; + struct HasOpNewDelSize { + std::uint32_t i; + static void *operator new(size_t s) { py::print("B new", s); return ::operator new(s); } + static void *operator new(size_t s, void *ptr) { py::print("B placement-new", s); return ptr; } + static void operator delete(void *p, size_t s) { py::print("B delete", s); return ::operator delete(p); } + }; + struct AliasedHasOpNewDelSize { + std::uint64_t i; + static void *operator new(size_t s) { py::print("C new", s); return ::operator new(s); } + static void *operator new(size_t s, void *ptr) { py::print("C placement-new", s); return ptr; } + static void operator delete(void *p, size_t s) { py::print("C delete", s); return ::operator delete(p); } + virtual ~AliasedHasOpNewDelSize() = default; + AliasedHasOpNewDelSize() = default; + AliasedHasOpNewDelSize(const AliasedHasOpNewDelSize&) = delete; + }; + struct PyAliasedHasOpNewDelSize : AliasedHasOpNewDelSize { + 
PyAliasedHasOpNewDelSize() = default; + PyAliasedHasOpNewDelSize(int) { } + std::uint64_t j; + }; + struct HasOpNewDelBoth { + std::uint32_t i[8]; + static void *operator new(size_t s) { py::print("D new", s); return ::operator new(s); } + static void *operator new(size_t s, void *ptr) { py::print("D placement-new", s); return ptr; } + static void operator delete(void *p) { py::print("D delete"); return ::operator delete(p); } + static void operator delete(void *p, size_t s) { py::print("D wrong delete", s); return ::operator delete(p); } + }; + py::class_(m, "HasOpNewDel").def(py::init<>()); + py::class_(m, "HasOpNewDelSize").def(py::init<>()); + py::class_(m, "HasOpNewDelBoth").def(py::init<>()); + py::class_ aliased(m, "AliasedHasOpNewDelSize"); + aliased.def(py::init<>()); + aliased.attr("size_noalias") = py::int_(sizeof(AliasedHasOpNewDelSize)); + aliased.attr("size_alias") = py::int_(sizeof(PyAliasedHasOpNewDelSize)); + + // This test is actually part of test_local_bindings (test_duplicate_local), but we need a + // definition in a different compilation unit within the same module: + bind_local(m, "LocalExternal", py::module_local()); + + // test_bind_protected_functions + class ProtectedA { + protected: + int foo() const { return value; } + + private: + int value = 42; + }; + + class PublicistA : public ProtectedA { + public: + using ProtectedA::foo; + }; + + py::class_(m, "ProtectedA") + .def(py::init<>()) +#if !defined(_MSC_VER) || _MSC_VER >= 1910 + .def("foo", &PublicistA::foo); +#else + .def("foo", static_cast(&PublicistA::foo)); +#endif + + class ProtectedB { + public: + virtual ~ProtectedB() = default; + ProtectedB() = default; + ProtectedB(const ProtectedB &) = delete; + + protected: + virtual int foo() const { return value; } + + private: + int value = 42; + }; + + class TrampolineB : public ProtectedB { + public: + int foo() const override { PYBIND11_OVERLOAD(int, ProtectedB, foo, ); } + }; + + class PublicistB : public ProtectedB { + public: + 
using ProtectedB::foo; + }; + + py::class_(m, "ProtectedB") + .def(py::init<>()) +#if !defined(_MSC_VER) || _MSC_VER >= 1910 + .def("foo", &PublicistB::foo); +#else + .def("foo", static_cast(&PublicistB::foo)); +#endif + + // test_brace_initialization + struct BraceInitialization { + int field1; + std::string field2; + }; + + py::class_(m, "BraceInitialization") + .def(py::init()) + .def_readwrite("field1", &BraceInitialization::field1) + .def_readwrite("field2", &BraceInitialization::field2); + // We *don't* want to construct using braces when the given constructor argument maps to a + // constructor, because brace initialization could go to the wrong place (in particular when + // there is also an `initializer_list`-accept constructor): + py::class_(m, "NoBraceInitialization") + .def(py::init>()) + .def_readonly("vec", &NoBraceInitialization::vec); + + // test_reentrant_implicit_conversion_failure + // #1035: issue with runaway reentrant implicit conversion + struct BogusImplicitConversion { + BogusImplicitConversion(const BogusImplicitConversion &) { } + }; + + py::class_(m, "BogusImplicitConversion") + .def(py::init()); + + py::implicitly_convertible(); + + // test_qualname + // #1166: nested class docstring doesn't show nested name + // Also related: tests that __qualname__ is set properly + struct NestBase {}; + struct Nested {}; + py::class_ base(m, "NestBase"); + base.def(py::init<>()); + py::class_(base, "Nested") + .def(py::init<>()) + .def("fn", [](Nested &, int, NestBase &, Nested &) {}) + .def("fa", [](Nested &, int, NestBase &, Nested &) {}, + "a"_a, "b"_a, "c"_a); + base.def("g", [](NestBase &, Nested &) {}); + base.def("h", []() { return NestBase(); }); + + // test_error_after_conversion + // The second-pass path through dispatcher() previously didn't + // remember which overload was used, and would crash trying to + // generate a useful error message + + struct NotRegistered {}; + struct StringWrapper { std::string str; }; + 
m.def("test_error_after_conversions", [](int) {}); + m.def("test_error_after_conversions", + [](StringWrapper) -> NotRegistered { return {}; }); + py::class_(m, "StringWrapper").def(py::init()); + py::implicitly_convertible(); + + #if defined(PYBIND11_CPP17) + struct alignas(1024) Aligned { + std::uintptr_t ptr() const { return (uintptr_t) this; } + }; + py::class_(m, "Aligned") + .def(py::init<>()) + .def("ptr", &Aligned::ptr); + #endif + + // test_final + struct IsFinal final {}; + py::class_(m, "IsFinal", py::is_final()); + + // test_non_final_final + struct IsNonFinalFinal {}; + py::class_(m, "IsNonFinalFinal", py::is_final()); + + struct PyPrintDestructor { + PyPrintDestructor() {} + ~PyPrintDestructor() { + py::print("Print from destructor"); + } + void throw_something() { throw std::runtime_error("error"); } + }; + py::class_(m, "PyPrintDestructor") + .def(py::init<>()) + .def("throw_something", &PyPrintDestructor::throw_something); +} + +template class BreaksBase { public: + virtual ~BreaksBase() = default; + BreaksBase() = default; + BreaksBase(const BreaksBase&) = delete; +}; +template class BreaksTramp : public BreaksBase {}; +// These should all compile just fine: +typedef py::class_, std::unique_ptr>, BreaksTramp<1>> DoesntBreak1; +typedef py::class_, BreaksTramp<2>, std::unique_ptr>> DoesntBreak2; +typedef py::class_, std::unique_ptr>> DoesntBreak3; +typedef py::class_, BreaksTramp<4>> DoesntBreak4; +typedef py::class_> DoesntBreak5; +typedef py::class_, std::shared_ptr>, BreaksTramp<6>> DoesntBreak6; +typedef py::class_, BreaksTramp<7>, std::shared_ptr>> DoesntBreak7; +typedef py::class_, std::shared_ptr>> DoesntBreak8; +#define CHECK_BASE(N) static_assert(std::is_same>::value, \ + "DoesntBreak" #N " has wrong type!") +CHECK_BASE(1); CHECK_BASE(2); CHECK_BASE(3); CHECK_BASE(4); CHECK_BASE(5); CHECK_BASE(6); CHECK_BASE(7); CHECK_BASE(8); +#define CHECK_ALIAS(N) static_assert(DoesntBreak##N::has_alias && std::is_same>::value, \ + "DoesntBreak" #N " has 
wrong type_alias!") +#define CHECK_NOALIAS(N) static_assert(!DoesntBreak##N::has_alias && std::is_void::value, \ + "DoesntBreak" #N " has type alias, but shouldn't!") +CHECK_ALIAS(1); CHECK_ALIAS(2); CHECK_NOALIAS(3); CHECK_ALIAS(4); CHECK_NOALIAS(5); CHECK_ALIAS(6); CHECK_ALIAS(7); CHECK_NOALIAS(8); +#define CHECK_HOLDER(N, TYPE) static_assert(std::is_same>>::value, \ + "DoesntBreak" #N " has wrong holder_type!") +CHECK_HOLDER(1, unique); CHECK_HOLDER(2, unique); CHECK_HOLDER(3, unique); CHECK_HOLDER(4, unique); CHECK_HOLDER(5, unique); +CHECK_HOLDER(6, shared); CHECK_HOLDER(7, shared); CHECK_HOLDER(8, shared); + +// There's no nice way to test that these fail because they fail to compile; leave them here, +// though, so that they can be manually tested by uncommenting them (and seeing that compilation +// failures occurs). + +// We have to actually look into the type: the typedef alone isn't enough to instantiate the type: +#define CHECK_BROKEN(N) static_assert(std::is_same>::value, \ + "Breaks1 has wrong type!"); + +//// Two holder classes: +//typedef py::class_, std::unique_ptr>, std::unique_ptr>> Breaks1; +//CHECK_BROKEN(1); +//// Two aliases: +//typedef py::class_, BreaksTramp<-2>, BreaksTramp<-2>> Breaks2; +//CHECK_BROKEN(2); +//// Holder + 2 aliases +//typedef py::class_, std::unique_ptr>, BreaksTramp<-3>, BreaksTramp<-3>> Breaks3; +//CHECK_BROKEN(3); +//// Alias + 2 holders +//typedef py::class_, std::unique_ptr>, BreaksTramp<-4>, std::shared_ptr>> Breaks4; +//CHECK_BROKEN(4); +//// Invalid option (not a subclass or holder) +//typedef py::class_, BreaksTramp<-4>> Breaks5; +//CHECK_BROKEN(5); +//// Invalid option: multiple inheritance not supported: +//template <> struct BreaksBase<-8> : BreaksBase<-6>, BreaksBase<-7> {}; +//typedef py::class_, BreaksBase<-6>, BreaksBase<-7>> Breaks8; +//CHECK_BROKEN(8); diff --git a/diffvg/pybind11/tests/test_class.py b/diffvg/pybind11/tests/test_class.py new file mode 100644 index 
0000000000000000000000000000000000000000..4214fe79d7fbab2b38a1f15ca39d41e7cd33a171 --- /dev/null +++ b/diffvg/pybind11/tests/test_class.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- +import pytest + +import env # noqa: F401 + +from pybind11_tests import class_ as m +from pybind11_tests import UserType, ConstructorStats + + +def test_repr(): + # In Python 3.3+, repr() accesses __qualname__ + assert "pybind11_type" in repr(type(UserType)) + assert "UserType" in repr(UserType) + + +def test_instance(msg): + with pytest.raises(TypeError) as excinfo: + m.NoConstructor() + assert msg(excinfo.value) == "m.class_.NoConstructor: No constructor defined!" + + instance = m.NoConstructor.new_instance() + + cstats = ConstructorStats.get(m.NoConstructor) + assert cstats.alive() == 1 + del instance + assert cstats.alive() == 0 + + +def test_docstrings(doc): + assert doc(UserType) == "A `py::class_` type for testing" + assert UserType.__name__ == "UserType" + assert UserType.__module__ == "pybind11_tests" + assert UserType.get_value.__name__ == "get_value" + assert UserType.get_value.__module__ == "pybind11_tests" + + assert doc(UserType.get_value) == """ + get_value(self: m.UserType) -> int + + Get value using a method + """ + assert doc(UserType.value) == "Get/set value using a property" + + assert doc(m.NoConstructor.new_instance) == """ + new_instance() -> m.class_.NoConstructor + + Return an instance + """ + + +def test_qualname(doc): + """Tests that a properly qualified name is set in __qualname__ (even in pre-3.3, where we + backport the attribute) and that generated docstrings properly use it and the module name""" + assert m.NestBase.__qualname__ == "NestBase" + assert m.NestBase.Nested.__qualname__ == "NestBase.Nested" + + assert doc(m.NestBase.__init__) == """ + __init__(self: m.class_.NestBase) -> None + """ + assert doc(m.NestBase.g) == """ + g(self: m.class_.NestBase, arg0: m.class_.NestBase.Nested) -> None + """ + assert doc(m.NestBase.Nested.__init__) == """ + 
__init__(self: m.class_.NestBase.Nested) -> None + """ + assert doc(m.NestBase.Nested.fn) == """ + fn(self: m.class_.NestBase.Nested, arg0: int, arg1: m.class_.NestBase, arg2: m.class_.NestBase.Nested) -> None + """ # noqa: E501 line too long + assert doc(m.NestBase.Nested.fa) == """ + fa(self: m.class_.NestBase.Nested, a: int, b: m.class_.NestBase, c: m.class_.NestBase.Nested) -> None + """ # noqa: E501 line too long + assert m.NestBase.__module__ == "pybind11_tests.class_" + assert m.NestBase.Nested.__module__ == "pybind11_tests.class_" + + +def test_inheritance(msg): + roger = m.Rabbit('Rabbit') + assert roger.name() + " is a " + roger.species() == "Rabbit is a parrot" + assert m.pet_name_species(roger) == "Rabbit is a parrot" + + polly = m.Pet('Polly', 'parrot') + assert polly.name() + " is a " + polly.species() == "Polly is a parrot" + assert m.pet_name_species(polly) == "Polly is a parrot" + + molly = m.Dog('Molly') + assert molly.name() + " is a " + molly.species() == "Molly is a dog" + assert m.pet_name_species(molly) == "Molly is a dog" + + fred = m.Hamster('Fred') + assert fred.name() + " is a " + fred.species() == "Fred is a rodent" + + assert m.dog_bark(molly) == "Woof!" + + with pytest.raises(TypeError) as excinfo: + m.dog_bark(polly) + assert msg(excinfo.value) == """ + dog_bark(): incompatible function arguments. The following argument types are supported: + 1. (arg0: m.class_.Dog) -> str + + Invoked with: + """ + + with pytest.raises(TypeError) as excinfo: + m.Chimera("lion", "goat") + assert "No constructor defined!" in str(excinfo.value) + + +def test_inheritance_init(msg): + + # Single base + class Python(m.Pet): + def __init__(self): + pass + with pytest.raises(TypeError) as exc_info: + Python() + expected = ["m.class_.Pet.__init__() must be called when overriding __init__", + "Pet.__init__() must be called when overriding __init__"] # PyPy? + # TODO: fix PyPy error message wrt. tp_name/__qualname__? 
+ assert msg(exc_info.value) in expected + + # Multiple bases + class RabbitHamster(m.Rabbit, m.Hamster): + def __init__(self): + m.Rabbit.__init__(self, "RabbitHamster") + + with pytest.raises(TypeError) as exc_info: + RabbitHamster() + expected = ["m.class_.Hamster.__init__() must be called when overriding __init__", + "Hamster.__init__() must be called when overriding __init__"] # PyPy + assert msg(exc_info.value) in expected + + +def test_automatic_upcasting(): + assert type(m.return_class_1()).__name__ == "DerivedClass1" + assert type(m.return_class_2()).__name__ == "DerivedClass2" + assert type(m.return_none()).__name__ == "NoneType" + # Repeat these a few times in a random order to ensure no invalid caching is applied + assert type(m.return_class_n(1)).__name__ == "DerivedClass1" + assert type(m.return_class_n(2)).__name__ == "DerivedClass2" + assert type(m.return_class_n(0)).__name__ == "BaseClass" + assert type(m.return_class_n(2)).__name__ == "DerivedClass2" + assert type(m.return_class_n(2)).__name__ == "DerivedClass2" + assert type(m.return_class_n(0)).__name__ == "BaseClass" + assert type(m.return_class_n(1)).__name__ == "DerivedClass1" + + +def test_isinstance(): + objects = [tuple(), dict(), m.Pet("Polly", "parrot")] + [m.Dog("Molly")] * 4 + expected = (True, True, True, True, True, False, False) + assert m.check_instances(objects) == expected + + +def test_mismatched_holder(): + import re + + with pytest.raises(RuntimeError) as excinfo: + m.mismatched_holder_1() + assert re.match('generic_type: type ".*MismatchDerived1" does not have a non-default ' + 'holder type while its base ".*MismatchBase1" does', str(excinfo.value)) + + with pytest.raises(RuntimeError) as excinfo: + m.mismatched_holder_2() + assert re.match('generic_type: type ".*MismatchDerived2" has a non-default holder type ' + 'while its base ".*MismatchBase2" does not', str(excinfo.value)) + + +def test_override_static(): + """#511: problem with inheritance + overwritten def_static""" + 
b = m.MyBase.make() + d1 = m.MyDerived.make2() + d2 = m.MyDerived.make() + + assert isinstance(b, m.MyBase) + assert isinstance(d1, m.MyDerived) + assert isinstance(d2, m.MyDerived) + + +def test_implicit_conversion_life_support(): + """Ensure the lifetime of temporary objects created for implicit conversions""" + assert m.implicitly_convert_argument(UserType(5)) == 5 + assert m.implicitly_convert_variable(UserType(5)) == 5 + + assert "outside a bound function" in m.implicitly_convert_variable_fail(UserType(5)) + + +def test_operator_new_delete(capture): + """Tests that class-specific operator new/delete functions are invoked""" + + class SubAliased(m.AliasedHasOpNewDelSize): + pass + + with capture: + a = m.HasOpNewDel() + b = m.HasOpNewDelSize() + d = m.HasOpNewDelBoth() + assert capture == """ + A new 8 + B new 4 + D new 32 + """ + sz_alias = str(m.AliasedHasOpNewDelSize.size_alias) + sz_noalias = str(m.AliasedHasOpNewDelSize.size_noalias) + with capture: + c = m.AliasedHasOpNewDelSize() + c2 = SubAliased() + assert capture == ( + "C new " + sz_noalias + "\n" + + "C new " + sz_alias + "\n" + ) + + with capture: + del a + pytest.gc_collect() + del b + pytest.gc_collect() + del d + pytest.gc_collect() + assert capture == """ + A delete + B delete 4 + D delete + """ + + with capture: + del c + pytest.gc_collect() + del c2 + pytest.gc_collect() + assert capture == ( + "C delete " + sz_noalias + "\n" + + "C delete " + sz_alias + "\n" + ) + + +def test_bind_protected_functions(): + """Expose protected member functions to Python using a helper class""" + a = m.ProtectedA() + assert a.foo() == 42 + + b = m.ProtectedB() + assert b.foo() == 42 + + class C(m.ProtectedB): + def __init__(self): + m.ProtectedB.__init__(self) + + def foo(self): + return 0 + + c = C() + assert c.foo() == 0 + + +def test_brace_initialization(): + """ Tests that simple POD classes can be constructed using C++11 brace initialization """ + a = m.BraceInitialization(123, "test") + assert a.field1 == 
123 + assert a.field2 == "test" + + # Tests that a non-simple class doesn't get brace initialization (if the + # class defines an initializer_list constructor, in particular, it would + # win over the expected constructor). + b = m.NoBraceInitialization([123, 456]) + assert b.vec == [123, 456] + + +@pytest.mark.xfail("env.PYPY") +def test_class_refcount(): + """Instances must correctly increase/decrease the reference count of their types (#1029)""" + from sys import getrefcount + + class PyDog(m.Dog): + pass + + for cls in m.Dog, PyDog: + refcount_1 = getrefcount(cls) + molly = [cls("Molly") for _ in range(10)] + refcount_2 = getrefcount(cls) + + del molly + pytest.gc_collect() + refcount_3 = getrefcount(cls) + + assert refcount_1 == refcount_3 + assert refcount_2 > refcount_1 + + +def test_reentrant_implicit_conversion_failure(msg): + # ensure that there is no runaway reentrant implicit conversion (#1035) + with pytest.raises(TypeError) as excinfo: + m.BogusImplicitConversion(0) + assert msg(excinfo.value) == ''' + __init__(): incompatible constructor arguments. The following argument types are supported: + 1. 
m.class_.BogusImplicitConversion(arg0: m.class_.BogusImplicitConversion) + + Invoked with: 0 + ''' + + +def test_error_after_conversions(): + with pytest.raises(TypeError) as exc_info: + m.test_error_after_conversions("hello") + assert str(exc_info.value).startswith( + "Unable to convert function return value to a Python type!") + + +def test_aligned(): + if hasattr(m, "Aligned"): + p = m.Aligned().ptr() + assert p % 1024 == 0 + + +# https://foss.heptapod.net/pypy/pypy/-/issues/2742 +@pytest.mark.xfail("env.PYPY") +def test_final(): + with pytest.raises(TypeError) as exc_info: + class PyFinalChild(m.IsFinal): + pass + assert str(exc_info.value).endswith("is not an acceptable base type") + + +# https://foss.heptapod.net/pypy/pypy/-/issues/2742 +@pytest.mark.xfail("env.PYPY") +def test_non_final_final(): + with pytest.raises(TypeError) as exc_info: + class PyNonFinalFinalChild(m.IsNonFinalFinal): + pass + assert str(exc_info.value).endswith("is not an acceptable base type") + + +# https://github.com/pybind/pybind11/issues/1878 +def test_exception_rvalue_abort(): + with pytest.raises(RuntimeError): + m.PyPrintDestructor().throw_something() diff --git a/diffvg/pybind11/tests/test_cmake_build/CMakeLists.txt b/diffvg/pybind11/tests/test_cmake_build/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..0c0578ad3d3cead093940452968a0b165f2a3fdc --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/CMakeLists.txt @@ -0,0 +1,79 @@ +# Built-in in CMake 3.5+ +include(CMakeParseArguments) + +add_custom_target(test_cmake_build) + +function(pybind11_add_build_test name) + cmake_parse_arguments(ARG "INSTALL" "" "" ${ARGN}) + + set(build_options "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}") + + if(PYBIND11_FINDPYTHON) + list(APPEND build_options "-DPYBIND11_FINDPYTHON=${PYBIND11_FINDPYTHON}") + + if(DEFINED Python_ROOT_DIR) + list(APPEND build_options "-DPython_ROOT_DIR=${Python_ROOT_DIR}") + endif() + + list(APPEND build_options 
"-DPython_EXECUTABLE=${Python_EXECUTABLE}") + else() + list(APPEND build_options "-DPYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}") + endif() + + if(DEFINED CMAKE_CXX_STANDARD) + list(APPEND build_options "-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}") + endif() + + if(NOT ARG_INSTALL) + list(APPEND build_options "-DPYBIND11_PROJECT_DIR=${pybind11_SOURCE_DIR}") + else() + list(APPEND build_options "-DCMAKE_PREFIX_PATH=${pybind11_BINARY_DIR}/mock_install") + endif() + + add_custom_target( + test_build_${name} + ${CMAKE_CTEST_COMMAND} + --build-and-test + "${CMAKE_CURRENT_SOURCE_DIR}/${name}" + "${CMAKE_CURRENT_BINARY_DIR}/${name}" + --build-config + Release + --build-noclean + --build-generator + ${CMAKE_GENERATOR} + $<$:--build-generator-platform> + ${CMAKE_GENERATOR_PLATFORM} + --build-makeprogram + ${CMAKE_MAKE_PROGRAM} + --build-target + check_${name} + --build-options + ${build_options}) + if(ARG_INSTALL) + add_dependencies(test_build_${name} mock_install) + endif() + add_dependencies(test_cmake_build test_build_${name}) +endfunction() + +pybind11_add_build_test(subdirectory_function) +pybind11_add_build_test(subdirectory_target) +if("${PYTHON_MODULE_EXTENSION}" MATCHES "pypy" OR "${Python_INTERPRETER_ID}" STREQUAL "PyPy") + message(STATUS "Skipping embed test on PyPy") +else() + pybind11_add_build_test(subdirectory_embed) +endif() + +if(PYBIND11_INSTALL) + add_custom_target( + mock_install ${CMAKE_COMMAND} "-DCMAKE_INSTALL_PREFIX=${pybind11_BINARY_DIR}/mock_install" -P + "${pybind11_BINARY_DIR}/cmake_install.cmake") + + pybind11_add_build_test(installed_function INSTALL) + pybind11_add_build_test(installed_target INSTALL) + if(NOT ("${PYTHON_MODULE_EXTENSION}" MATCHES "pypy" OR "${Python_INTERPRETER_ID}" STREQUAL "PyPy" + )) + pybind11_add_build_test(installed_embed INSTALL) + endif() +endif() + +add_dependencies(check test_cmake_build) diff --git a/diffvg/pybind11/tests/test_cmake_build/embed.cpp b/diffvg/pybind11/tests/test_cmake_build/embed.cpp new file mode 100644 
index 0000000000000000000000000000000000000000..b9581d2fdb0a1629b9d0839acc033c20fecbe880 --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/embed.cpp @@ -0,0 +1,21 @@ +#include +namespace py = pybind11; + +PYBIND11_EMBEDDED_MODULE(test_cmake_build, m) { + m.def("add", [](int i, int j) { return i + j; }); +} + +int main(int argc, char *argv[]) { + if (argc != 2) + throw std::runtime_error("Expected test.py file as the first argument"); + auto test_py_file = argv[1]; + + py::scoped_interpreter guard{}; + + auto m = py::module::import("test_cmake_build"); + if (m.attr("add")(1, 2).cast() != 3) + throw std::runtime_error("embed.cpp failed"); + + py::module::import("sys").attr("argv") = py::make_tuple("test.py", "embed.cpp"); + py::eval_file(test_py_file, py::globals()); +} diff --git a/diffvg/pybind11/tests/test_cmake_build/installed_embed/CMakeLists.txt b/diffvg/pybind11/tests/test_cmake_build/installed_embed/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..64ae5c4bff13d32c06639d310aebb682ca376d4e --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/installed_embed/CMakeLists.txt @@ -0,0 +1,26 @@ +cmake_minimum_required(VERSION 3.4) + +# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with +# some versions of VS that have a patched CMake 3.11. 
This forces us to emulate +# the behavior using the following workaround: +if(${CMAKE_VERSION} VERSION_LESS 3.18) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +else() + cmake_policy(VERSION 3.18) +endif() + +project(test_installed_embed CXX) + +find_package(pybind11 CONFIG REQUIRED) +message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}") + +add_executable(test_installed_embed ../embed.cpp) +target_link_libraries(test_installed_embed PRIVATE pybind11::embed) +set_target_properties(test_installed_embed PROPERTIES OUTPUT_NAME test_cmake_build) + +# Do not treat includes from IMPORTED target as SYSTEM (Python headers in pybind11::embed). +# This may be needed to resolve header conflicts, e.g. between Python release and debug headers. +set_target_properties(test_installed_embed PROPERTIES NO_SYSTEM_FROM_IMPORTED ON) + +add_custom_target(check_installed_embed $ + ${PROJECT_SOURCE_DIR}/../test.py) diff --git a/diffvg/pybind11/tests/test_cmake_build/installed_function/CMakeLists.txt b/diffvg/pybind11/tests/test_cmake_build/installed_function/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..1a502863c0c64ad891ebe159a548f0c47dc6ce34 --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/installed_function/CMakeLists.txt @@ -0,0 +1,38 @@ +cmake_minimum_required(VERSION 3.4) +project(test_installed_module CXX) + +# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with +# some versions of VS that have a patched CMake 3.11. 
This forces us to emulate +# the behavior using the following workaround: +if(${CMAKE_VERSION} VERSION_LESS 3.18) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +else() + cmake_policy(VERSION 3.18) +endif() + +project(test_installed_function CXX) + +find_package(pybind11 CONFIG REQUIRED) +message( + STATUS "Found pybind11 v${pybind11_VERSION} ${pybind11_VERSION_TYPE}: ${pybind11_INCLUDE_DIRS}") + +pybind11_add_module(test_installed_function SHARED NO_EXTRAS ../main.cpp) +set_target_properties(test_installed_function PROPERTIES OUTPUT_NAME test_cmake_build) + +if(DEFINED Python_EXECUTABLE) + set(_Python_EXECUTABLE "${Python_EXECUTABLE}") +elseif(DEFINED PYTHON_EXECUTABLE) + set(_Python_EXECUTABLE "${PYTHON_EXECUTABLE}") +else() + message(FATAL_ERROR "No Python executable defined (should not be possible at this stage)") +endif() + +add_custom_target( + check_installed_function + ${CMAKE_COMMAND} + -E + env + PYTHONPATH=$ + ${_Python_EXECUTABLE} + ${PROJECT_SOURCE_DIR}/../test.py + ${PROJECT_NAME}) diff --git a/diffvg/pybind11/tests/test_cmake_build/installed_target/CMakeLists.txt b/diffvg/pybind11/tests/test_cmake_build/installed_target/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..b38eb77470e3efa45d8ceb490312b0461118ab82 --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/installed_target/CMakeLists.txt @@ -0,0 +1,45 @@ +cmake_minimum_required(VERSION 3.4) + +# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with +# some versions of VS that have a patched CMake 3.11. 
This forces us to emulate +# the behavior using the following workaround: +if(${CMAKE_VERSION} VERSION_LESS 3.18) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +else() + cmake_policy(VERSION 3.18) +endif() + +project(test_installed_target CXX) + +find_package(pybind11 CONFIG REQUIRED) +message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}") + +add_library(test_installed_target MODULE ../main.cpp) + +target_link_libraries(test_installed_target PRIVATE pybind11::module) +set_target_properties(test_installed_target PROPERTIES OUTPUT_NAME test_cmake_build) + +# Make sure result is, for example, test_installed_target.so, not libtest_installed_target.dylib +pybind11_extension(test_installed_target) + +# Do not treat includes from IMPORTED target as SYSTEM (Python headers in pybind11::module). +# This may be needed to resolve header conflicts, e.g. between Python release and debug headers. +set_target_properties(test_installed_target PROPERTIES NO_SYSTEM_FROM_IMPORTED ON) + +if(DEFINED Python_EXECUTABLE) + set(_Python_EXECUTABLE "${Python_EXECUTABLE}") +elseif(DEFINED PYTHON_EXECUTABLE) + set(_Python_EXECUTABLE "${PYTHON_EXECUTABLE}") +else() + message(FATAL_ERROR "No Python executable defined (should not be possible at this stage)") +endif() + +add_custom_target( + check_installed_target + ${CMAKE_COMMAND} + -E + env + PYTHONPATH=$ + ${_Python_EXECUTABLE} + ${PROJECT_SOURCE_DIR}/../test.py + ${PROJECT_NAME}) diff --git a/diffvg/pybind11/tests/test_cmake_build/main.cpp b/diffvg/pybind11/tests/test_cmake_build/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e30f2c4b9a31205185d2b221a994dc001a30730a --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/main.cpp @@ -0,0 +1,6 @@ +#include +namespace py = pybind11; + +PYBIND11_MODULE(test_cmake_build, m) { + m.def("add", [](int i, int j) { return i + j; }); +} diff --git a/diffvg/pybind11/tests/test_cmake_build/subdirectory_embed/CMakeLists.txt 
b/diffvg/pybind11/tests/test_cmake_build/subdirectory_embed/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..c7df0cf77c99b5ca39571fd00aea421bf647256d --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/subdirectory_embed/CMakeLists.txt @@ -0,0 +1,39 @@ +cmake_minimum_required(VERSION 3.4) + +# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with +# some versions of VS that have a patched CMake 3.11. This forces us to emulate +# the behavior using the following workaround: +if(${CMAKE_VERSION} VERSION_LESS 3.18) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +else() + cmake_policy(VERSION 3.18) +endif() + +project(test_subdirectory_embed CXX) + +set(PYBIND11_INSTALL + ON + CACHE BOOL "") +set(PYBIND11_EXPORT_NAME test_export) + +add_subdirectory(${PYBIND11_PROJECT_DIR} pybind11) + +# Test basic target functionality +add_executable(test_subdirectory_embed ../embed.cpp) +target_link_libraries(test_subdirectory_embed PRIVATE pybind11::embed) +set_target_properties(test_subdirectory_embed PROPERTIES OUTPUT_NAME test_cmake_build) + +add_custom_target(check_subdirectory_embed $ + ${PROJECT_SOURCE_DIR}/../test.py) + +# Test custom export group -- PYBIND11_EXPORT_NAME +add_library(test_embed_lib ../embed.cpp) +target_link_libraries(test_embed_lib PRIVATE pybind11::embed) + +install( + TARGETS test_embed_lib + EXPORT test_export + ARCHIVE DESTINATION bin + LIBRARY DESTINATION lib + RUNTIME DESTINATION lib) +install(EXPORT test_export DESTINATION lib/cmake/test_export/test_export-Targets.cmake) diff --git a/diffvg/pybind11/tests/test_cmake_build/subdirectory_function/CMakeLists.txt b/diffvg/pybind11/tests/test_cmake_build/subdirectory_function/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..624c600f8511bfc2950e702dfd453918375a79af --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/subdirectory_function/CMakeLists.txt @@ -0,0 +1,34 @@ 
+cmake_minimum_required(VERSION 3.4) + +# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with +# some versions of VS that have a patched CMake 3.11. This forces us to emulate +# the behavior using the following workaround: +if(${CMAKE_VERSION} VERSION_LESS 3.18) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +else() + cmake_policy(VERSION 3.18) +endif() + +project(test_subdirectory_function CXX) + +add_subdirectory("${PYBIND11_PROJECT_DIR}" pybind11) +pybind11_add_module(test_subdirectory_function ../main.cpp) +set_target_properties(test_subdirectory_function PROPERTIES OUTPUT_NAME test_cmake_build) + +if(DEFINED Python_EXECUTABLE) + set(_Python_EXECUTABLE "${Python_EXECUTABLE}") +elseif(DEFINED PYTHON_EXECUTABLE) + set(_Python_EXECUTABLE "${PYTHON_EXECUTABLE}") +else() + message(FATAL_ERROR "No Python executable defined (should not be possible at this stage)") +endif() + +add_custom_target( + check_subdirectory_function + ${CMAKE_COMMAND} + -E + env + PYTHONPATH=$ + ${_Python_EXECUTABLE} + ${PROJECT_SOURCE_DIR}/../test.py + ${PROJECT_NAME}) diff --git a/diffvg/pybind11/tests/test_cmake_build/subdirectory_target/CMakeLists.txt b/diffvg/pybind11/tests/test_cmake_build/subdirectory_target/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..2471941fb682951dd6e5224dbeee38a7a738862b --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/subdirectory_target/CMakeLists.txt @@ -0,0 +1,40 @@ +cmake_minimum_required(VERSION 3.4) + +# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with +# some versions of VS that have a patched CMake 3.11. 
This forces us to emulate +# the behavior using the following workaround: +if(${CMAKE_VERSION} VERSION_LESS 3.18) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +else() + cmake_policy(VERSION 3.18) +endif() + +project(test_subdirectory_target CXX) + +add_subdirectory(${PYBIND11_PROJECT_DIR} pybind11) + +add_library(test_subdirectory_target MODULE ../main.cpp) +set_target_properties(test_subdirectory_target PROPERTIES OUTPUT_NAME test_cmake_build) + +target_link_libraries(test_subdirectory_target PRIVATE pybind11::module) + +# Make sure result is, for example, test_installed_target.so, not libtest_installed_target.dylib +pybind11_extension(test_subdirectory_target) + +if(DEFINED Python_EXECUTABLE) + set(_Python_EXECUTABLE "${Python_EXECUTABLE}") +elseif(DEFINED PYTHON_EXECUTABLE) + set(_Python_EXECUTABLE "${PYTHON_EXECUTABLE}") +else() + message(FATAL_ERROR "No Python executable defined (should not be possible at this stage)") +endif() + +add_custom_target( + check_subdirectory_target + ${CMAKE_COMMAND} + -E + env + PYTHONPATH=$ + ${_Python_EXECUTABLE} + ${PROJECT_SOURCE_DIR}/../test.py + ${PROJECT_NAME}) diff --git a/diffvg/pybind11/tests/test_cmake_build/test.py b/diffvg/pybind11/tests/test_cmake_build/test.py new file mode 100644 index 0000000000000000000000000000000000000000..87ed5135ff415bab7a56bf4ab8dea3200fd53cca --- /dev/null +++ b/diffvg/pybind11/tests/test_cmake_build/test.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +import sys +import test_cmake_build + +assert test_cmake_build.add(1, 2) == 3 +print("{} imports, runs, and adds: 1 + 2 = 3".format(sys.argv[1])) diff --git a/diffvg/pybind11/tests/test_constants_and_functions.cpp b/diffvg/pybind11/tests/test_constants_and_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e8ec74b7bc77c9ddf87073d40e9a8c8c9c2115f0 --- /dev/null +++ b/diffvg/pybind11/tests/test_constants_and_functions.cpp @@ -0,0 +1,127 @@ +/* + tests/test_constants_and_functions.cpp -- 
global constants and functions, enumerations, raw byte strings + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" + +enum MyEnum { EFirstEntry = 1, ESecondEntry }; + +std::string test_function1() { + return "test_function()"; +} + +std::string test_function2(MyEnum k) { + return "test_function(enum=" + std::to_string(k) + ")"; +} + +std::string test_function3(int i) { + return "test_function(" + std::to_string(i) + ")"; +} + +py::str test_function4() { return "test_function()"; } +py::str test_function4(char *) { return "test_function(char *)"; } +py::str test_function4(int, float) { return "test_function(int, float)"; } +py::str test_function4(float, int) { return "test_function(float, int)"; } + +py::bytes return_bytes() { + const char *data = "\x01\x00\x02\x00"; + return std::string(data, 4); +} + +std::string print_bytes(py::bytes bytes) { + std::string ret = "bytes["; + const auto value = static_cast(bytes); + for (size_t i = 0; i < value.length(); ++i) { + ret += std::to_string(static_cast(value[i])) + " "; + } + ret.back() = ']'; + return ret; +} + +// Test that we properly handle C++17 exception specifiers (which are part of the function signature +// in C++17). These should all still work before C++17, but don't affect the function signature. 
+namespace test_exc_sp { +int f1(int x) noexcept { return x+1; } +int f2(int x) noexcept(true) { return x+2; } +int f3(int x) noexcept(false) { return x+3; } +#if defined(__GNUG__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wdeprecated" +#endif +int f4(int x) throw() { return x+4; } // Deprecated equivalent to noexcept(true) +#if defined(__GNUG__) +# pragma GCC diagnostic pop +#endif +struct C { + int m1(int x) noexcept { return x-1; } + int m2(int x) const noexcept { return x-2; } + int m3(int x) noexcept(true) { return x-3; } + int m4(int x) const noexcept(true) { return x-4; } + int m5(int x) noexcept(false) { return x-5; } + int m6(int x) const noexcept(false) { return x-6; } +#if defined(__GNUG__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wdeprecated" +#endif + int m7(int x) throw() { return x-7; } + int m8(int x) const throw() { return x-8; } +#if defined(__GNUG__) +# pragma GCC diagnostic pop +#endif +}; +} + + +TEST_SUBMODULE(constants_and_functions, m) { + // test_constants + m.attr("some_constant") = py::int_(14); + + // test_function_overloading + m.def("test_function", &test_function1); + m.def("test_function", &test_function2); + m.def("test_function", &test_function3); + +#if defined(PYBIND11_OVERLOAD_CAST) + m.def("test_function", py::overload_cast<>(&test_function4)); + m.def("test_function", py::overload_cast(&test_function4)); + m.def("test_function", py::overload_cast(&test_function4)); + m.def("test_function", py::overload_cast(&test_function4)); +#else + m.def("test_function", static_cast(&test_function4)); + m.def("test_function", static_cast(&test_function4)); + m.def("test_function", static_cast(&test_function4)); + m.def("test_function", static_cast(&test_function4)); +#endif + + py::enum_(m, "MyEnum") + .value("EFirstEntry", EFirstEntry) + .value("ESecondEntry", ESecondEntry) + .export_values(); + + // test_bytes + m.def("return_bytes", &return_bytes); + m.def("print_bytes", &print_bytes); + + 
// test_exception_specifiers + using namespace test_exc_sp; + py::class_(m, "C") + .def(py::init<>()) + .def("m1", &C::m1) + .def("m2", &C::m2) + .def("m3", &C::m3) + .def("m4", &C::m4) + .def("m5", &C::m5) + .def("m6", &C::m6) + .def("m7", &C::m7) + .def("m8", &C::m8) + ; + m.def("f1", f1); + m.def("f2", f2); + m.def("f3", f3); + m.def("f4", f4); +} diff --git a/diffvg/pybind11/tests/test_constants_and_functions.py b/diffvg/pybind11/tests/test_constants_and_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..36b1aa64b1201eb16f4424c968c774f68b7abec2 --- /dev/null +++ b/diffvg/pybind11/tests/test_constants_and_functions.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +from pybind11_tests import constants_and_functions as m + + +def test_constants(): + assert m.some_constant == 14 + + +def test_function_overloading(): + assert m.test_function() == "test_function()" + assert m.test_function(7) == "test_function(7)" + assert m.test_function(m.MyEnum.EFirstEntry) == "test_function(enum=1)" + assert m.test_function(m.MyEnum.ESecondEntry) == "test_function(enum=2)" + + assert m.test_function() == "test_function()" + assert m.test_function("abcd") == "test_function(char *)" + assert m.test_function(1, 1.0) == "test_function(int, float)" + assert m.test_function(1, 1.0) == "test_function(int, float)" + assert m.test_function(2.0, 2) == "test_function(float, int)" + + +def test_bytes(): + assert m.print_bytes(m.return_bytes()) == "bytes[1 0 2 0]" + + +def test_exception_specifiers(): + c = m.C() + assert c.m1(2) == 1 + assert c.m2(3) == 1 + assert c.m3(5) == 2 + assert c.m4(7) == 3 + assert c.m5(10) == 5 + assert c.m6(14) == 8 + assert c.m7(20) == 13 + assert c.m8(29) == 21 + + assert m.f1(33) == 34 + assert m.f2(53) == 55 + assert m.f3(86) == 89 + assert m.f4(140) == 144 diff --git a/diffvg/pybind11/tests/test_copy_move.cpp b/diffvg/pybind11/tests/test_copy_move.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..0f698bdf058dc53fceb21e504959fe334973bafb --- /dev/null +++ b/diffvg/pybind11/tests/test_copy_move.cpp @@ -0,0 +1,213 @@ +/* + tests/test_copy_move_policies.cpp -- 'copy' and 'move' return value policies + and related tests + + Copyright (c) 2016 Ben North + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" +#include + +template +struct empty { + static const derived& get_one() { return instance_; } + static derived instance_; +}; + +struct lacking_copy_ctor : public empty { + lacking_copy_ctor() {} + lacking_copy_ctor(const lacking_copy_ctor& other) = delete; +}; + +template <> lacking_copy_ctor empty::instance_ = {}; + +struct lacking_move_ctor : public empty { + lacking_move_ctor() {} + lacking_move_ctor(const lacking_move_ctor& other) = delete; + lacking_move_ctor(lacking_move_ctor&& other) = delete; +}; + +template <> lacking_move_ctor empty::instance_ = {}; + +/* Custom type caster move/copy test classes */ +class MoveOnlyInt { +public: + MoveOnlyInt() { print_default_created(this); } + MoveOnlyInt(int v) : value{std::move(v)} { print_created(this, value); } + MoveOnlyInt(MoveOnlyInt &&m) { print_move_created(this, m.value); std::swap(value, m.value); } + MoveOnlyInt &operator=(MoveOnlyInt &&m) { print_move_assigned(this, m.value); std::swap(value, m.value); return *this; } + MoveOnlyInt(const MoveOnlyInt &) = delete; + MoveOnlyInt &operator=(const MoveOnlyInt &) = delete; + ~MoveOnlyInt() { print_destroyed(this); } + + int value; +}; +class MoveOrCopyInt { +public: + MoveOrCopyInt() { print_default_created(this); } + MoveOrCopyInt(int v) : value{std::move(v)} { print_created(this, value); } + MoveOrCopyInt(MoveOrCopyInt &&m) { print_move_created(this, m.value); std::swap(value, m.value); } + MoveOrCopyInt &operator=(MoveOrCopyInt &&m) { print_move_assigned(this, m.value); 
std::swap(value, m.value); return *this; } + MoveOrCopyInt(const MoveOrCopyInt &c) { print_copy_created(this, c.value); value = c.value; } + MoveOrCopyInt &operator=(const MoveOrCopyInt &c) { print_copy_assigned(this, c.value); value = c.value; return *this; } + ~MoveOrCopyInt() { print_destroyed(this); } + + int value; +}; +class CopyOnlyInt { +public: + CopyOnlyInt() { print_default_created(this); } + CopyOnlyInt(int v) : value{std::move(v)} { print_created(this, value); } + CopyOnlyInt(const CopyOnlyInt &c) { print_copy_created(this, c.value); value = c.value; } + CopyOnlyInt &operator=(const CopyOnlyInt &c) { print_copy_assigned(this, c.value); value = c.value; return *this; } + ~CopyOnlyInt() { print_destroyed(this); } + + int value; +}; +PYBIND11_NAMESPACE_BEGIN(pybind11) +PYBIND11_NAMESPACE_BEGIN(detail) +template <> struct type_caster { + PYBIND11_TYPE_CASTER(MoveOnlyInt, _("MoveOnlyInt")); + bool load(handle src, bool) { value = MoveOnlyInt(src.cast()); return true; } + static handle cast(const MoveOnlyInt &m, return_value_policy r, handle p) { return pybind11::cast(m.value, r, p); } +}; + +template <> struct type_caster { + PYBIND11_TYPE_CASTER(MoveOrCopyInt, _("MoveOrCopyInt")); + bool load(handle src, bool) { value = MoveOrCopyInt(src.cast()); return true; } + static handle cast(const MoveOrCopyInt &m, return_value_policy r, handle p) { return pybind11::cast(m.value, r, p); } +}; + +template <> struct type_caster { +protected: + CopyOnlyInt value; +public: + static constexpr auto name = _("CopyOnlyInt"); + bool load(handle src, bool) { value = CopyOnlyInt(src.cast()); return true; } + static handle cast(const CopyOnlyInt &m, return_value_policy r, handle p) { return pybind11::cast(m.value, r, p); } + static handle cast(const CopyOnlyInt *src, return_value_policy policy, handle parent) { + if (!src) return none().release(); + return cast(*src, policy, parent); + } + operator CopyOnlyInt*() { return &value; } + operator CopyOnlyInt&() { return value; } + 
template using cast_op_type = pybind11::detail::cast_op_type; +}; +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(pybind11) + +TEST_SUBMODULE(copy_move_policies, m) { + // test_lacking_copy_ctor + py::class_(m, "lacking_copy_ctor") + .def_static("get_one", &lacking_copy_ctor::get_one, + py::return_value_policy::copy); + // test_lacking_move_ctor + py::class_(m, "lacking_move_ctor") + .def_static("get_one", &lacking_move_ctor::get_one, + py::return_value_policy::move); + + // test_move_and_copy_casts + m.def("move_and_copy_casts", [](py::object o) { + int r = 0; + r += py::cast(o).value; /* moves */ + r += py::cast(o).value; /* moves */ + r += py::cast(o).value; /* copies */ + MoveOrCopyInt m1(py::cast(o)); /* moves */ + MoveOnlyInt m2(py::cast(o)); /* moves */ + CopyOnlyInt m3(py::cast(o)); /* copies */ + r += m1.value + m2.value + m3.value; + + return r; + }); + + // test_move_and_copy_loads + m.def("move_only", [](MoveOnlyInt m) { return m.value; }); + m.def("move_or_copy", [](MoveOrCopyInt m) { return m.value; }); + m.def("copy_only", [](CopyOnlyInt m) { return m.value; }); + m.def("move_pair", [](std::pair p) { + return p.first.value + p.second.value; + }); + m.def("move_tuple", [](std::tuple t) { + return std::get<0>(t).value + std::get<1>(t).value + std::get<2>(t).value; + }); + m.def("copy_tuple", [](std::tuple t) { + return std::get<0>(t).value + std::get<1>(t).value; + }); + m.def("move_copy_nested", [](std::pair>, MoveOrCopyInt>> x) { + return x.first.value + std::get<0>(x.second.first).value + std::get<1>(x.second.first).value + + std::get<0>(std::get<2>(x.second.first)).value + x.second.second.value; + }); + m.def("move_and_copy_cstats", []() { + ConstructorStats::gc(); + // Reset counts to 0 so that previous tests don't affect later ones: + auto &mc = ConstructorStats::get(); + mc.move_assignments = mc.move_constructions = mc.copy_assignments = mc.copy_constructions = 0; + auto &mo = ConstructorStats::get(); + mo.move_assignments = 
mo.move_constructions = mo.copy_assignments = mo.copy_constructions = 0; + auto &co = ConstructorStats::get(); + co.move_assignments = co.move_constructions = co.copy_assignments = co.copy_constructions = 0; + py::dict d; + d["MoveOrCopyInt"] = py::cast(mc, py::return_value_policy::reference); + d["MoveOnlyInt"] = py::cast(mo, py::return_value_policy::reference); + d["CopyOnlyInt"] = py::cast(co, py::return_value_policy::reference); + return d; + }); +#ifdef PYBIND11_HAS_OPTIONAL + // test_move_and_copy_load_optional + m.attr("has_optional") = true; + m.def("move_optional", [](std::optional o) { + return o->value; + }); + m.def("move_or_copy_optional", [](std::optional o) { + return o->value; + }); + m.def("copy_optional", [](std::optional o) { + return o->value; + }); + m.def("move_optional_tuple", [](std::optional> x) { + return std::get<0>(*x).value + std::get<1>(*x).value + std::get<2>(*x).value; + }); +#else + m.attr("has_optional") = false; +#endif + + // #70 compilation issue if operator new is not public + struct PrivateOpNew { + int value = 1; + private: +#if defined(_MSC_VER) +# pragma warning(disable: 4822) // warning C4822: local class member function does not have a body +#endif + void *operator new(size_t bytes); + }; + py::class_(m, "PrivateOpNew").def_readonly("value", &PrivateOpNew::value); + m.def("private_op_new_value", []() { return PrivateOpNew(); }); + m.def("private_op_new_reference", []() -> const PrivateOpNew & { + static PrivateOpNew x{}; + return x; + }, py::return_value_policy::reference); + + // test_move_fallback + // #389: rvp::move should fall-through to copy on non-movable objects + struct MoveIssue1 { + int v; + MoveIssue1(int v) : v{v} {} + MoveIssue1(const MoveIssue1 &c) = default; + MoveIssue1(MoveIssue1 &&) = delete; + }; + py::class_(m, "MoveIssue1").def(py::init()).def_readwrite("value", &MoveIssue1::v); + + struct MoveIssue2 { + int v; + MoveIssue2(int v) : v{v} {} + MoveIssue2(MoveIssue2 &&) = default; + }; + py::class_(m, 
"MoveIssue2").def(py::init()).def_readwrite("value", &MoveIssue2::v); + + m.def("get_moveissue1", [](int i) { return new MoveIssue1(i); }, py::return_value_policy::move); + m.def("get_moveissue2", [](int i) { return MoveIssue2(i); }, py::return_value_policy::move); +} diff --git a/diffvg/pybind11/tests/test_copy_move.py b/diffvg/pybind11/tests/test_copy_move.py new file mode 100644 index 0000000000000000000000000000000000000000..6b53993a91187c5518212f6fc41ac3a1792cc1df --- /dev/null +++ b/diffvg/pybind11/tests/test_copy_move.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import copy_move_policies as m + + +def test_lacking_copy_ctor(): + with pytest.raises(RuntimeError) as excinfo: + m.lacking_copy_ctor.get_one() + assert "is non-copyable!" in str(excinfo.value) + + +def test_lacking_move_ctor(): + with pytest.raises(RuntimeError) as excinfo: + m.lacking_move_ctor.get_one() + assert "is neither movable nor copyable!" in str(excinfo.value) + + +def test_move_and_copy_casts(): + """Cast some values in C++ via custom type casters and count the number of moves/copies.""" + + cstats = m.move_and_copy_cstats() + c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"] + + # The type move constructions/assignments below each get incremented: the move assignment comes + # from the type_caster load; the move construction happens when extracting that via a cast or + # loading into an argument. 
+ assert m.move_and_copy_casts(3) == 18 + assert c_m.copy_assignments + c_m.copy_constructions == 0 + assert c_m.move_assignments == 2 + assert c_m.move_constructions >= 2 + assert c_mc.alive() == 0 + assert c_mc.copy_assignments + c_mc.copy_constructions == 0 + assert c_mc.move_assignments == 2 + assert c_mc.move_constructions >= 2 + assert c_c.alive() == 0 + assert c_c.copy_assignments == 2 + assert c_c.copy_constructions >= 2 + assert c_m.alive() + c_mc.alive() + c_c.alive() == 0 + + +def test_move_and_copy_loads(): + """Call some functions that load arguments via custom type casters and count the number of + moves/copies.""" + + cstats = m.move_and_copy_cstats() + c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"] + + assert m.move_only(10) == 10 # 1 move, c_m + assert m.move_or_copy(11) == 11 # 1 move, c_mc + assert m.copy_only(12) == 12 # 1 copy, c_c + assert m.move_pair((13, 14)) == 27 # 1 c_m move, 1 c_mc move + assert m.move_tuple((15, 16, 17)) == 48 # 2 c_m moves, 1 c_mc move + assert m.copy_tuple((18, 19)) == 37 # 2 c_c copies + # Direct constructions: 2 c_m moves, 2 c_mc moves, 1 c_c copy + # Extra moves/copies when moving pairs/tuples: 3 c_m, 3 c_mc, 2 c_c + assert m.move_copy_nested((1, ((2, 3, (4,)), 5))) == 15 + + assert c_m.copy_assignments + c_m.copy_constructions == 0 + assert c_m.move_assignments == 6 + assert c_m.move_constructions == 9 + assert c_mc.copy_assignments + c_mc.copy_constructions == 0 + assert c_mc.move_assignments == 5 + assert c_mc.move_constructions == 8 + assert c_c.copy_assignments == 4 + assert c_c.copy_constructions == 6 + assert c_m.alive() + c_mc.alive() + c_c.alive() == 0 + + +@pytest.mark.skipif(not m.has_optional, reason='no ') +def test_move_and_copy_load_optional(): + """Tests move/copy loads of std::optional arguments""" + + cstats = m.move_and_copy_cstats() + c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"] + + # The extra move/copy 
constructions below come from the std::optional move (which has to move + # its arguments): + assert m.move_optional(10) == 10 # c_m: 1 move assign, 2 move construct + assert m.move_or_copy_optional(11) == 11 # c_mc: 1 move assign, 2 move construct + assert m.copy_optional(12) == 12 # c_c: 1 copy assign, 2 copy construct + # 1 move assign + move construct moves each of c_m, c_mc, 1 c_c copy + # +1 move/copy construct each from moving the tuple + # +1 move/copy construct each from moving the optional (which moves the tuple again) + assert m.move_optional_tuple((3, 4, 5)) == 12 + + assert c_m.copy_assignments + c_m.copy_constructions == 0 + assert c_m.move_assignments == 2 + assert c_m.move_constructions == 5 + assert c_mc.copy_assignments + c_mc.copy_constructions == 0 + assert c_mc.move_assignments == 2 + assert c_mc.move_constructions == 5 + assert c_c.copy_assignments == 2 + assert c_c.copy_constructions == 5 + assert c_m.alive() + c_mc.alive() + c_c.alive() == 0 + + +def test_private_op_new(): + """An object with a private `operator new` cannot be returned by value""" + + with pytest.raises(RuntimeError) as excinfo: + m.private_op_new_value() + assert "is neither movable nor copyable" in str(excinfo.value) + + assert m.private_op_new_reference().value == 1 + + +def test_move_fallback(): + """#389: rvp::move should fall-through to copy on non-movable objects""" + + m2 = m.get_moveissue2(2) + assert m2.value == 2 + m1 = m.get_moveissue1(1) + assert m1.value == 1 diff --git a/diffvg/pybind11/tests/test_custom_type_casters.cpp b/diffvg/pybind11/tests/test_custom_type_casters.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9485d3cdb207b14fd74eb1d8afe1c31d92891b7b --- /dev/null +++ b/diffvg/pybind11/tests/test_custom_type_casters.cpp @@ -0,0 +1,125 @@ +/* + tests/test_custom_type_casters.cpp -- tests type_caster + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. 
Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" + + +// py::arg/py::arg_v testing: these arguments just record their argument when invoked +class ArgInspector1 { public: std::string arg = "(default arg inspector 1)"; }; +class ArgInspector2 { public: std::string arg = "(default arg inspector 2)"; }; +class ArgAlwaysConverts { }; +namespace pybind11 { namespace detail { +template <> struct type_caster { +public: + PYBIND11_TYPE_CASTER(ArgInspector1, _("ArgInspector1")); + + bool load(handle src, bool convert) { + value.arg = "loading ArgInspector1 argument " + + std::string(convert ? "WITH" : "WITHOUT") + " conversion allowed. " + "Argument value = " + (std::string) str(src); + return true; + } + + static handle cast(const ArgInspector1 &src, return_value_policy, handle) { + return str(src.arg).release(); + } +}; +template <> struct type_caster { +public: + PYBIND11_TYPE_CASTER(ArgInspector2, _("ArgInspector2")); + + bool load(handle src, bool convert) { + value.arg = "loading ArgInspector2 argument " + + std::string(convert ? "WITH" : "WITHOUT") + " conversion allowed. 
" + "Argument value = " + (std::string) str(src); + return true; + } + + static handle cast(const ArgInspector2 &src, return_value_policy, handle) { + return str(src.arg).release(); + } +}; +template <> struct type_caster { +public: + PYBIND11_TYPE_CASTER(ArgAlwaysConverts, _("ArgAlwaysConverts")); + + bool load(handle, bool convert) { + return convert; + } + + static handle cast(const ArgAlwaysConverts &, return_value_policy, handle) { + return py::none().release(); + } +}; +}} + +// test_custom_caster_destruction +class DestructionTester { +public: + DestructionTester() { print_default_created(this); } + ~DestructionTester() { print_destroyed(this); } + DestructionTester(const DestructionTester &) { print_copy_created(this); } + DestructionTester(DestructionTester &&) { print_move_created(this); } + DestructionTester &operator=(const DestructionTester &) { print_copy_assigned(this); return *this; } + DestructionTester &operator=(DestructionTester &&) { print_move_assigned(this); return *this; } +}; +namespace pybind11 { namespace detail { +template <> struct type_caster { + PYBIND11_TYPE_CASTER(DestructionTester, _("DestructionTester")); + bool load(handle, bool) { return true; } + + static handle cast(const DestructionTester &, return_value_policy, handle) { + return py::bool_(true).release(); + } +}; +}} + +TEST_SUBMODULE(custom_type_casters, m) { + // test_custom_type_casters + + // test_noconvert_args + // + // Test converting. The ArgAlwaysConverts is just there to make the first no-conversion pass + // fail so that our call always ends up happening via the second dispatch (the one that allows + // some conversion). 
+ class ArgInspector { + public: + ArgInspector1 f(ArgInspector1 a, ArgAlwaysConverts) { return a; } + std::string g(ArgInspector1 a, const ArgInspector1 &b, int c, ArgInspector2 *d, ArgAlwaysConverts) { + return a.arg + "\n" + b.arg + "\n" + std::to_string(c) + "\n" + d->arg; + } + static ArgInspector2 h(ArgInspector2 a, ArgAlwaysConverts) { return a; } + }; + py::class_(m, "ArgInspector") + .def(py::init<>()) + .def("f", &ArgInspector::f, py::arg(), py::arg() = ArgAlwaysConverts()) + .def("g", &ArgInspector::g, "a"_a.noconvert(), "b"_a, "c"_a.noconvert()=13, "d"_a=ArgInspector2(), py::arg() = ArgAlwaysConverts()) + .def_static("h", &ArgInspector::h, py::arg().noconvert(), py::arg() = ArgAlwaysConverts()) + ; + m.def("arg_inspect_func", [](ArgInspector2 a, ArgInspector1 b, ArgAlwaysConverts) { return a.arg + "\n" + b.arg; }, + py::arg().noconvert(false), py::arg_v(nullptr, ArgInspector1()).noconvert(true), py::arg() = ArgAlwaysConverts()); + + m.def("floats_preferred", [](double f) { return 0.5 * f; }, py::arg("f")); + m.def("floats_only", [](double f) { return 0.5 * f; }, py::arg("f").noconvert()); + m.def("ints_preferred", [](int i) { return i / 2; }, py::arg("i")); + m.def("ints_only", [](int i) { return i / 2; }, py::arg("i").noconvert()); + + // test_custom_caster_destruction + // Test that `take_ownership` works on types with a custom type caster when given a pointer + + // default policy: don't take ownership: + m.def("custom_caster_no_destroy", []() { static auto *dt = new DestructionTester(); return dt; }); + + m.def("custom_caster_destroy", []() { return new DestructionTester(); }, + py::return_value_policy::take_ownership); // Takes ownership: destroy when finished + m.def("custom_caster_destroy_const", []() -> const DestructionTester * { return new DestructionTester(); }, + py::return_value_policy::take_ownership); // Likewise (const doesn't inhibit destruction) + m.def("destruction_tester_cstats", &ConstructorStats::get, 
py::return_value_policy::reference); +} diff --git a/diffvg/pybind11/tests/test_custom_type_casters.py b/diffvg/pybind11/tests/test_custom_type_casters.py new file mode 100644 index 0000000000000000000000000000000000000000..9475c4516845632da6c6c5b918ae05401d8f3f01 --- /dev/null +++ b/diffvg/pybind11/tests/test_custom_type_casters.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import custom_type_casters as m + + +def test_noconvert_args(msg): + a = m.ArgInspector() + assert msg(a.f("hi")) == """ + loading ArgInspector1 argument WITH conversion allowed. Argument value = hi + """ + assert msg(a.g("this is a", "this is b")) == """ + loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a + loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b + 13 + loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2) + """ # noqa: E501 line too long + assert msg(a.g("this is a", "this is b", 42)) == """ + loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a + loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b + 42 + loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2) + """ # noqa: E501 line too long + assert msg(a.g("this is a", "this is b", 42, "this is d")) == """ + loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a + loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b + 42 + loading ArgInspector2 argument WITH conversion allowed. Argument value = this is d + """ + assert (a.h("arg 1") == + "loading ArgInspector2 argument WITHOUT conversion allowed. Argument value = arg 1") + assert msg(m.arg_inspect_func("A1", "A2")) == """ + loading ArgInspector2 argument WITH conversion allowed. Argument value = A1 + loading ArgInspector1 argument WITHOUT conversion allowed. 
Argument value = A2 + """ + + assert m.floats_preferred(4) == 2.0 + assert m.floats_only(4.0) == 2.0 + with pytest.raises(TypeError) as excinfo: + m.floats_only(4) + assert msg(excinfo.value) == """ + floats_only(): incompatible function arguments. The following argument types are supported: + 1. (f: float) -> float + + Invoked with: 4 + """ + + assert m.ints_preferred(4) == 2 + assert m.ints_preferred(True) == 0 + with pytest.raises(TypeError) as excinfo: + m.ints_preferred(4.0) + assert msg(excinfo.value) == """ + ints_preferred(): incompatible function arguments. The following argument types are supported: + 1. (i: int) -> int + + Invoked with: 4.0 + """ # noqa: E501 line too long + + assert m.ints_only(4) == 2 + with pytest.raises(TypeError) as excinfo: + m.ints_only(4.0) + assert msg(excinfo.value) == """ + ints_only(): incompatible function arguments. The following argument types are supported: + 1. (i: int) -> int + + Invoked with: 4.0 + """ + + +def test_custom_caster_destruction(): + """Tests that returning a pointer to a type that gets converted with a custom type caster gets + destroyed when the function has py::return_value_policy::take_ownership policy applied.""" + + cstats = m.destruction_tester_cstats() + # This one *doesn't* have take_ownership: the pointer should be used but not destroyed: + z = m.custom_caster_no_destroy() + assert cstats.alive() == 1 and cstats.default_constructions == 1 + assert z + + # take_ownership applied: this constructs a new object, casts it, then destroys it: + z = m.custom_caster_destroy() + assert z + assert cstats.default_constructions == 2 + + # Same, but with a const pointer return (which should *not* inhibit destruction): + z = m.custom_caster_destroy_const() + assert z + assert cstats.default_constructions == 3 + + # Make sure we still only have the original object (from ..._no_destroy()) alive: + assert cstats.alive() == 1 diff --git a/diffvg/pybind11/tests/test_docstring_options.cpp 
b/diffvg/pybind11/tests/test_docstring_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8c8f79fd5f6308caab1ee2d22525af2a408eca07 --- /dev/null +++ b/diffvg/pybind11/tests/test_docstring_options.cpp @@ -0,0 +1,61 @@ +/* + tests/test_docstring_options.cpp -- generation of docstrings and signatures + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" + +TEST_SUBMODULE(docstring_options, m) { + // test_docstring_options + { + py::options options; + options.disable_function_signatures(); + + m.def("test_function1", [](int, int) {}, py::arg("a"), py::arg("b")); + m.def("test_function2", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); + + m.def("test_overloaded1", [](int) {}, py::arg("i"), "Overload docstring"); + m.def("test_overloaded1", [](double) {}, py::arg("d")); + + m.def("test_overloaded2", [](int) {}, py::arg("i"), "overload docstring 1"); + m.def("test_overloaded2", [](double) {}, py::arg("d"), "overload docstring 2"); + + m.def("test_overloaded3", [](int) {}, py::arg("i")); + m.def("test_overloaded3", [](double) {}, py::arg("d"), "Overload docstr"); + + options.enable_function_signatures(); + + m.def("test_function3", [](int, int) {}, py::arg("a"), py::arg("b")); + m.def("test_function4", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); + + options.disable_function_signatures().disable_user_defined_docstrings(); + + m.def("test_function5", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); + + { + py::options nested_options; + nested_options.enable_user_defined_docstrings(); + m.def("test_function6", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); + } + } + + m.def("test_function7", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); + + { + py::options options; + 
options.disable_user_defined_docstrings(); + + struct DocstringTestFoo { + int value; + void setValue(int v) { value = v; } + int getValue() const { return value; } + }; + py::class_(m, "DocstringTestFoo", "This is a class docstring") + .def_property("value_prop", &DocstringTestFoo::getValue, &DocstringTestFoo::setValue, "This is a property docstring") + ; + } +} diff --git a/diffvg/pybind11/tests/test_docstring_options.py b/diffvg/pybind11/tests/test_docstring_options.py new file mode 100644 index 0000000000000000000000000000000000000000..80ade0f158c3fc7b8e21cf79461a430be7c82f3a --- /dev/null +++ b/diffvg/pybind11/tests/test_docstring_options.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +from pybind11_tests import docstring_options as m + + +def test_docstring_options(): + # options.disable_function_signatures() + assert not m.test_function1.__doc__ + + assert m.test_function2.__doc__ == "A custom docstring" + + # docstring specified on just the first overload definition: + assert m.test_overloaded1.__doc__ == "Overload docstring" + + # docstring on both overloads: + assert m.test_overloaded2.__doc__ == "overload docstring 1\noverload docstring 2" + + # docstring on only second overload: + assert m.test_overloaded3.__doc__ == "Overload docstr" + + # options.enable_function_signatures() + assert m.test_function3.__doc__ .startswith("test_function3(a: int, b: int) -> None") + + assert m.test_function4.__doc__ .startswith("test_function4(a: int, b: int) -> None") + assert m.test_function4.__doc__ .endswith("A custom docstring\n") + + # options.disable_function_signatures() + # options.disable_user_defined_docstrings() + assert not m.test_function5.__doc__ + + # nested options.enable_user_defined_docstrings() + assert m.test_function6.__doc__ == "A custom docstring" + + # RAII destructor + assert m.test_function7.__doc__ .startswith("test_function7(a: int, b: int) -> None") + assert m.test_function7.__doc__ .endswith("A custom docstring\n") + + # Suppression of 
user-defined docstrings for non-function objects + assert not m.DocstringTestFoo.__doc__ + assert not m.DocstringTestFoo.value_prop.__doc__ diff --git a/diffvg/pybind11/tests/test_eigen.cpp b/diffvg/pybind11/tests/test_eigen.cpp new file mode 100644 index 0000000000000000000000000000000000000000..56aa1a4a6fe6b60a1d85c54cd40ee70ddde3528f --- /dev/null +++ b/diffvg/pybind11/tests/test_eigen.cpp @@ -0,0 +1,327 @@ +/* + tests/eigen.cpp -- automatic conversion of Eigen types + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" +#include +#include + +#if defined(_MSC_VER) +# pragma warning(disable: 4996) // C4996: std::unary_negation is deprecated +#endif + +#include + +using MatrixXdR = Eigen::Matrix; + + + +// Sets/resets a testing reference matrix to have values of 10*r + c, where r and c are the +// (1-based) row/column number. +template void reset_ref(M &x) { + for (int i = 0; i < x.rows(); i++) for (int j = 0; j < x.cols(); j++) + x(i, j) = 11 + 10*i + j; +} + +// Returns a static, column-major matrix +Eigen::MatrixXd &get_cm() { + static Eigen::MatrixXd *x; + if (!x) { + x = new Eigen::MatrixXd(3, 3); + reset_ref(*x); + } + return *x; +} +// Likewise, but row-major +MatrixXdR &get_rm() { + static MatrixXdR *x; + if (!x) { + x = new MatrixXdR(3, 3); + reset_ref(*x); + } + return *x; +} +// Resets the values of the static matrices returned by get_cm()/get_rm() +void reset_refs() { + reset_ref(get_cm()); + reset_ref(get_rm()); +} + +// Returns element 2,1 from a matrix (used to test copy/nocopy) +double get_elem(Eigen::Ref m) { return m(2, 1); }; + + +// Returns a matrix with 10*r + 100*c added to each matrix element (to help test that the matrix +// reference is referencing rows/columns correctly). 
+template Eigen::MatrixXd adjust_matrix(MatrixArgType m) { + Eigen::MatrixXd ret(m); + for (int c = 0; c < m.cols(); c++) for (int r = 0; r < m.rows(); r++) + ret(r, c) += 10*r + 100*c; + return ret; +} + +struct CustomOperatorNew { + CustomOperatorNew() = default; + + Eigen::Matrix4d a = Eigen::Matrix4d::Zero(); + Eigen::Matrix4d b = Eigen::Matrix4d::Identity(); + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW; +}; + +TEST_SUBMODULE(eigen, m) { + using FixedMatrixR = Eigen::Matrix; + using FixedMatrixC = Eigen::Matrix; + using DenseMatrixR = Eigen::Matrix; + using DenseMatrixC = Eigen::Matrix; + using FourRowMatrixC = Eigen::Matrix; + using FourColMatrixC = Eigen::Matrix; + using FourRowMatrixR = Eigen::Matrix; + using FourColMatrixR = Eigen::Matrix; + using SparseMatrixR = Eigen::SparseMatrix; + using SparseMatrixC = Eigen::SparseMatrix; + + // various tests + m.def("double_col", [](const Eigen::VectorXf &x) -> Eigen::VectorXf { return 2.0f * x; }); + m.def("double_row", [](const Eigen::RowVectorXf &x) -> Eigen::RowVectorXf { return 2.0f * x; }); + m.def("double_complex", [](const Eigen::VectorXcf &x) -> Eigen::VectorXcf { return 2.0f * x; }); + m.def("double_threec", [](py::EigenDRef x) { x *= 2; }); + m.def("double_threer", [](py::EigenDRef x) { x *= 2; }); + m.def("double_mat_cm", [](Eigen::MatrixXf x) -> Eigen::MatrixXf { return 2.0f * x; }); + m.def("double_mat_rm", [](DenseMatrixR x) -> DenseMatrixR { return 2.0f * x; }); + + // test_eigen_ref_to_python + // Different ways of passing via Eigen::Ref; the first and second are the Eigen-recommended + m.def("cholesky1", [](Eigen::Ref x) -> Eigen::MatrixXd { return x.llt().matrixL(); }); + m.def("cholesky2", [](const Eigen::Ref &x) -> Eigen::MatrixXd { return x.llt().matrixL(); }); + m.def("cholesky3", [](const Eigen::Ref &x) -> Eigen::MatrixXd { return x.llt().matrixL(); }); + m.def("cholesky4", [](Eigen::Ref x) -> Eigen::MatrixXd { return x.llt().matrixL(); }); + + // test_eigen_ref_mutators + // Mutators: these add some 
value to the given element using Eigen, but Eigen should be mapping into + // the numpy array data and so the result should show up there. There are three versions: one that + // works on a contiguous-row matrix (numpy's default), one for a contiguous-column matrix, and one + // for any matrix. + auto add_rm = [](Eigen::Ref x, int r, int c, double v) { x(r,c) += v; }; + auto add_cm = [](Eigen::Ref x, int r, int c, double v) { x(r,c) += v; }; + + // Mutators (Eigen maps into numpy variables): + m.def("add_rm", add_rm); // Only takes row-contiguous + m.def("add_cm", add_cm); // Only takes column-contiguous + // Overloaded versions that will accept either row or column contiguous: + m.def("add1", add_rm); + m.def("add1", add_cm); + m.def("add2", add_cm); + m.def("add2", add_rm); + // This one accepts a matrix of any stride: + m.def("add_any", [](py::EigenDRef x, int r, int c, double v) { x(r,c) += v; }); + + // Return mutable references (numpy maps into eigen variables) + m.def("get_cm_ref", []() { return Eigen::Ref(get_cm()); }); + m.def("get_rm_ref", []() { return Eigen::Ref(get_rm()); }); + // The same references, but non-mutable (numpy maps into eigen variables, but is !writeable) + m.def("get_cm_const_ref", []() { return Eigen::Ref(get_cm()); }); + m.def("get_rm_const_ref", []() { return Eigen::Ref(get_rm()); }); + + m.def("reset_refs", reset_refs); // Restores get_{cm,rm}_ref to original values + + // Increments and returns ref to (same) matrix + m.def("incr_matrix", [](Eigen::Ref m, double v) { + m += Eigen::MatrixXd::Constant(m.rows(), m.cols(), v); + return m; + }, py::return_value_policy::reference); + + // Same, but accepts a matrix of any strides + m.def("incr_matrix_any", [](py::EigenDRef m, double v) { + m += Eigen::MatrixXd::Constant(m.rows(), m.cols(), v); + return m; + }, py::return_value_policy::reference); + + // Returns an eigen slice of even rows + m.def("even_rows", [](py::EigenDRef m) { + return py::EigenDMap( + m.data(), (m.rows() + 1) / 2, 
m.cols(), + py::EigenDStride(m.outerStride(), 2 * m.innerStride())); + }, py::return_value_policy::reference); + + // Returns an eigen slice of even columns + m.def("even_cols", [](py::EigenDRef m) { + return py::EigenDMap( + m.data(), m.rows(), (m.cols() + 1) / 2, + py::EigenDStride(2 * m.outerStride(), m.innerStride())); + }, py::return_value_policy::reference); + + // Returns diagonals: a vector-like object with an inner stride != 1 + m.def("diagonal", [](const Eigen::Ref &x) { return x.diagonal(); }); + m.def("diagonal_1", [](const Eigen::Ref &x) { return x.diagonal<1>(); }); + m.def("diagonal_n", [](const Eigen::Ref &x, int index) { return x.diagonal(index); }); + + // Return a block of a matrix (gives non-standard strides) + m.def("block", [](const Eigen::Ref &x, int start_row, int start_col, int block_rows, int block_cols) { + return x.block(start_row, start_col, block_rows, block_cols); + }); + + // test_eigen_return_references, test_eigen_keepalive + // return value referencing/copying tests: + class ReturnTester { + Eigen::MatrixXd mat = create(); + public: + ReturnTester() { print_created(this); } + ~ReturnTester() { print_destroyed(this); } + static Eigen::MatrixXd create() { return Eigen::MatrixXd::Ones(10, 10); } + static const Eigen::MatrixXd createConst() { return Eigen::MatrixXd::Ones(10, 10); } + Eigen::MatrixXd &get() { return mat; } + Eigen::MatrixXd *getPtr() { return &mat; } + const Eigen::MatrixXd &view() { return mat; } + const Eigen::MatrixXd *viewPtr() { return &mat; } + Eigen::Ref ref() { return mat; } + Eigen::Ref refConst() { return mat; } + Eigen::Block block(int r, int c, int nrow, int ncol) { return mat.block(r, c, nrow, ncol); } + Eigen::Block blockConst(int r, int c, int nrow, int ncol) const { return mat.block(r, c, nrow, ncol); } + py::EigenDMap corners() { return py::EigenDMap(mat.data(), + py::EigenDStride(mat.outerStride() * (mat.outerSize()-1), mat.innerStride() * (mat.innerSize()-1))); } + py::EigenDMap cornersConst() const 
{ return py::EigenDMap(mat.data(), + py::EigenDStride(mat.outerStride() * (mat.outerSize()-1), mat.innerStride() * (mat.innerSize()-1))); } + }; + using rvp = py::return_value_policy; + py::class_(m, "ReturnTester") + .def(py::init<>()) + .def_static("create", &ReturnTester::create) + .def_static("create_const", &ReturnTester::createConst) + .def("get", &ReturnTester::get, rvp::reference_internal) + .def("get_ptr", &ReturnTester::getPtr, rvp::reference_internal) + .def("view", &ReturnTester::view, rvp::reference_internal) + .def("view_ptr", &ReturnTester::view, rvp::reference_internal) + .def("copy_get", &ReturnTester::get) // Default rvp: copy + .def("copy_view", &ReturnTester::view) // " + .def("ref", &ReturnTester::ref) // Default for Ref is to reference + .def("ref_const", &ReturnTester::refConst) // Likewise, but const + .def("ref_safe", &ReturnTester::ref, rvp::reference_internal) + .def("ref_const_safe", &ReturnTester::refConst, rvp::reference_internal) + .def("copy_ref", &ReturnTester::ref, rvp::copy) + .def("copy_ref_const", &ReturnTester::refConst, rvp::copy) + .def("block", &ReturnTester::block) + .def("block_safe", &ReturnTester::block, rvp::reference_internal) + .def("block_const", &ReturnTester::blockConst, rvp::reference_internal) + .def("copy_block", &ReturnTester::block, rvp::copy) + .def("corners", &ReturnTester::corners, rvp::reference_internal) + .def("corners_const", &ReturnTester::cornersConst, rvp::reference_internal) + ; + + // test_special_matrix_objects + // Returns a DiagonalMatrix with diagonal (1,2,3,...) 
+ m.def("incr_diag", [](int k) { + Eigen::DiagonalMatrix m(k); + for (int i = 0; i < k; i++) m.diagonal()[i] = i+1; + return m; + }); + + // Returns a SelfAdjointView referencing the lower triangle of m + m.def("symmetric_lower", [](const Eigen::MatrixXi &m) { + return m.selfadjointView(); + }); + // Returns a SelfAdjointView referencing the lower triangle of m + m.def("symmetric_upper", [](const Eigen::MatrixXi &m) { + return m.selfadjointView(); + }); + + // Test matrix for various functions below. + Eigen::MatrixXf mat(5, 6); + mat << 0, 3, 0, 0, 0, 11, + 22, 0, 0, 0, 17, 11, + 7, 5, 0, 1, 0, 11, + 0, 0, 0, 0, 0, 11, + 0, 0, 14, 0, 8, 11; + + // test_fixed, and various other tests + m.def("fixed_r", [mat]() -> FixedMatrixR { return FixedMatrixR(mat); }); + m.def("fixed_r_const", [mat]() -> const FixedMatrixR { return FixedMatrixR(mat); }); + m.def("fixed_c", [mat]() -> FixedMatrixC { return FixedMatrixC(mat); }); + m.def("fixed_copy_r", [](const FixedMatrixR &m) -> FixedMatrixR { return m; }); + m.def("fixed_copy_c", [](const FixedMatrixC &m) -> FixedMatrixC { return m; }); + // test_mutator_descriptors + m.def("fixed_mutator_r", [](Eigen::Ref) {}); + m.def("fixed_mutator_c", [](Eigen::Ref) {}); + m.def("fixed_mutator_a", [](py::EigenDRef) {}); + // test_dense + m.def("dense_r", [mat]() -> DenseMatrixR { return DenseMatrixR(mat); }); + m.def("dense_c", [mat]() -> DenseMatrixC { return DenseMatrixC(mat); }); + m.def("dense_copy_r", [](const DenseMatrixR &m) -> DenseMatrixR { return m; }); + m.def("dense_copy_c", [](const DenseMatrixC &m) -> DenseMatrixC { return m; }); + // test_sparse, test_sparse_signature + m.def("sparse_r", [mat]() -> SparseMatrixR { return Eigen::SparseView(mat); }); + m.def("sparse_c", [mat]() -> SparseMatrixC { return Eigen::SparseView(mat); }); + m.def("sparse_copy_r", [](const SparseMatrixR &m) -> SparseMatrixR { return m; }); + m.def("sparse_copy_c", [](const SparseMatrixC &m) -> SparseMatrixC { return m; }); + // test_partially_fixed + 
m.def("partial_copy_four_rm_r", [](const FourRowMatrixR &m) -> FourRowMatrixR { return m; }); + m.def("partial_copy_four_rm_c", [](const FourColMatrixR &m) -> FourColMatrixR { return m; }); + m.def("partial_copy_four_cm_r", [](const FourRowMatrixC &m) -> FourRowMatrixC { return m; }); + m.def("partial_copy_four_cm_c", [](const FourColMatrixC &m) -> FourColMatrixC { return m; }); + + // test_cpp_casting + // Test that we can cast a numpy object to a Eigen::MatrixXd explicitly + m.def("cpp_copy", [](py::handle m) { return m.cast()(1, 0); }); + m.def("cpp_ref_c", [](py::handle m) { return m.cast>()(1, 0); }); + m.def("cpp_ref_r", [](py::handle m) { return m.cast>()(1, 0); }); + m.def("cpp_ref_any", [](py::handle m) { return m.cast>()(1, 0); }); + + + // test_nocopy_wrapper + // Test that we can prevent copying into an argument that would normally copy: First a version + // that would allow copying (if types or strides don't match) for comparison: + m.def("get_elem", &get_elem); + // Now this alternative that calls the tells pybind to fail rather than copy: + m.def("get_elem_nocopy", [](Eigen::Ref m) -> double { return get_elem(m); }, + py::arg().noconvert()); + // Also test a row-major-only no-copy const ref: + m.def("get_elem_rm_nocopy", [](Eigen::Ref> &m) -> long { return m(2, 1); }, + py::arg().noconvert()); + + // test_issue738 + // Issue #738: 1xN or Nx1 2D matrices were neither accepted nor properly copied with an + // incompatible stride value on the length-1 dimension--but that should be allowed (without + // requiring a copy!) because the stride value can be safely ignored on a size-1 dimension. 
+ m.def("iss738_f1", &adjust_matrix &>, py::arg().noconvert()); + m.def("iss738_f2", &adjust_matrix> &>, py::arg().noconvert()); + + // test_issue1105 + // Issue #1105: when converting from a numpy two-dimensional (Nx1) or (1xN) value into a dense + // eigen Vector or RowVector, the argument would fail to load because the numpy copy would fail: + // numpy won't broadcast a Nx1 into a 1-dimensional vector. + m.def("iss1105_col", [](Eigen::VectorXd) { return true; }); + m.def("iss1105_row", [](Eigen::RowVectorXd) { return true; }); + + // test_named_arguments + // Make sure named arguments are working properly: + m.def("matrix_multiply", [](const py::EigenDRef A, const py::EigenDRef B) + -> Eigen::MatrixXd { + if (A.cols() != B.rows()) throw std::domain_error("Nonconformable matrices!"); + return A * B; + }, py::arg("A"), py::arg("B")); + + // test_custom_operator_new + py::class_(m, "CustomOperatorNew") + .def(py::init<>()) + .def_readonly("a", &CustomOperatorNew::a) + .def_readonly("b", &CustomOperatorNew::b); + + // test_eigen_ref_life_support + // In case of a failure (the caster's temp array does not live long enough), creating + // a new array (np.ones(10)) increases the chances that the temp array will be garbage + // collected and/or that its memory will be overridden with different values. 
+ m.def("get_elem_direct", [](Eigen::Ref v) { + py::module::import("numpy").attr("ones")(10); + return v(5); + }); + m.def("get_elem_indirect", [](std::vector> v) { + py::module::import("numpy").attr("ones")(10); + return v[0](5); + }); +} diff --git a/diffvg/pybind11/tests/test_eigen.py b/diffvg/pybind11/tests/test_eigen.py new file mode 100644 index 0000000000000000000000000000000000000000..ac68471474a869b59f786fd35cc69a3f2f1b27d5 --- /dev/null +++ b/diffvg/pybind11/tests/test_eigen.py @@ -0,0 +1,697 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import ConstructorStats + +np = pytest.importorskip("numpy") +m = pytest.importorskip("pybind11_tests.eigen") + + +ref = np.array([[ 0., 3, 0, 0, 0, 11], + [22, 0, 0, 0, 17, 11], + [ 7, 5, 0, 1, 0, 11], + [ 0, 0, 0, 0, 0, 11], + [ 0, 0, 14, 0, 8, 11]]) + + +def assert_equal_ref(mat): + np.testing.assert_array_equal(mat, ref) + + +def assert_sparse_equal_ref(sparse_mat): + assert_equal_ref(sparse_mat.toarray()) + + +def test_fixed(): + assert_equal_ref(m.fixed_c()) + assert_equal_ref(m.fixed_r()) + assert_equal_ref(m.fixed_copy_r(m.fixed_r())) + assert_equal_ref(m.fixed_copy_c(m.fixed_c())) + assert_equal_ref(m.fixed_copy_r(m.fixed_c())) + assert_equal_ref(m.fixed_copy_c(m.fixed_r())) + + +def test_dense(): + assert_equal_ref(m.dense_r()) + assert_equal_ref(m.dense_c()) + assert_equal_ref(m.dense_copy_r(m.dense_r())) + assert_equal_ref(m.dense_copy_c(m.dense_c())) + assert_equal_ref(m.dense_copy_r(m.dense_c())) + assert_equal_ref(m.dense_copy_c(m.dense_r())) + + +def test_partially_fixed(): + ref2 = np.array([[0., 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]) + np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2), ref2) + np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2), ref2) + np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, 1]), ref2[:, [1]]) + np.testing.assert_array_equal(m.partial_copy_four_rm_c(ref2[0, :]), ref2[[0], :]) + 
np.testing.assert_array_equal(m.partial_copy_four_rm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)]) + np.testing.assert_array_equal( + m.partial_copy_four_rm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :]) + + np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2), ref2) + np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2), ref2) + np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, 1]), ref2[:, [1]]) + np.testing.assert_array_equal(m.partial_copy_four_cm_c(ref2[0, :]), ref2[[0], :]) + np.testing.assert_array_equal(m.partial_copy_four_cm_r(ref2[:, (0, 2)]), ref2[:, (0, 2)]) + np.testing.assert_array_equal( + m.partial_copy_four_cm_c(ref2[(3, 1, 2), :]), ref2[(3, 1, 2), :]) + + # TypeError should be raise for a shape mismatch + functions = [m.partial_copy_four_rm_r, m.partial_copy_four_rm_c, + m.partial_copy_four_cm_r, m.partial_copy_four_cm_c] + matrix_with_wrong_shape = [[1, 2], + [3, 4]] + for f in functions: + with pytest.raises(TypeError) as excinfo: + f(matrix_with_wrong_shape) + assert "incompatible function arguments" in str(excinfo.value) + + +def test_mutator_descriptors(): + zr = np.arange(30, dtype='float32').reshape(5, 6) # row-major + zc = zr.reshape(6, 5).transpose() # column-major + + m.fixed_mutator_r(zr) + m.fixed_mutator_c(zc) + m.fixed_mutator_a(zr) + m.fixed_mutator_a(zc) + with pytest.raises(TypeError) as excinfo: + m.fixed_mutator_r(zc) + assert ('(arg0: numpy.ndarray[numpy.float32[5, 6],' + ' flags.writeable, flags.c_contiguous]) -> None' + in str(excinfo.value)) + with pytest.raises(TypeError) as excinfo: + m.fixed_mutator_c(zr) + assert ('(arg0: numpy.ndarray[numpy.float32[5, 6],' + ' flags.writeable, flags.f_contiguous]) -> None' + in str(excinfo.value)) + with pytest.raises(TypeError) as excinfo: + m.fixed_mutator_a(np.array([[1, 2], [3, 4]], dtype='float32')) + assert ('(arg0: numpy.ndarray[numpy.float32[5, 6], flags.writeable]) -> None' + in str(excinfo.value)) + zr.flags.writeable = False + with pytest.raises(TypeError): + 
m.fixed_mutator_r(zr) + with pytest.raises(TypeError): + m.fixed_mutator_a(zr) + + +def test_cpp_casting(): + assert m.cpp_copy(m.fixed_r()) == 22. + assert m.cpp_copy(m.fixed_c()) == 22. + z = np.array([[5., 6], [7, 8]]) + assert m.cpp_copy(z) == 7. + assert m.cpp_copy(m.get_cm_ref()) == 21. + assert m.cpp_copy(m.get_rm_ref()) == 21. + assert m.cpp_ref_c(m.get_cm_ref()) == 21. + assert m.cpp_ref_r(m.get_rm_ref()) == 21. + with pytest.raises(RuntimeError) as excinfo: + # Can't reference m.fixed_c: it contains floats, m.cpp_ref_any wants doubles + m.cpp_ref_any(m.fixed_c()) + assert 'Unable to cast Python instance' in str(excinfo.value) + with pytest.raises(RuntimeError) as excinfo: + # Can't reference m.fixed_r: it contains floats, m.cpp_ref_any wants doubles + m.cpp_ref_any(m.fixed_r()) + assert 'Unable to cast Python instance' in str(excinfo.value) + assert m.cpp_ref_any(m.ReturnTester.create()) == 1. + + assert m.cpp_ref_any(m.get_cm_ref()) == 21. + assert m.cpp_ref_any(m.get_cm_ref()) == 21. 
+ + +def test_pass_readonly_array(): + z = np.full((5, 6), 42.0) + z.flags.writeable = False + np.testing.assert_array_equal(z, m.fixed_copy_r(z)) + np.testing.assert_array_equal(m.fixed_r_const(), m.fixed_r()) + assert not m.fixed_r_const().flags.writeable + np.testing.assert_array_equal(m.fixed_copy_r(m.fixed_r_const()), m.fixed_r_const()) + + +def test_nonunit_stride_from_python(): + counting_mat = np.arange(9.0, dtype=np.float32).reshape((3, 3)) + second_row = counting_mat[1, :] + second_col = counting_mat[:, 1] + np.testing.assert_array_equal(m.double_row(second_row), 2.0 * second_row) + np.testing.assert_array_equal(m.double_col(second_row), 2.0 * second_row) + np.testing.assert_array_equal(m.double_complex(second_row), 2.0 * second_row) + np.testing.assert_array_equal(m.double_row(second_col), 2.0 * second_col) + np.testing.assert_array_equal(m.double_col(second_col), 2.0 * second_col) + np.testing.assert_array_equal(m.double_complex(second_col), 2.0 * second_col) + + counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3)) + slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]] + for ref_mat in slices: + np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat) + np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat) + + # Mutator: + m.double_threer(second_row) + m.double_threec(second_col) + np.testing.assert_array_equal(counting_mat, [[0., 2, 2], [6, 16, 10], [6, 14, 8]]) + + +def test_negative_stride_from_python(msg): + """Eigen doesn't support (as of yet) negative strides. When a function takes an Eigen matrix by + copy or const reference, we can pass a numpy array that has negative strides. 
Otherwise, an + exception will be thrown as Eigen will not be able to map the numpy array.""" + + counting_mat = np.arange(9.0, dtype=np.float32).reshape((3, 3)) + counting_mat = counting_mat[::-1, ::-1] + second_row = counting_mat[1, :] + second_col = counting_mat[:, 1] + np.testing.assert_array_equal(m.double_row(second_row), 2.0 * second_row) + np.testing.assert_array_equal(m.double_col(second_row), 2.0 * second_row) + np.testing.assert_array_equal(m.double_complex(second_row), 2.0 * second_row) + np.testing.assert_array_equal(m.double_row(second_col), 2.0 * second_col) + np.testing.assert_array_equal(m.double_col(second_col), 2.0 * second_col) + np.testing.assert_array_equal(m.double_complex(second_col), 2.0 * second_col) + + counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3)) + counting_3d = counting_3d[::-1, ::-1, ::-1] + slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]] + for ref_mat in slices: + np.testing.assert_array_equal(m.double_mat_cm(ref_mat), 2.0 * ref_mat) + np.testing.assert_array_equal(m.double_mat_rm(ref_mat), 2.0 * ref_mat) + + # Mutator: + with pytest.raises(TypeError) as excinfo: + m.double_threer(second_row) + assert msg(excinfo.value) == """ + double_threer(): incompatible function arguments. The following argument types are supported: + 1. (arg0: numpy.ndarray[numpy.float32[1, 3], flags.writeable]) -> None + + Invoked with: """ + repr(np.array([ 5., 4., 3.], dtype='float32')) # noqa: E501 line too long + + with pytest.raises(TypeError) as excinfo: + m.double_threec(second_col) + assert msg(excinfo.value) == """ + double_threec(): incompatible function arguments. The following argument types are supported: + 1. 
(arg0: numpy.ndarray[numpy.float32[3, 1], flags.writeable]) -> None + + Invoked with: """ + repr(np.array([ 7., 4., 1.], dtype='float32')) # noqa: E501 line too long + + +def test_nonunit_stride_to_python(): + assert np.all(m.diagonal(ref) == ref.diagonal()) + assert np.all(m.diagonal_1(ref) == ref.diagonal(1)) + for i in range(-5, 7): + assert np.all(m.diagonal_n(ref, i) == ref.diagonal(i)), "m.diagonal_n({})".format(i) + + assert np.all(m.block(ref, 2, 1, 3, 3) == ref[2:5, 1:4]) + assert np.all(m.block(ref, 1, 4, 4, 2) == ref[1:, 4:]) + assert np.all(m.block(ref, 1, 4, 3, 2) == ref[1:4, 4:]) + + +def test_eigen_ref_to_python(): + chols = [m.cholesky1, m.cholesky2, m.cholesky3, m.cholesky4] + for i, chol in enumerate(chols, start=1): + mymat = chol(np.array([[1., 2, 4], [2, 13, 23], [4, 23, 77]])) + assert np.all(mymat == np.array([[1, 0, 0], [2, 3, 0], [4, 5, 6]])), "cholesky{}".format(i) + + +def assign_both(a1, a2, r, c, v): + a1[r, c] = v + a2[r, c] = v + + +def array_copy_but_one(a, r, c, v): + z = np.array(a, copy=True) + z[r, c] = v + return z + + +def test_eigen_return_references(): + """Tests various ways of returning references and non-referencing copies""" + + master = np.ones((10, 10)) + a = m.ReturnTester() + a_get1 = a.get() + assert not a_get1.flags.owndata and a_get1.flags.writeable + assign_both(a_get1, master, 3, 3, 5) + a_get2 = a.get_ptr() + assert not a_get2.flags.owndata and a_get2.flags.writeable + assign_both(a_get1, master, 2, 3, 6) + + a_view1 = a.view() + assert not a_view1.flags.owndata and not a_view1.flags.writeable + with pytest.raises(ValueError): + a_view1[2, 3] = 4 + a_view2 = a.view_ptr() + assert not a_view2.flags.owndata and not a_view2.flags.writeable + with pytest.raises(ValueError): + a_view2[2, 3] = 4 + + a_copy1 = a.copy_get() + assert a_copy1.flags.owndata and a_copy1.flags.writeable + np.testing.assert_array_equal(a_copy1, master) + a_copy1[7, 7] = -44 # Shouldn't affect anything else + c1want = 
array_copy_but_one(master, 7, 7, -44) + a_copy2 = a.copy_view() + assert a_copy2.flags.owndata and a_copy2.flags.writeable + np.testing.assert_array_equal(a_copy2, master) + a_copy2[4, 4] = -22 # Shouldn't affect anything else + c2want = array_copy_but_one(master, 4, 4, -22) + + a_ref1 = a.ref() + assert not a_ref1.flags.owndata and a_ref1.flags.writeable + assign_both(a_ref1, master, 1, 1, 15) + a_ref2 = a.ref_const() + assert not a_ref2.flags.owndata and not a_ref2.flags.writeable + with pytest.raises(ValueError): + a_ref2[5, 5] = 33 + a_ref3 = a.ref_safe() + assert not a_ref3.flags.owndata and a_ref3.flags.writeable + assign_both(a_ref3, master, 0, 7, 99) + a_ref4 = a.ref_const_safe() + assert not a_ref4.flags.owndata and not a_ref4.flags.writeable + with pytest.raises(ValueError): + a_ref4[7, 0] = 987654321 + + a_copy3 = a.copy_ref() + assert a_copy3.flags.owndata and a_copy3.flags.writeable + np.testing.assert_array_equal(a_copy3, master) + a_copy3[8, 1] = 11 + c3want = array_copy_but_one(master, 8, 1, 11) + a_copy4 = a.copy_ref_const() + assert a_copy4.flags.owndata and a_copy4.flags.writeable + np.testing.assert_array_equal(a_copy4, master) + a_copy4[8, 4] = 88 + c4want = array_copy_but_one(master, 8, 4, 88) + + a_block1 = a.block(3, 3, 2, 2) + assert not a_block1.flags.owndata and a_block1.flags.writeable + a_block1[0, 0] = 55 + master[3, 3] = 55 + a_block2 = a.block_safe(2, 2, 3, 2) + assert not a_block2.flags.owndata and a_block2.flags.writeable + a_block2[2, 1] = -123 + master[4, 3] = -123 + a_block3 = a.block_const(6, 7, 4, 3) + assert not a_block3.flags.owndata and not a_block3.flags.writeable + with pytest.raises(ValueError): + a_block3[2, 2] = -44444 + + a_copy5 = a.copy_block(2, 2, 2, 3) + assert a_copy5.flags.owndata and a_copy5.flags.writeable + np.testing.assert_array_equal(a_copy5, master[2:4, 2:5]) + a_copy5[1, 1] = 777 + c5want = array_copy_but_one(master[2:4, 2:5], 1, 1, 777) + + a_corn1 = a.corners() + assert not a_corn1.flags.owndata and 
a_corn1.flags.writeable + a_corn1 *= 50 + a_corn1[1, 1] = 999 + master[0, 0] = 50 + master[0, 9] = 50 + master[9, 0] = 50 + master[9, 9] = 999 + a_corn2 = a.corners_const() + assert not a_corn2.flags.owndata and not a_corn2.flags.writeable + with pytest.raises(ValueError): + a_corn2[1, 0] = 51 + + # All of the changes made all the way along should be visible everywhere + # now (except for the copies, of course) + np.testing.assert_array_equal(a_get1, master) + np.testing.assert_array_equal(a_get2, master) + np.testing.assert_array_equal(a_view1, master) + np.testing.assert_array_equal(a_view2, master) + np.testing.assert_array_equal(a_ref1, master) + np.testing.assert_array_equal(a_ref2, master) + np.testing.assert_array_equal(a_ref3, master) + np.testing.assert_array_equal(a_ref4, master) + np.testing.assert_array_equal(a_block1, master[3:5, 3:5]) + np.testing.assert_array_equal(a_block2, master[2:5, 2:4]) + np.testing.assert_array_equal(a_block3, master[6:10, 7:10]) + np.testing.assert_array_equal(a_corn1, master[0::master.shape[0] - 1, 0::master.shape[1] - 1]) + np.testing.assert_array_equal(a_corn2, master[0::master.shape[0] - 1, 0::master.shape[1] - 1]) + + np.testing.assert_array_equal(a_copy1, c1want) + np.testing.assert_array_equal(a_copy2, c2want) + np.testing.assert_array_equal(a_copy3, c3want) + np.testing.assert_array_equal(a_copy4, c4want) + np.testing.assert_array_equal(a_copy5, c5want) + + +def assert_keeps_alive(cl, method, *args): + cstats = ConstructorStats.get(cl) + start_with = cstats.alive() + a = cl() + assert cstats.alive() == start_with + 1 + z = method(a, *args) + assert cstats.alive() == start_with + 1 + del a + # Here's the keep alive in action: + assert cstats.alive() == start_with + 1 + del z + # Keep alive should have expired: + assert cstats.alive() == start_with + + +def test_eigen_keepalive(): + a = m.ReturnTester() + cstats = ConstructorStats.get(m.ReturnTester) + assert cstats.alive() == 1 + unsafe = [a.ref(), a.ref_const(), 
a.block(1, 2, 3, 4)] + copies = [a.copy_get(), a.copy_view(), a.copy_ref(), a.copy_ref_const(), + a.copy_block(4, 3, 2, 1)] + del a + assert cstats.alive() == 0 + del unsafe + del copies + + for meth in [m.ReturnTester.get, m.ReturnTester.get_ptr, m.ReturnTester.view, + m.ReturnTester.view_ptr, m.ReturnTester.ref_safe, m.ReturnTester.ref_const_safe, + m.ReturnTester.corners, m.ReturnTester.corners_const]: + assert_keeps_alive(m.ReturnTester, meth) + + for meth in [m.ReturnTester.block_safe, m.ReturnTester.block_const]: + assert_keeps_alive(m.ReturnTester, meth, 4, 3, 2, 1) + + +def test_eigen_ref_mutators(): + """Tests Eigen's ability to mutate numpy values""" + + orig = np.array([[1., 2, 3], [4, 5, 6], [7, 8, 9]]) + zr = np.array(orig) + zc = np.array(orig, order='F') + m.add_rm(zr, 1, 0, 100) + assert np.all(zr == np.array([[1., 2, 3], [104, 5, 6], [7, 8, 9]])) + m.add_cm(zc, 1, 0, 200) + assert np.all(zc == np.array([[1., 2, 3], [204, 5, 6], [7, 8, 9]])) + + m.add_any(zr, 1, 0, 20) + assert np.all(zr == np.array([[1., 2, 3], [124, 5, 6], [7, 8, 9]])) + m.add_any(zc, 1, 0, 10) + assert np.all(zc == np.array([[1., 2, 3], [214, 5, 6], [7, 8, 9]])) + + # Can't reference a col-major array with a row-major Ref, and vice versa: + with pytest.raises(TypeError): + m.add_rm(zc, 1, 0, 1) + with pytest.raises(TypeError): + m.add_cm(zr, 1, 0, 1) + + # Overloads: + m.add1(zr, 1, 0, -100) + m.add2(zr, 1, 0, -20) + assert np.all(zr == orig) + m.add1(zc, 1, 0, -200) + m.add2(zc, 1, 0, -10) + assert np.all(zc == orig) + + # a non-contiguous slice (this won't work on either the row- or + # column-contiguous refs, but should work for the any) + cornersr = zr[0::2, 0::2] + cornersc = zc[0::2, 0::2] + + assert np.all(cornersr == np.array([[1., 3], [7, 9]])) + assert np.all(cornersc == np.array([[1., 3], [7, 9]])) + + with pytest.raises(TypeError): + m.add_rm(cornersr, 0, 1, 25) + with pytest.raises(TypeError): + m.add_cm(cornersr, 0, 1, 25) + with pytest.raises(TypeError): + 
m.add_rm(cornersc, 0, 1, 25) + with pytest.raises(TypeError): + m.add_cm(cornersc, 0, 1, 25) + m.add_any(cornersr, 0, 1, 25) + m.add_any(cornersc, 0, 1, 44) + assert np.all(zr == np.array([[1., 2, 28], [4, 5, 6], [7, 8, 9]])) + assert np.all(zc == np.array([[1., 2, 47], [4, 5, 6], [7, 8, 9]])) + + # You shouldn't be allowed to pass a non-writeable array to a mutating Eigen method: + zro = zr[0:4, 0:4] + zro.flags.writeable = False + with pytest.raises(TypeError): + m.add_rm(zro, 0, 0, 0) + with pytest.raises(TypeError): + m.add_any(zro, 0, 0, 0) + with pytest.raises(TypeError): + m.add1(zro, 0, 0, 0) + with pytest.raises(TypeError): + m.add2(zro, 0, 0, 0) + + # integer array shouldn't be passable to a double-matrix-accepting mutating func: + zi = np.array([[1, 2], [3, 4]]) + with pytest.raises(TypeError): + m.add_rm(zi) + + +def test_numpy_ref_mutators(): + """Tests numpy mutating Eigen matrices (for returned Eigen::Ref<...>s)""" + + m.reset_refs() # In case another test already changed it + + zc = m.get_cm_ref() + zcro = m.get_cm_const_ref() + zr = m.get_rm_ref() + zrro = m.get_rm_const_ref() + + assert [zc[1, 2], zcro[1, 2], zr[1, 2], zrro[1, 2]] == [23] * 4 + + assert not zc.flags.owndata and zc.flags.writeable + assert not zr.flags.owndata and zr.flags.writeable + assert not zcro.flags.owndata and not zcro.flags.writeable + assert not zrro.flags.owndata and not zrro.flags.writeable + + zc[1, 2] = 99 + expect = np.array([[11., 12, 13], [21, 22, 99], [31, 32, 33]]) + # We should have just changed zc, of course, but also zcro and the original eigen matrix + assert np.all(zc == expect) + assert np.all(zcro == expect) + assert np.all(m.get_cm_ref() == expect) + + zr[1, 2] = 99 + assert np.all(zr == expect) + assert np.all(zrro == expect) + assert np.all(m.get_rm_ref() == expect) + + # Make sure the readonly ones are numpy-readonly: + with pytest.raises(ValueError): + zcro[1, 2] = 6 + with pytest.raises(ValueError): + zrro[1, 2] = 6 + + # We should be able to 
explicitly copy like this (and since we're copying, + # the const should drop away) + y1 = np.array(m.get_cm_const_ref()) + + assert y1.flags.owndata and y1.flags.writeable + # We should get copies of the eigen data, which was modified above: + assert y1[1, 2] == 99 + y1[1, 2] += 12 + assert y1[1, 2] == 111 + assert zc[1, 2] == 99 # Make sure we aren't referencing the original + + +def test_both_ref_mutators(): + """Tests a complex chain of nested eigen/numpy references""" + + m.reset_refs() # In case another test already changed it + + z = m.get_cm_ref() # numpy -> eigen + z[0, 2] -= 3 + z2 = m.incr_matrix(z, 1) # numpy -> eigen -> numpy -> eigen + z2[1, 1] += 6 + z3 = m.incr_matrix(z, 2) # (numpy -> eigen)^3 + z3[2, 2] += -5 + z4 = m.incr_matrix(z, 3) # (numpy -> eigen)^4 + z4[1, 1] -= 1 + z5 = m.incr_matrix(z, 4) # (numpy -> eigen)^5 + z5[0, 0] = 0 + assert np.all(z == z2) + assert np.all(z == z3) + assert np.all(z == z4) + assert np.all(z == z5) + expect = np.array([[0., 22, 20], [31, 37, 33], [41, 42, 38]]) + assert np.all(z == expect) + + y = np.array(range(100), dtype='float64').reshape(10, 10) + y2 = m.incr_matrix_any(y, 10) # np -> eigen -> np + y3 = m.incr_matrix_any(y2[0::2, 0::2], -33) # np -> eigen -> np slice -> np -> eigen -> np + y4 = m.even_rows(y3) # numpy -> eigen slice -> (... y3) + y5 = m.even_cols(y4) # numpy -> eigen slice -> (... y4) + y6 = m.incr_matrix_any(y5, 1000) # numpy -> eigen -> (... 
y5) + + # Apply same mutations using just numpy: + yexpect = np.array(range(100), dtype='float64').reshape(10, 10) + yexpect += 10 + yexpect[0::2, 0::2] -= 33 + yexpect[0::4, 0::4] += 1000 + assert np.all(y6 == yexpect[0::4, 0::4]) + assert np.all(y5 == yexpect[0::4, 0::4]) + assert np.all(y4 == yexpect[0::4, 0::2]) + assert np.all(y3 == yexpect[0::2, 0::2]) + assert np.all(y2 == yexpect) + assert np.all(y == yexpect) + + +def test_nocopy_wrapper(): + # get_elem requires a column-contiguous matrix reference, but should be + # callable with other types of matrix (via copying): + int_matrix_colmajor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], order='F') + dbl_matrix_colmajor = np.array(int_matrix_colmajor, dtype='double', order='F', copy=True) + int_matrix_rowmajor = np.array(int_matrix_colmajor, order='C', copy=True) + dbl_matrix_rowmajor = np.array(int_matrix_rowmajor, dtype='double', order='C', copy=True) + + # All should be callable via get_elem: + assert m.get_elem(int_matrix_colmajor) == 8 + assert m.get_elem(dbl_matrix_colmajor) == 8 + assert m.get_elem(int_matrix_rowmajor) == 8 + assert m.get_elem(dbl_matrix_rowmajor) == 8 + + # All but the second should fail with m.get_elem_nocopy: + with pytest.raises(TypeError) as excinfo: + m.get_elem_nocopy(int_matrix_colmajor) + assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and + ', flags.f_contiguous' in str(excinfo.value)) + assert m.get_elem_nocopy(dbl_matrix_colmajor) == 8 + with pytest.raises(TypeError) as excinfo: + m.get_elem_nocopy(int_matrix_rowmajor) + assert ('get_elem_nocopy(): incompatible function arguments.' in str(excinfo.value) and + ', flags.f_contiguous' in str(excinfo.value)) + with pytest.raises(TypeError) as excinfo: + m.get_elem_nocopy(dbl_matrix_rowmajor) + assert ('get_elem_nocopy(): incompatible function arguments.' 
in str(excinfo.value) and + ', flags.f_contiguous' in str(excinfo.value)) + + # For the row-major test, we take a long matrix in row-major, so only the third is allowed: + with pytest.raises(TypeError) as excinfo: + m.get_elem_rm_nocopy(int_matrix_colmajor) + assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and + ', flags.c_contiguous' in str(excinfo.value)) + with pytest.raises(TypeError) as excinfo: + m.get_elem_rm_nocopy(dbl_matrix_colmajor) + assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and + ', flags.c_contiguous' in str(excinfo.value)) + assert m.get_elem_rm_nocopy(int_matrix_rowmajor) == 8 + with pytest.raises(TypeError) as excinfo: + m.get_elem_rm_nocopy(dbl_matrix_rowmajor) + assert ('get_elem_rm_nocopy(): incompatible function arguments.' in str(excinfo.value) and + ', flags.c_contiguous' in str(excinfo.value)) + + +def test_eigen_ref_life_support(): + """Ensure the lifetime of temporary arrays created by the `Ref` caster + + The `Ref` caster sometimes creates a copy which needs to stay alive. This needs to + happen both for directs casts (just the array) or indirectly (e.g. list of arrays). 
+ """ + + a = np.full(shape=10, fill_value=8, dtype=np.int8) + assert m.get_elem_direct(a) == 8 + + list_of_a = [a] + assert m.get_elem_indirect(list_of_a) == 8 + + +def test_special_matrix_objects(): + assert np.all(m.incr_diag(7) == np.diag([1., 2, 3, 4, 5, 6, 7])) + + asymm = np.array([[ 1., 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12], + [13, 14, 15, 16]]) + symm_lower = np.array(asymm) + symm_upper = np.array(asymm) + for i in range(4): + for j in range(i + 1, 4): + symm_lower[i, j] = symm_lower[j, i] + symm_upper[j, i] = symm_upper[i, j] + + assert np.all(m.symmetric_lower(asymm) == symm_lower) + assert np.all(m.symmetric_upper(asymm) == symm_upper) + + +def test_dense_signature(doc): + assert doc(m.double_col) == """ + double_col(arg0: numpy.ndarray[numpy.float32[m, 1]]) -> numpy.ndarray[numpy.float32[m, 1]] + """ + assert doc(m.double_row) == """ + double_row(arg0: numpy.ndarray[numpy.float32[1, n]]) -> numpy.ndarray[numpy.float32[1, n]] + """ + assert doc(m.double_complex) == (""" + double_complex(arg0: numpy.ndarray[numpy.complex64[m, 1]])""" + """ -> numpy.ndarray[numpy.complex64[m, 1]] + """) + assert doc(m.double_mat_rm) == (""" + double_mat_rm(arg0: numpy.ndarray[numpy.float32[m, n]])""" + """ -> numpy.ndarray[numpy.float32[m, n]] + """) + + +def test_named_arguments(): + a = np.array([[1.0, 2], [3, 4], [5, 6]]) + b = np.ones((2, 1)) + + assert np.all(m.matrix_multiply(a, b) == np.array([[3.], [7], [11]])) + assert np.all(m.matrix_multiply(A=a, B=b) == np.array([[3.], [7], [11]])) + assert np.all(m.matrix_multiply(B=b, A=a) == np.array([[3.], [7], [11]])) + + with pytest.raises(ValueError) as excinfo: + m.matrix_multiply(b, a) + assert str(excinfo.value) == 'Nonconformable matrices!' + + with pytest.raises(ValueError) as excinfo: + m.matrix_multiply(A=b, B=a) + assert str(excinfo.value) == 'Nonconformable matrices!' + + with pytest.raises(ValueError) as excinfo: + m.matrix_multiply(B=a, A=b) + assert str(excinfo.value) == 'Nonconformable matrices!' 
+ + +def test_sparse(): + pytest.importorskip("scipy") + assert_sparse_equal_ref(m.sparse_r()) + assert_sparse_equal_ref(m.sparse_c()) + assert_sparse_equal_ref(m.sparse_copy_r(m.sparse_r())) + assert_sparse_equal_ref(m.sparse_copy_c(m.sparse_c())) + assert_sparse_equal_ref(m.sparse_copy_r(m.sparse_c())) + assert_sparse_equal_ref(m.sparse_copy_c(m.sparse_r())) + + +def test_sparse_signature(doc): + pytest.importorskip("scipy") + assert doc(m.sparse_copy_r) == """ + sparse_copy_r(arg0: scipy.sparse.csr_matrix[numpy.float32]) -> scipy.sparse.csr_matrix[numpy.float32] + """ # noqa: E501 line too long + assert doc(m.sparse_copy_c) == """ + sparse_copy_c(arg0: scipy.sparse.csc_matrix[numpy.float32]) -> scipy.sparse.csc_matrix[numpy.float32] + """ # noqa: E501 line too long + + +def test_issue738(): + """Ignore strides on a length-1 dimension (even if they would be incompatible length > 1)""" + assert np.all(m.iss738_f1(np.array([[1., 2, 3]])) == np.array([[1., 102, 203]])) + assert np.all(m.iss738_f1(np.array([[1.], [2], [3]])) == np.array([[1.], [12], [23]])) + + assert np.all(m.iss738_f2(np.array([[1., 2, 3]])) == np.array([[1., 102, 203]])) + assert np.all(m.iss738_f2(np.array([[1.], [2], [3]])) == np.array([[1.], [12], [23]])) + + +def test_issue1105(): + """Issue 1105: 1xN or Nx1 input arrays weren't accepted for eigen + compile-time row vectors or column vector""" + assert m.iss1105_row(np.ones((1, 7))) + assert m.iss1105_col(np.ones((7, 1))) + + # These should still fail (incompatible dimensions): + with pytest.raises(TypeError) as excinfo: + m.iss1105_row(np.ones((7, 1))) + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.iss1105_col(np.ones((1, 7))) + assert "incompatible function arguments" in str(excinfo.value) + + +def test_custom_operator_new(): + """Using Eigen types as member variables requires a class-specific + operator new with proper alignment""" + + o = m.CustomOperatorNew() + 
np.testing.assert_allclose(o.a, 0.0) + np.testing.assert_allclose(o.b.diagonal(), 1.0) diff --git a/diffvg/pybind11/tests/test_embed/CMakeLists.txt b/diffvg/pybind11/tests/test_embed/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..2e298fa7e44684cfae77184e6218f83b62d90c1c --- /dev/null +++ b/diffvg/pybind11/tests/test_embed/CMakeLists.txt @@ -0,0 +1,43 @@ +if("${PYTHON_MODULE_EXTENSION}" MATCHES "pypy" OR "${Python_INTERPRETER_ID}" STREQUAL "PyPy") + add_custom_target(cpptest) # Dummy target on PyPy. Embedding is not supported. + set(_suppress_unused_variable_warning "${DOWNLOAD_CATCH}") + return() +endif() + +find_package(Catch 2.13.0) + +if(CATCH_FOUND) + message(STATUS "Building interpreter tests using Catch v${CATCH_VERSION}") +else() + message(STATUS "Catch not detected. Interpreter tests will be skipped. Install Catch headers" + " manually or use `cmake -DDOWNLOAD_CATCH=ON` to fetch them automatically.") + return() +endif() + +find_package(Threads REQUIRED) + +add_executable(test_embed catch.cpp test_interpreter.cpp) +pybind11_enable_warnings(test_embed) + +target_link_libraries(test_embed PRIVATE pybind11::embed Catch2::Catch2 Threads::Threads) + +if(NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR) + file(COPY test_interpreter.py DESTINATION "${CMAKE_CURRENT_BINARY_DIR}") +endif() + +add_custom_target( + cpptest + COMMAND "$" + WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}") + +pybind11_add_module(external_module THIN_LTO external_module.cpp) +set_target_properties(external_module PROPERTIES LIBRARY_OUTPUT_DIRECTORY + "${CMAKE_CURRENT_BINARY_DIR}") +foreach(config ${CMAKE_CONFIGURATION_TYPES}) + string(TOUPPER ${config} config) + set_target_properties(external_module PROPERTIES LIBRARY_OUTPUT_DIRECTORY_${config} + "${CMAKE_CURRENT_BINARY_DIR}") +endforeach() +add_dependencies(cpptest external_module) + +add_dependencies(check cpptest) diff --git a/diffvg/pybind11/tests/test_embed/catch.cpp 
b/diffvg/pybind11/tests/test_embed/catch.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dd137385cb32250b8640169934fb96aa5e80f069 --- /dev/null +++ b/diffvg/pybind11/tests/test_embed/catch.cpp @@ -0,0 +1,22 @@ +// The Catch implementation is compiled here. This is a standalone +// translation unit to avoid recompiling it for every test change. + +#include + +#ifdef _MSC_VER +// Silence MSVC C++17 deprecation warning from Catch regarding std::uncaught_exceptions (up to catch +// 2.0.1; this should be fixed in the next catch release after 2.0.1). +# pragma warning(disable: 4996) +#endif + +#define CATCH_CONFIG_RUNNER +#include + +namespace py = pybind11; + +int main(int argc, char *argv[]) { + py::scoped_interpreter guard{}; + auto result = Catch::Session().run(argc, argv); + + return result < 0xff ? result : 0xff; +} diff --git a/diffvg/pybind11/tests/test_embed/external_module.cpp b/diffvg/pybind11/tests/test_embed/external_module.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e9a6058b179400545479412e5549d7a54f94caeb --- /dev/null +++ b/diffvg/pybind11/tests/test_embed/external_module.cpp @@ -0,0 +1,23 @@ +#include + +namespace py = pybind11; + +/* Simple test module/test class to check that the referenced internals data of external pybind11 + * modules aren't preserved over a finalize/initialize. 
+ */ + +PYBIND11_MODULE(external_module, m) { + class A { + public: + A(int value) : v{value} {}; + int v; + }; + + py::class_(m, "A") + .def(py::init()) + .def_readwrite("value", &A::v); + + m.def("internals_at", []() { + return reinterpret_cast(&py::detail::get_internals()); + }); +} diff --git a/diffvg/pybind11/tests/test_embed/test_interpreter.cpp b/diffvg/pybind11/tests/test_embed/test_interpreter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..222bd565fbffd6484db09876ae9cceabffcb69cd --- /dev/null +++ b/diffvg/pybind11/tests/test_embed/test_interpreter.cpp @@ -0,0 +1,284 @@ +#include + +#ifdef _MSC_VER +// Silence MSVC C++17 deprecation warning from Catch regarding std::uncaught_exceptions (up to catch +// 2.0.1; this should be fixed in the next catch release after 2.0.1). +# pragma warning(disable: 4996) +#endif + +#include + +#include +#include +#include + +namespace py = pybind11; +using namespace py::literals; + +class Widget { +public: + Widget(std::string message) : message(message) { } + virtual ~Widget() = default; + + std::string the_message() const { return message; } + virtual int the_answer() const = 0; + +private: + std::string message; +}; + +class PyWidget final : public Widget { + using Widget::Widget; + + int the_answer() const override { PYBIND11_OVERLOAD_PURE(int, Widget, the_answer); } +}; + +PYBIND11_EMBEDDED_MODULE(widget_module, m) { + py::class_(m, "Widget") + .def(py::init()) + .def_property_readonly("the_message", &Widget::the_message); + + m.def("add", [](int i, int j) { return i + j; }); +} + +PYBIND11_EMBEDDED_MODULE(throw_exception, ) { + throw std::runtime_error("C++ Error"); +} + +PYBIND11_EMBEDDED_MODULE(throw_error_already_set, ) { + auto d = py::dict(); + d["missing"].cast(); +} + +TEST_CASE("Pass classes and data between modules defined in C++ and Python") { + auto module = py::module::import("test_interpreter"); + REQUIRE(py::hasattr(module, "DerivedWidget")); + + auto locals = 
py::dict("hello"_a="Hello, World!", "x"_a=5, **module.attr("__dict__")); + py::exec(R"( + widget = DerivedWidget("{} - {}".format(hello, x)) + message = widget.the_message + )", py::globals(), locals); + REQUIRE(locals["message"].cast() == "Hello, World! - 5"); + + auto py_widget = module.attr("DerivedWidget")("The question"); + auto message = py_widget.attr("the_message"); + REQUIRE(message.cast() == "The question"); + + const auto &cpp_widget = py_widget.cast(); + REQUIRE(cpp_widget.the_answer() == 42); +} + +TEST_CASE("Import error handling") { + REQUIRE_NOTHROW(py::module::import("widget_module")); + REQUIRE_THROWS_WITH(py::module::import("throw_exception"), + "ImportError: C++ Error"); + REQUIRE_THROWS_WITH(py::module::import("throw_error_already_set"), + Catch::Contains("ImportError: KeyError")); +} + +TEST_CASE("There can be only one interpreter") { + static_assert(std::is_move_constructible::value, ""); + static_assert(!std::is_move_assignable::value, ""); + static_assert(!std::is_copy_constructible::value, ""); + static_assert(!std::is_copy_assignable::value, ""); + + REQUIRE_THROWS_WITH(py::initialize_interpreter(), "The interpreter is already running"); + REQUIRE_THROWS_WITH(py::scoped_interpreter(), "The interpreter is already running"); + + py::finalize_interpreter(); + REQUIRE_NOTHROW(py::scoped_interpreter()); + { + auto pyi1 = py::scoped_interpreter(); + auto pyi2 = std::move(pyi1); + } + py::initialize_interpreter(); +} + +bool has_pybind11_internals_builtin() { + auto builtins = py::handle(PyEval_GetBuiltins()); + return builtins.contains(PYBIND11_INTERNALS_ID); +}; + +bool has_pybind11_internals_static() { + auto **&ipp = py::detail::get_internals_pp(); + return ipp && *ipp; +} + +TEST_CASE("Restart the interpreter") { + // Verify pre-restart state. 
+ REQUIRE(py::module::import("widget_module").attr("add")(1, 2).cast() == 3); + REQUIRE(has_pybind11_internals_builtin()); + REQUIRE(has_pybind11_internals_static()); + REQUIRE(py::module::import("external_module").attr("A")(123).attr("value").cast() == 123); + + // local and foreign module internals should point to the same internals: + REQUIRE(reinterpret_cast(*py::detail::get_internals_pp()) == + py::module::import("external_module").attr("internals_at")().cast()); + + // Restart the interpreter. + py::finalize_interpreter(); + REQUIRE(Py_IsInitialized() == 0); + + py::initialize_interpreter(); + REQUIRE(Py_IsInitialized() == 1); + + // Internals are deleted after a restart. + REQUIRE_FALSE(has_pybind11_internals_builtin()); + REQUIRE_FALSE(has_pybind11_internals_static()); + pybind11::detail::get_internals(); + REQUIRE(has_pybind11_internals_builtin()); + REQUIRE(has_pybind11_internals_static()); + REQUIRE(reinterpret_cast(*py::detail::get_internals_pp()) == + py::module::import("external_module").attr("internals_at")().cast()); + + // Make sure that an interpreter with no get_internals() created until finalize still gets the + // internals destroyed + py::finalize_interpreter(); + py::initialize_interpreter(); + bool ran = false; + py::module::import("__main__").attr("internals_destroy_test") = + py::capsule(&ran, [](void *ran) { py::detail::get_internals(); *static_cast(ran) = true; }); + REQUIRE_FALSE(has_pybind11_internals_builtin()); + REQUIRE_FALSE(has_pybind11_internals_static()); + REQUIRE_FALSE(ran); + py::finalize_interpreter(); + REQUIRE(ran); + py::initialize_interpreter(); + REQUIRE_FALSE(has_pybind11_internals_builtin()); + REQUIRE_FALSE(has_pybind11_internals_static()); + + // C++ modules can be reloaded. + auto cpp_module = py::module::import("widget_module"); + REQUIRE(cpp_module.attr("add")(1, 2).cast() == 3); + + // C++ type information is reloaded and can be used in python modules. 
+ auto py_module = py::module::import("test_interpreter"); + auto py_widget = py_module.attr("DerivedWidget")("Hello after restart"); + REQUIRE(py_widget.attr("the_message").cast() == "Hello after restart"); +} + +TEST_CASE("Subinterpreter") { + // Add tags to the modules in the main interpreter and test the basics. + py::module::import("__main__").attr("main_tag") = "main interpreter"; + { + auto m = py::module::import("widget_module"); + m.attr("extension_module_tag") = "added to module in main interpreter"; + + REQUIRE(m.attr("add")(1, 2).cast() == 3); + } + REQUIRE(has_pybind11_internals_builtin()); + REQUIRE(has_pybind11_internals_static()); + + /// Create and switch to a subinterpreter. + auto main_tstate = PyThreadState_Get(); + auto sub_tstate = Py_NewInterpreter(); + + // Subinterpreters get their own copy of builtins. detail::get_internals() still + // works by returning from the static variable, i.e. all interpreters share a single + // global pybind11::internals; + REQUIRE_FALSE(has_pybind11_internals_builtin()); + REQUIRE(has_pybind11_internals_static()); + + // Modules tags should be gone. + REQUIRE_FALSE(py::hasattr(py::module::import("__main__"), "tag")); + { + auto m = py::module::import("widget_module"); + REQUIRE_FALSE(py::hasattr(m, "extension_module_tag")); + + // Function bindings should still work. + REQUIRE(m.attr("add")(1, 2).cast() == 3); + } + + // Restore main interpreter. + Py_EndInterpreter(sub_tstate); + PyThreadState_Swap(main_tstate); + + REQUIRE(py::hasattr(py::module::import("__main__"), "main_tag")); + REQUIRE(py::hasattr(py::module::import("widget_module"), "extension_module_tag")); +} + +TEST_CASE("Execution frame") { + // When the interpreter is embedded, there is no execution frame, but `py::exec` + // should still function by using reasonable globals: `__main__.__dict__`. 
+ py::exec("var = dict(number=42)"); + REQUIRE(py::globals()["var"]["number"].cast() == 42); +} + +TEST_CASE("Threads") { + // Restart interpreter to ensure threads are not initialized + py::finalize_interpreter(); + py::initialize_interpreter(); + REQUIRE_FALSE(has_pybind11_internals_static()); + + constexpr auto num_threads = 10; + auto locals = py::dict("count"_a=0); + + { + py::gil_scoped_release gil_release{}; + REQUIRE(has_pybind11_internals_static()); + + auto threads = std::vector(); + for (auto i = 0; i < num_threads; ++i) { + threads.emplace_back([&]() { + py::gil_scoped_acquire gil{}; + locals["count"] = locals["count"].cast() + 1; + }); + } + + for (auto &thread : threads) { + thread.join(); + } + } + + REQUIRE(locals["count"].cast() == num_threads); +} + +// Scope exit utility https://stackoverflow.com/a/36644501/7255855 +struct scope_exit { + std::function f_; + explicit scope_exit(std::function f) noexcept : f_(std::move(f)) {} + ~scope_exit() { if (f_) f_(); } +}; + +TEST_CASE("Reload module from file") { + // Disable generation of cached bytecode (.pyc files) for this test, otherwise + // Python might pick up an old version from the cache instead of the new versions + // of the .py files generated below + auto sys = py::module::import("sys"); + bool dont_write_bytecode = sys.attr("dont_write_bytecode").cast(); + sys.attr("dont_write_bytecode") = true; + // Reset the value at scope exit + scope_exit reset_dont_write_bytecode([&]() { + sys.attr("dont_write_bytecode") = dont_write_bytecode; + }); + + std::string module_name = "test_module_reload"; + std::string module_file = module_name + ".py"; + + // Create the module .py file + std::ofstream test_module(module_file); + test_module << "def test():\n"; + test_module << " return 1\n"; + test_module.close(); + // Delete the file at scope exit + scope_exit delete_module_file([&]() { + std::remove(module_file.c_str()); + }); + + // Import the module from file + auto module = 
py::module::import(module_name.c_str()); + int result = module.attr("test")().cast(); + REQUIRE(result == 1); + + // Update the module .py file with a small change + test_module.open(module_file); + test_module << "def test():\n"; + test_module << " return 2\n"; + test_module.close(); + + // Reload the module + module.reload(); + result = module.attr("test")().cast(); + REQUIRE(result == 2); +} diff --git a/diffvg/pybind11/tests/test_embed/test_interpreter.py b/diffvg/pybind11/tests/test_embed/test_interpreter.py new file mode 100644 index 0000000000000000000000000000000000000000..6174ede446f0356fbdf61aee4136535a78a32479 --- /dev/null +++ b/diffvg/pybind11/tests/test_embed/test_interpreter.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +from widget_module import Widget + + +class DerivedWidget(Widget): + def __init__(self, message): + super(DerivedWidget, self).__init__(message) + + def the_answer(self): + return 42 diff --git a/diffvg/pybind11/tests/test_enum.cpp b/diffvg/pybind11/tests/test_enum.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3153089208c964346e2fc39cafad8d0b372f1154 --- /dev/null +++ b/diffvg/pybind11/tests/test_enum.cpp @@ -0,0 +1,87 @@ +/* + tests/test_enums.cpp -- enumerations + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" + +TEST_SUBMODULE(enums, m) { + // test_unscoped_enum + enum UnscopedEnum { + EOne = 1, + ETwo, + EThree + }; + py::enum_(m, "UnscopedEnum", py::arithmetic(), "An unscoped enumeration") + .value("EOne", EOne, "Docstring for EOne") + .value("ETwo", ETwo, "Docstring for ETwo") + .value("EThree", EThree, "Docstring for EThree") + .export_values(); + + // test_scoped_enum + enum class ScopedEnum { + Two = 2, + Three + }; + py::enum_(m, "ScopedEnum", py::arithmetic()) + .value("Two", ScopedEnum::Two) + .value("Three", ScopedEnum::Three); + + m.def("test_scoped_enum", [](ScopedEnum z) { + return "ScopedEnum::" + std::string(z == ScopedEnum::Two ? "Two" : "Three"); + }); + + // test_binary_operators + enum Flags { + Read = 4, + Write = 2, + Execute = 1 + }; + py::enum_(m, "Flags", py::arithmetic()) + .value("Read", Flags::Read) + .value("Write", Flags::Write) + .value("Execute", Flags::Execute) + .export_values(); + + // test_implicit_conversion + class ClassWithUnscopedEnum { + public: + enum EMode { + EFirstMode = 1, + ESecondMode + }; + + static EMode test_function(EMode mode) { + return mode; + } + }; + py::class_ exenum_class(m, "ClassWithUnscopedEnum"); + exenum_class.def_static("test_function", &ClassWithUnscopedEnum::test_function); + py::enum_(exenum_class, "EMode") + .value("EFirstMode", ClassWithUnscopedEnum::EFirstMode) + .value("ESecondMode", ClassWithUnscopedEnum::ESecondMode) + .export_values(); + + // test_enum_to_int + m.def("test_enum_to_int", [](int) { }); + m.def("test_enum_to_uint", [](uint32_t) { }); + m.def("test_enum_to_long_long", [](long long) { }); + + // test_duplicate_enum_name + enum SimpleEnum + { + ONE, TWO, THREE + }; + + m.def("register_bad_enum", [m]() { + py::enum_(m, "SimpleEnum") + .value("ONE", SimpleEnum::ONE) //NOTE: all value function calls are called with the same first parameter value + .value("ONE", SimpleEnum::TWO) + .value("ONE", SimpleEnum::THREE) + .export_values(); + }); +} diff --git 
a/diffvg/pybind11/tests/test_enum.py b/diffvg/pybind11/tests/test_enum.py new file mode 100644 index 0000000000000000000000000000000000000000..bfaa193e9ba86295e249c20b96a150ce2ca0b88a --- /dev/null +++ b/diffvg/pybind11/tests/test_enum.py @@ -0,0 +1,207 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import enums as m + + +def test_unscoped_enum(): + assert str(m.UnscopedEnum.EOne) == "UnscopedEnum.EOne" + assert str(m.UnscopedEnum.ETwo) == "UnscopedEnum.ETwo" + assert str(m.EOne) == "UnscopedEnum.EOne" + + # name property + assert m.UnscopedEnum.EOne.name == "EOne" + assert m.UnscopedEnum.ETwo.name == "ETwo" + assert m.EOne.name == "EOne" + # name readonly + with pytest.raises(AttributeError): + m.UnscopedEnum.EOne.name = "" + # name returns a copy + foo = m.UnscopedEnum.EOne.name + foo = "bar" + assert m.UnscopedEnum.EOne.name == "EOne" + + # __members__ property + assert m.UnscopedEnum.__members__ == \ + {"EOne": m.UnscopedEnum.EOne, "ETwo": m.UnscopedEnum.ETwo, "EThree": m.UnscopedEnum.EThree} + # __members__ readonly + with pytest.raises(AttributeError): + m.UnscopedEnum.__members__ = {} + # __members__ returns a copy + foo = m.UnscopedEnum.__members__ + foo["bar"] = "baz" + assert m.UnscopedEnum.__members__ == \ + {"EOne": m.UnscopedEnum.EOne, "ETwo": m.UnscopedEnum.ETwo, "EThree": m.UnscopedEnum.EThree} + + for docstring_line in '''An unscoped enumeration + +Members: + + EOne : Docstring for EOne + + ETwo : Docstring for ETwo + + EThree : Docstring for EThree'''.split('\n'): + assert docstring_line in m.UnscopedEnum.__doc__ + + # Unscoped enums will accept ==/!= int comparisons + y = m.UnscopedEnum.ETwo + assert y == 2 + assert 2 == y + assert y != 3 + assert 3 != y + # Compare with None + assert (y != None) # noqa: E711 + assert not (y == None) # noqa: E711 + # Compare with an object + assert (y != object()) + assert not (y == object()) + # Compare with string + assert y != "2" + assert "2" != y + assert not ("2" == y) + assert not (y == 
"2") + + with pytest.raises(TypeError): + y < object() + + with pytest.raises(TypeError): + y <= object() + + with pytest.raises(TypeError): + y > object() + + with pytest.raises(TypeError): + y >= object() + + with pytest.raises(TypeError): + y | object() + + with pytest.raises(TypeError): + y & object() + + with pytest.raises(TypeError): + y ^ object() + + assert int(m.UnscopedEnum.ETwo) == 2 + assert str(m.UnscopedEnum(2)) == "UnscopedEnum.ETwo" + + # order + assert m.UnscopedEnum.EOne < m.UnscopedEnum.ETwo + assert m.UnscopedEnum.EOne < 2 + assert m.UnscopedEnum.ETwo > m.UnscopedEnum.EOne + assert m.UnscopedEnum.ETwo > 1 + assert m.UnscopedEnum.ETwo <= 2 + assert m.UnscopedEnum.ETwo >= 2 + assert m.UnscopedEnum.EOne <= m.UnscopedEnum.ETwo + assert m.UnscopedEnum.EOne <= 2 + assert m.UnscopedEnum.ETwo >= m.UnscopedEnum.EOne + assert m.UnscopedEnum.ETwo >= 1 + assert not (m.UnscopedEnum.ETwo < m.UnscopedEnum.EOne) + assert not (2 < m.UnscopedEnum.EOne) + + # arithmetic + assert m.UnscopedEnum.EOne & m.UnscopedEnum.EThree == m.UnscopedEnum.EOne + assert m.UnscopedEnum.EOne | m.UnscopedEnum.ETwo == m.UnscopedEnum.EThree + assert m.UnscopedEnum.EOne ^ m.UnscopedEnum.EThree == m.UnscopedEnum.ETwo + + +def test_scoped_enum(): + assert m.test_scoped_enum(m.ScopedEnum.Three) == "ScopedEnum::Three" + z = m.ScopedEnum.Two + assert m.test_scoped_enum(z) == "ScopedEnum::Two" + + # Scoped enums will *NOT* accept ==/!= int comparisons (Will always return False) + assert not z == 3 + assert not 3 == z + assert z != 3 + assert 3 != z + # Compare with None + assert (z != None) # noqa: E711 + assert not (z == None) # noqa: E711 + # Compare with an object + assert (z != object()) + assert not (z == object()) + # Scoped enums will *NOT* accept >, <, >= and <= int comparisons (Will throw exceptions) + with pytest.raises(TypeError): + z > 3 + with pytest.raises(TypeError): + z < 3 + with pytest.raises(TypeError): + z >= 3 + with pytest.raises(TypeError): + z <= 3 + + # order + assert 
m.ScopedEnum.Two < m.ScopedEnum.Three + assert m.ScopedEnum.Three > m.ScopedEnum.Two + assert m.ScopedEnum.Two <= m.ScopedEnum.Three + assert m.ScopedEnum.Two <= m.ScopedEnum.Two + assert m.ScopedEnum.Two >= m.ScopedEnum.Two + assert m.ScopedEnum.Three >= m.ScopedEnum.Two + + +def test_implicit_conversion(): + assert str(m.ClassWithUnscopedEnum.EMode.EFirstMode) == "EMode.EFirstMode" + assert str(m.ClassWithUnscopedEnum.EFirstMode) == "EMode.EFirstMode" + + f = m.ClassWithUnscopedEnum.test_function + first = m.ClassWithUnscopedEnum.EFirstMode + second = m.ClassWithUnscopedEnum.ESecondMode + + assert f(first) == 1 + + assert f(first) == f(first) + assert not f(first) != f(first) + + assert f(first) != f(second) + assert not f(first) == f(second) + + assert f(first) == int(f(first)) + assert not f(first) != int(f(first)) + + assert f(first) != int(f(second)) + assert not f(first) == int(f(second)) + + # noinspection PyDictCreation + x = {f(first): 1, f(second): 2} + x[f(first)] = 3 + x[f(second)] = 4 + # Hashing test + assert str(x) == "{EMode.EFirstMode: 3, EMode.ESecondMode: 4}" + + +def test_binary_operators(): + assert int(m.Flags.Read) == 4 + assert int(m.Flags.Write) == 2 + assert int(m.Flags.Execute) == 1 + assert int(m.Flags.Read | m.Flags.Write | m.Flags.Execute) == 7 + assert int(m.Flags.Read | m.Flags.Write) == 6 + assert int(m.Flags.Read | m.Flags.Execute) == 5 + assert int(m.Flags.Write | m.Flags.Execute) == 3 + assert int(m.Flags.Write | 1) == 3 + assert ~m.Flags.Write == -3 + + state = m.Flags.Read | m.Flags.Write + assert (state & m.Flags.Read) != 0 + assert (state & m.Flags.Write) != 0 + assert (state & m.Flags.Execute) == 0 + assert (state & 1) == 0 + + state2 = ~state + assert state2 == -7 + assert int(state ^ state2) == -1 + + +def test_enum_to_int(): + m.test_enum_to_int(m.Flags.Read) + m.test_enum_to_int(m.ClassWithUnscopedEnum.EMode.EFirstMode) + m.test_enum_to_uint(m.Flags.Read) + m.test_enum_to_uint(m.ClassWithUnscopedEnum.EMode.EFirstMode) + 
m.test_enum_to_long_long(m.Flags.Read) + m.test_enum_to_long_long(m.ClassWithUnscopedEnum.EMode.EFirstMode) + + +def test_duplicate_enum_name(): + with pytest.raises(ValueError) as excinfo: + m.register_bad_enum() + assert str(excinfo.value) == 'SimpleEnum: element "ONE" already exists!' diff --git a/diffvg/pybind11/tests/test_eval.cpp b/diffvg/pybind11/tests/test_eval.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e0948219117df7d8fd64dba3130d36e1307f272b --- /dev/null +++ b/diffvg/pybind11/tests/test_eval.cpp @@ -0,0 +1,91 @@ +/* + tests/test_eval.cpp -- Usage of eval() and eval_file() + + Copyright (c) 2016 Klemens D. Morgenstern + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + + +#include +#include "pybind11_tests.h" + +TEST_SUBMODULE(eval_, m) { + // test_evals + + auto global = py::dict(py::module::import("__main__").attr("__dict__")); + + m.def("test_eval_statements", [global]() { + auto local = py::dict(); + local["call_test"] = py::cpp_function([&]() -> int { + return 42; + }); + + // Regular string literal + py::exec( + "message = 'Hello World!'\n" + "x = call_test()", + global, local + ); + + // Multi-line raw string literal + py::exec(R"( + if x == 42: + print(message) + else: + raise RuntimeError + )", global, local + ); + auto x = local["x"].cast(); + + return x == 42; + }); + + m.def("test_eval", [global]() { + auto local = py::dict(); + local["x"] = py::int_(42); + auto x = py::eval("x", global, local); + return x.cast() == 42; + }); + + m.def("test_eval_single_statement", []() { + auto local = py::dict(); + local["call_test"] = py::cpp_function([&]() -> int { + return 42; + }); + + auto result = py::eval("x = call_test()", py::dict(), local); + auto x = local["x"].cast(); + return result.is_none() && x == 42; + }); + + m.def("test_eval_file", [global](py::str filename) { + auto local = py::dict(); + local["y"] = py::int_(43); + + int val_out; 
+ local["call_test2"] = py::cpp_function([&](int value) { val_out = value; }); + + auto result = py::eval_file(filename, global, local); + return val_out == 43 && result.is_none(); + }); + + m.def("test_eval_failure", []() { + try { + py::eval("nonsense code ..."); + } catch (py::error_already_set &) { + return true; + } + return false; + }); + + m.def("test_eval_file_failure", []() { + try { + py::eval_file("non-existing file"); + } catch (std::exception &) { + return true; + } + return false; + }); +} diff --git a/diffvg/pybind11/tests/test_eval.py b/diffvg/pybind11/tests/test_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..b6f9d1881db8d3154c73226414fa87f257a20bc8 --- /dev/null +++ b/diffvg/pybind11/tests/test_eval.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +import os + +import pytest + +import env # noqa: F401 + +from pybind11_tests import eval_ as m + + +def test_evals(capture): + with capture: + assert m.test_eval_statements() + assert capture == "Hello World!" 
+ + assert m.test_eval() + assert m.test_eval_single_statement() + + assert m.test_eval_failure() + + +@pytest.mark.xfail("env.PYPY and not env.PY2", raises=RuntimeError) +def test_eval_file(): + filename = os.path.join(os.path.dirname(__file__), "test_eval_call.py") + assert m.test_eval_file(filename) + + assert m.test_eval_file_failure() diff --git a/diffvg/pybind11/tests/test_eval_call.py b/diffvg/pybind11/tests/test_eval_call.py new file mode 100644 index 0000000000000000000000000000000000000000..d42a0a6d3062777557e23ca40e5881f97b43f6a9 --- /dev/null +++ b/diffvg/pybind11/tests/test_eval_call.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +# This file is called from 'test_eval.py' + +if 'call_test2' in locals(): + call_test2(y) # noqa: F821 undefined name diff --git a/diffvg/pybind11/tests/test_exceptions.cpp b/diffvg/pybind11/tests/test_exceptions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..537819d987a46746cf65ccb812c312219fcd41ba --- /dev/null +++ b/diffvg/pybind11/tests/test_exceptions.cpp @@ -0,0 +1,224 @@ +/* + tests/test_custom-exceptions.cpp -- exception translation + + Copyright (c) 2016 Pim Schellart + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" + +// A type that should be raised as an exception in Python +class MyException : public std::exception { +public: + explicit MyException(const char * m) : message{m} {} + virtual const char * what() const noexcept override {return message.c_str();} +private: + std::string message = ""; +}; + +// A type that should be translated to a standard Python exception +class MyException2 : public std::exception { +public: + explicit MyException2(const char * m) : message{m} {} + virtual const char * what() const noexcept override {return message.c_str();} +private: + std::string message = ""; +}; + +// A type that is not derived from std::exception (and is thus unknown) +class MyException3 { +public: + explicit MyException3(const char * m) : message{m} {} + virtual const char * what() const noexcept {return message.c_str();} +private: + std::string message = ""; +}; + +// A type that should be translated to MyException +// and delegated to its exception translator +class MyException4 : public std::exception { +public: + explicit MyException4(const char * m) : message{m} {} + virtual const char * what() const noexcept override {return message.c_str();} +private: + std::string message = ""; +}; + + +// Like the above, but declared via the helper function +class MyException5 : public std::logic_error { +public: + explicit MyException5(const std::string &what) : std::logic_error(what) {} +}; + +// Inherits from MyException5 +class MyException5_1 : public MyException5 { + using MyException5::MyException5; +}; + +struct PythonCallInDestructor { + PythonCallInDestructor(const py::dict &d) : d(d) {} + ~PythonCallInDestructor() { d["good"] = true; } + + py::dict d; +}; + + + +struct PythonAlreadySetInDestructor { + PythonAlreadySetInDestructor(const py::str &s) : s(s) {} + ~PythonAlreadySetInDestructor() { + py::dict foo; + try { + // Assign to a py::object to force read access of nonexistent dict entry + py::object o = foo["bar"]; + } + catch 
(py::error_already_set& ex) { + ex.discard_as_unraisable(s); + } + } + + py::str s; +}; + + +TEST_SUBMODULE(exceptions, m) { + m.def("throw_std_exception", []() { + throw std::runtime_error("This exception was intentionally thrown."); + }); + + // make a new custom exception and use it as a translation target + static py::exception ex(m, "MyException"); + py::register_exception_translator([](std::exception_ptr p) { + try { + if (p) std::rethrow_exception(p); + } catch (const MyException &e) { + // Set MyException as the active python error + ex(e.what()); + } + }); + + // register new translator for MyException2 + // no need to store anything here because this type will + // never by visible from Python + py::register_exception_translator([](std::exception_ptr p) { + try { + if (p) std::rethrow_exception(p); + } catch (const MyException2 &e) { + // Translate this exception to a standard RuntimeError + PyErr_SetString(PyExc_RuntimeError, e.what()); + } + }); + + // register new translator for MyException4 + // which will catch it and delegate to the previously registered + // translator for MyException by throwing a new exception + py::register_exception_translator([](std::exception_ptr p) { + try { + if (p) std::rethrow_exception(p); + } catch (const MyException4 &e) { + throw MyException(e.what()); + } + }); + + // A simple exception translation: + auto ex5 = py::register_exception(m, "MyException5"); + // A slightly more complicated one that declares MyException5_1 as a subclass of MyException5 + py::register_exception(m, "MyException5_1", ex5.ptr()); + + m.def("throws1", []() { throw MyException("this error should go to a custom type"); }); + m.def("throws2", []() { throw MyException2("this error should go to a standard Python exception"); }); + m.def("throws3", []() { throw MyException3("this error cannot be translated"); }); + m.def("throws4", []() { throw MyException4("this error is rethrown"); }); + m.def("throws5", []() { throw MyException5("this is a 
helper-defined translated exception"); }); + m.def("throws5_1", []() { throw MyException5_1("MyException5 subclass"); }); + m.def("throws_logic_error", []() { throw std::logic_error("this error should fall through to the standard handler"); }); + m.def("throws_overflow_error", []() {throw std::overflow_error(""); }); + m.def("exception_matches", []() { + py::dict foo; + try { + // Assign to a py::object to force read access of nonexistent dict entry + py::object o = foo["bar"]; + } + catch (py::error_already_set& ex) { + if (!ex.matches(PyExc_KeyError)) throw; + return true; + } + return false; + }); + m.def("exception_matches_base", []() { + py::dict foo; + try { + // Assign to a py::object to force read access of nonexistent dict entry + py::object o = foo["bar"]; + } + catch (py::error_already_set &ex) { + if (!ex.matches(PyExc_Exception)) throw; + return true; + } + return false; + }); + m.def("modulenotfound_exception_matches_base", []() { + try { + // On Python >= 3.6, this raises a ModuleNotFoundError, a subclass of ImportError + py::module::import("nonexistent"); + } + catch (py::error_already_set &ex) { + if (!ex.matches(PyExc_ImportError)) throw; + return true; + } + return false; + }); + + m.def("throw_already_set", [](bool err) { + if (err) + PyErr_SetString(PyExc_ValueError, "foo"); + try { + throw py::error_already_set(); + } catch (const std::runtime_error& e) { + if ((err && e.what() != std::string("ValueError: foo")) || + (!err && e.what() != std::string("Unknown internal error occurred"))) + { + PyErr_Clear(); + throw std::runtime_error("error message mismatch"); + } + } + PyErr_Clear(); + if (err) + PyErr_SetString(PyExc_ValueError, "foo"); + throw py::error_already_set(); + }); + + m.def("python_call_in_destructor", [](py::dict d) { + try { + PythonCallInDestructor set_dict_in_destructor(d); + PyErr_SetString(PyExc_ValueError, "foo"); + throw py::error_already_set(); + } catch (const py::error_already_set&) { + return true; + } + return false; + 
}); + + m.def("python_alreadyset_in_destructor", [](py::str s) { + PythonAlreadySetInDestructor alreadyset_in_destructor(s); + return true; + }); + + // test_nested_throws + m.def("try_catch", [m](py::object exc_type, py::function f, py::args args) { + try { f(*args); } + catch (py::error_already_set &ex) { + if (ex.matches(exc_type)) + py::print(ex.what()); + else + throw; + } + }); + + // Test repr that cannot be displayed + m.def("simple_bool_passthrough", [](bool x) {return x;}); + +} diff --git a/diffvg/pybind11/tests/test_exceptions.py b/diffvg/pybind11/tests/test_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..7d7088d00b8fec6aeab23f02c2646e3254b53917 --- /dev/null +++ b/diffvg/pybind11/tests/test_exceptions.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +import sys + +import pytest + +from pybind11_tests import exceptions as m +import pybind11_cross_module_tests as cm + + +def test_std_exception(msg): + with pytest.raises(RuntimeError) as excinfo: + m.throw_std_exception() + assert msg(excinfo.value) == "This exception was intentionally thrown." 
+ + +def test_error_already_set(msg): + with pytest.raises(RuntimeError) as excinfo: + m.throw_already_set(False) + assert msg(excinfo.value) == "Unknown internal error occurred" + + with pytest.raises(ValueError) as excinfo: + m.throw_already_set(True) + assert msg(excinfo.value) == "foo" + + +def test_cross_module_exceptions(): + with pytest.raises(RuntimeError) as excinfo: + cm.raise_runtime_error() + assert str(excinfo.value) == "My runtime error" + + with pytest.raises(ValueError) as excinfo: + cm.raise_value_error() + assert str(excinfo.value) == "My value error" + + with pytest.raises(ValueError) as excinfo: + cm.throw_pybind_value_error() + assert str(excinfo.value) == "pybind11 value error" + + with pytest.raises(TypeError) as excinfo: + cm.throw_pybind_type_error() + assert str(excinfo.value) == "pybind11 type error" + + with pytest.raises(StopIteration) as excinfo: + cm.throw_stop_iteration() + + +def test_python_call_in_catch(): + d = {} + assert m.python_call_in_destructor(d) is True + assert d["good"] is True + + +def test_python_alreadyset_in_destructor(monkeypatch, capsys): + hooked = False + triggered = [False] # mutable, so Python 2.7 closure can modify it + + if hasattr(sys, 'unraisablehook'): # Python 3.8+ + hooked = True + default_hook = sys.unraisablehook + + def hook(unraisable_hook_args): + exc_type, exc_value, exc_tb, err_msg, obj = unraisable_hook_args + if obj == 'already_set demo': + triggered[0] = True + default_hook(unraisable_hook_args) + return + + # Use monkeypatch so pytest can apply and remove the patch as appropriate + monkeypatch.setattr(sys, 'unraisablehook', hook) + + assert m.python_alreadyset_in_destructor('already_set demo') is True + if hooked: + assert triggered[0] is True + + _, captured_stderr = capsys.readouterr() + # Error message is different in Python 2 and 3, check for words that appear in both + assert 'ignored' in captured_stderr and 'already_set demo' in captured_stderr + + +def test_exception_matches(): + 
assert m.exception_matches() + assert m.exception_matches_base() + assert m.modulenotfound_exception_matches_base() + + +def test_custom(msg): + # Can we catch a MyException? + with pytest.raises(m.MyException) as excinfo: + m.throws1() + assert msg(excinfo.value) == "this error should go to a custom type" + + # Can we translate to standard Python exceptions? + with pytest.raises(RuntimeError) as excinfo: + m.throws2() + assert msg(excinfo.value) == "this error should go to a standard Python exception" + + # Can we handle unknown exceptions? + with pytest.raises(RuntimeError) as excinfo: + m.throws3() + assert msg(excinfo.value) == "Caught an unknown exception!" + + # Can we delegate to another handler by rethrowing? + with pytest.raises(m.MyException) as excinfo: + m.throws4() + assert msg(excinfo.value) == "this error is rethrown" + + # Can we fall-through to the default handler? + with pytest.raises(RuntimeError) as excinfo: + m.throws_logic_error() + assert msg(excinfo.value) == "this error should fall through to the standard handler" + + # OverFlow error translation. + with pytest.raises(OverflowError) as excinfo: + m.throws_overflow_error() + + # Can we handle a helper-declared exception? + with pytest.raises(m.MyException5) as excinfo: + m.throws5() + assert msg(excinfo.value) == "this is a helper-defined translated exception" + + # Exception subclassing: + with pytest.raises(m.MyException5) as excinfo: + m.throws5_1() + assert msg(excinfo.value) == "MyException5 subclass" + assert isinstance(excinfo.value, m.MyException5_1) + + with pytest.raises(m.MyException5_1) as excinfo: + m.throws5_1() + assert msg(excinfo.value) == "MyException5 subclass" + + with pytest.raises(m.MyException5) as excinfo: + try: + m.throws5() + except m.MyException5_1: + raise RuntimeError("Exception error: caught child from parent") + assert msg(excinfo.value) == "this is a helper-defined translated exception" + + +def test_nested_throws(capture): + """Tests nested (e.g. 
C++ -> Python -> C++) exception handling""" + + def throw_myex(): + raise m.MyException("nested error") + + def throw_myex5(): + raise m.MyException5("nested error 5") + + # In the comments below, the exception is caught in the first step, thrown in the last step + + # C++ -> Python + with capture: + m.try_catch(m.MyException5, throw_myex5) + assert str(capture).startswith("MyException5: nested error 5") + + # Python -> C++ -> Python + with pytest.raises(m.MyException) as excinfo: + m.try_catch(m.MyException5, throw_myex) + assert str(excinfo.value) == "nested error" + + def pycatch(exctype, f, *args): + try: + f(*args) + except m.MyException as e: + print(e) + + # C++ -> Python -> C++ -> Python + with capture: + m.try_catch( + m.MyException5, pycatch, m.MyException, m.try_catch, m.MyException, throw_myex5) + assert str(capture).startswith("MyException5: nested error 5") + + # C++ -> Python -> C++ + with capture: + m.try_catch(m.MyException, pycatch, m.MyException5, m.throws4) + assert capture == "this error is rethrown" + + # Python -> C++ -> Python -> C++ + with pytest.raises(m.MyException5) as excinfo: + m.try_catch(m.MyException, pycatch, m.MyException, m.throws5) + assert str(excinfo.value) == "this is a helper-defined translated exception" + + +# This can often happen if you wrap a pybind11 class in a Python wrapper +def test_invalid_repr(): + + class MyRepr(object): + def __repr__(self): + raise AttributeError("Example error") + + with pytest.raises(TypeError): + m.simple_bool_passthrough(MyRepr()) diff --git a/diffvg/pybind11/tests/test_factory_constructors.cpp b/diffvg/pybind11/tests/test_factory_constructors.cpp new file mode 100644 index 0000000000000000000000000000000000000000..61cf33d16ed404563a3da803a4c2ecea4453a3b4 --- /dev/null +++ b/diffvg/pybind11/tests/test_factory_constructors.cpp @@ -0,0 +1,342 @@ +/* + tests/test_factory_constructors.cpp -- tests construction from a factory function + via py::init_factory() + + Copyright (c) 2017 Jason 
Rhinelander + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" +#include + +// Classes for testing python construction via C++ factory function: +// Not publicly constructible, copyable, or movable: +class TestFactory1 { + friend class TestFactoryHelper; + TestFactory1() : value("(empty)") { print_default_created(this); } + TestFactory1(int v) : value(std::to_string(v)) { print_created(this, value); } + TestFactory1(std::string v) : value(std::move(v)) { print_created(this, value); } + TestFactory1(TestFactory1 &&) = delete; + TestFactory1(const TestFactory1 &) = delete; + TestFactory1 &operator=(TestFactory1 &&) = delete; + TestFactory1 &operator=(const TestFactory1 &) = delete; +public: + std::string value; + ~TestFactory1() { print_destroyed(this); } +}; +// Non-public construction, but moveable: +class TestFactory2 { + friend class TestFactoryHelper; + TestFactory2() : value("(empty2)") { print_default_created(this); } + TestFactory2(int v) : value(std::to_string(v)) { print_created(this, value); } + TestFactory2(std::string v) : value(std::move(v)) { print_created(this, value); } +public: + TestFactory2(TestFactory2 &&m) { value = std::move(m.value); print_move_created(this); } + TestFactory2 &operator=(TestFactory2 &&m) { value = std::move(m.value); print_move_assigned(this); return *this; } + std::string value; + ~TestFactory2() { print_destroyed(this); } +}; +// Mixed direct/factory construction: +class TestFactory3 { +protected: + friend class TestFactoryHelper; + TestFactory3() : value("(empty3)") { print_default_created(this); } + TestFactory3(int v) : value(std::to_string(v)) { print_created(this, value); } +public: + TestFactory3(std::string v) : value(std::move(v)) { print_created(this, value); } + TestFactory3(TestFactory3 &&m) { value = std::move(m.value); print_move_created(this); } + TestFactory3 
&operator=(TestFactory3 &&m) { value = std::move(m.value); print_move_assigned(this); return *this; } + std::string value; + virtual ~TestFactory3() { print_destroyed(this); } +}; +// Inheritance test +class TestFactory4 : public TestFactory3 { +public: + TestFactory4() : TestFactory3() { print_default_created(this); } + TestFactory4(int v) : TestFactory3(v) { print_created(this, v); } + virtual ~TestFactory4() { print_destroyed(this); } +}; +// Another class for an invalid downcast test +class TestFactory5 : public TestFactory3 { +public: + TestFactory5(int i) : TestFactory3(i) { print_created(this, i); } + virtual ~TestFactory5() { print_destroyed(this); } +}; + +class TestFactory6 { +protected: + int value; + bool alias = false; +public: + TestFactory6(int i) : value{i} { print_created(this, i); } + TestFactory6(TestFactory6 &&f) { print_move_created(this); value = f.value; alias = f.alias; } + TestFactory6(const TestFactory6 &f) { print_copy_created(this); value = f.value; alias = f.alias; } + virtual ~TestFactory6() { print_destroyed(this); } + virtual int get() { return value; } + bool has_alias() { return alias; } +}; +class PyTF6 : public TestFactory6 { +public: + // Special constructor that allows the factory to construct a PyTF6 from a TestFactory6 only + // when an alias is needed: + PyTF6(TestFactory6 &&base) : TestFactory6(std::move(base)) { alias = true; print_created(this, "move", value); } + PyTF6(int i) : TestFactory6(i) { alias = true; print_created(this, i); } + PyTF6(PyTF6 &&f) : TestFactory6(std::move(f)) { print_move_created(this); } + PyTF6(const PyTF6 &f) : TestFactory6(f) { print_copy_created(this); } + PyTF6(std::string s) : TestFactory6((int) s.size()) { alias = true; print_created(this, s); } + virtual ~PyTF6() { print_destroyed(this); } + int get() override { PYBIND11_OVERLOAD(int, TestFactory6, get, /*no args*/); } +}; + +class TestFactory7 { +protected: + int value; + bool alias = false; +public: + TestFactory7(int i) : value{i} { 
print_created(this, i); } + TestFactory7(TestFactory7 &&f) { print_move_created(this); value = f.value; alias = f.alias; } + TestFactory7(const TestFactory7 &f) { print_copy_created(this); value = f.value; alias = f.alias; } + virtual ~TestFactory7() { print_destroyed(this); } + virtual int get() { return value; } + bool has_alias() { return alias; } +}; +class PyTF7 : public TestFactory7 { +public: + PyTF7(int i) : TestFactory7(i) { alias = true; print_created(this, i); } + PyTF7(PyTF7 &&f) : TestFactory7(std::move(f)) { print_move_created(this); } + PyTF7(const PyTF7 &f) : TestFactory7(f) { print_copy_created(this); } + virtual ~PyTF7() { print_destroyed(this); } + int get() override { PYBIND11_OVERLOAD(int, TestFactory7, get, /*no args*/); } +}; + + +class TestFactoryHelper { +public: + // Non-movable, non-copyable type: + // Return via pointer: + static TestFactory1 *construct1() { return new TestFactory1(); } + // Holder: + static std::unique_ptr construct1(int a) { return std::unique_ptr(new TestFactory1(a)); } + // pointer again + static TestFactory1 *construct1_string(std::string a) { return new TestFactory1(a); } + + // Moveable type: + // pointer: + static TestFactory2 *construct2() { return new TestFactory2(); } + // holder: + static std::unique_ptr construct2(int a) { return std::unique_ptr(new TestFactory2(a)); } + // by value moving: + static TestFactory2 construct2(std::string a) { return TestFactory2(a); } + + // shared_ptr holder type: + // pointer: + static TestFactory3 *construct3() { return new TestFactory3(); } + // holder: + static std::shared_ptr construct3(int a) { return std::shared_ptr(new TestFactory3(a)); } +}; + +TEST_SUBMODULE(factory_constructors, m) { + + // Define various trivial types to allow simpler overload resolution: + py::module m_tag = m.def_submodule("tag"); +#define MAKE_TAG_TYPE(Name) \ + struct Name##_tag {}; \ + py::class_(m_tag, #Name "_tag").def(py::init<>()); \ + m_tag.attr(#Name) = py::cast(Name##_tag{}) + 
MAKE_TAG_TYPE(pointer); + MAKE_TAG_TYPE(unique_ptr); + MAKE_TAG_TYPE(move); + MAKE_TAG_TYPE(shared_ptr); + MAKE_TAG_TYPE(derived); + MAKE_TAG_TYPE(TF4); + MAKE_TAG_TYPE(TF5); + MAKE_TAG_TYPE(null_ptr); + MAKE_TAG_TYPE(null_unique_ptr); + MAKE_TAG_TYPE(null_shared_ptr); + MAKE_TAG_TYPE(base); + MAKE_TAG_TYPE(invalid_base); + MAKE_TAG_TYPE(alias); + MAKE_TAG_TYPE(unaliasable); + MAKE_TAG_TYPE(mixed); + + // test_init_factory_basic, test_bad_type + py::class_(m, "TestFactory1") + .def(py::init([](unique_ptr_tag, int v) { return TestFactoryHelper::construct1(v); })) + .def(py::init(&TestFactoryHelper::construct1_string)) // raw function pointer + .def(py::init([](pointer_tag) { return TestFactoryHelper::construct1(); })) + .def(py::init([](py::handle, int v, py::handle) { return TestFactoryHelper::construct1(v); })) + .def_readwrite("value", &TestFactory1::value) + ; + py::class_(m, "TestFactory2") + .def(py::init([](pointer_tag, int v) { return TestFactoryHelper::construct2(v); })) + .def(py::init([](unique_ptr_tag, std::string v) { return TestFactoryHelper::construct2(v); })) + .def(py::init([](move_tag) { return TestFactoryHelper::construct2(); })) + .def_readwrite("value", &TestFactory2::value) + ; + + // Stateful & reused: + int c = 1; + auto c4a = [c](pointer_tag, TF4_tag, int a) { (void) c; return new TestFactory4(a);}; + + // test_init_factory_basic, test_init_factory_casting + py::class_>(m, "TestFactory3") + .def(py::init([](pointer_tag, int v) { return TestFactoryHelper::construct3(v); })) + .def(py::init([](shared_ptr_tag) { return TestFactoryHelper::construct3(); })) + .def("__init__", [](TestFactory3 &self, std::string v) { new (&self) TestFactory3(v); }) // placement-new ctor + + // factories returning a derived type: + .def(py::init(c4a)) // derived ptr + .def(py::init([](pointer_tag, TF5_tag, int a) { return new TestFactory5(a); })) + // derived shared ptr: + .def(py::init([](shared_ptr_tag, TF4_tag, int a) { return std::make_shared(a); })) + 
.def(py::init([](shared_ptr_tag, TF5_tag, int a) { return std::make_shared(a); })) + + // Returns nullptr: + .def(py::init([](null_ptr_tag) { return (TestFactory3 *) nullptr; })) + .def(py::init([](null_unique_ptr_tag) { return std::unique_ptr(); })) + .def(py::init([](null_shared_ptr_tag) { return std::shared_ptr(); })) + + .def_readwrite("value", &TestFactory3::value) + ; + + // test_init_factory_casting + py::class_>(m, "TestFactory4") + .def(py::init(c4a)) // pointer + ; + + // Doesn't need to be registered, but registering makes getting ConstructorStats easier: + py::class_>(m, "TestFactory5"); + + // test_init_factory_alias + // Alias testing + py::class_(m, "TestFactory6") + .def(py::init([](base_tag, int i) { return TestFactory6(i); })) + .def(py::init([](alias_tag, int i) { return PyTF6(i); })) + .def(py::init([](alias_tag, std::string s) { return PyTF6(s); })) + .def(py::init([](alias_tag, pointer_tag, int i) { return new PyTF6(i); })) + .def(py::init([](base_tag, pointer_tag, int i) { return new TestFactory6(i); })) + .def(py::init([](base_tag, alias_tag, pointer_tag, int i) { return (TestFactory6 *) new PyTF6(i); })) + + .def("get", &TestFactory6::get) + .def("has_alias", &TestFactory6::has_alias) + + .def_static("get_cstats", &ConstructorStats::get, py::return_value_policy::reference) + .def_static("get_alias_cstats", &ConstructorStats::get, py::return_value_policy::reference) + ; + + // test_init_factory_dual + // Separate alias constructor testing + py::class_>(m, "TestFactory7") + .def(py::init( + [](int i) { return TestFactory7(i); }, + [](int i) { return PyTF7(i); })) + .def(py::init( + [](pointer_tag, int i) { return new TestFactory7(i); }, + [](pointer_tag, int i) { return new PyTF7(i); })) + .def(py::init( + [](mixed_tag, int i) { return new TestFactory7(i); }, + [](mixed_tag, int i) { return PyTF7(i); })) + .def(py::init( + [](mixed_tag, std::string s) { return TestFactory7((int) s.size()); }, + [](mixed_tag, std::string s) { return new 
PyTF7((int) s.size()); })) + .def(py::init( + [](base_tag, pointer_tag, int i) { return new TestFactory7(i); }, + [](base_tag, pointer_tag, int i) { return (TestFactory7 *) new PyTF7(i); })) + .def(py::init( + [](alias_tag, pointer_tag, int i) { return new PyTF7(i); }, + [](alias_tag, pointer_tag, int i) { return new PyTF7(10*i); })) + .def(py::init( + [](shared_ptr_tag, base_tag, int i) { return std::make_shared(i); }, + [](shared_ptr_tag, base_tag, int i) { auto *p = new PyTF7(i); return std::shared_ptr(p); })) + .def(py::init( + [](shared_ptr_tag, invalid_base_tag, int i) { return std::make_shared(i); }, + [](shared_ptr_tag, invalid_base_tag, int i) { return std::make_shared(i); })) // <-- invalid alias factory + + .def("get", &TestFactory7::get) + .def("has_alias", &TestFactory7::has_alias) + + .def_static("get_cstats", &ConstructorStats::get, py::return_value_policy::reference) + .def_static("get_alias_cstats", &ConstructorStats::get, py::return_value_policy::reference) + ; + + // test_placement_new_alternative + // Class with a custom new operator but *without* a placement new operator (issue #948) + class NoPlacementNew { + public: + NoPlacementNew(int i) : i(i) { } + static void *operator new(std::size_t s) { + auto *p = ::operator new(s); + py::print("operator new called, returning", reinterpret_cast(p)); + return p; + } + static void operator delete(void *p) { + py::print("operator delete called on", reinterpret_cast(p)); + ::operator delete(p); + } + int i; + }; + // As of 2.2, `py::init` no longer requires placement new + py::class_(m, "NoPlacementNew") + .def(py::init()) + .def(py::init([]() { return new NoPlacementNew(100); })) + .def_readwrite("i", &NoPlacementNew::i) + ; + + + // test_reallocations + // Class that has verbose operator_new/operator_delete calls + struct NoisyAlloc { + NoisyAlloc(const NoisyAlloc &) = default; + NoisyAlloc(int i) { py::print(py::str("NoisyAlloc(int {})").format(i)); } + NoisyAlloc(double d) { 
py::print(py::str("NoisyAlloc(double {})").format(d)); } + ~NoisyAlloc() { py::print("~NoisyAlloc()"); } + + static void *operator new(size_t s) { py::print("noisy new"); return ::operator new(s); } + static void *operator new(size_t, void *p) { py::print("noisy placement new"); return p; } + static void operator delete(void *p, size_t) { py::print("noisy delete"); ::operator delete(p); } + static void operator delete(void *, void *) { py::print("noisy placement delete"); } +#if defined(_MSC_VER) && _MSC_VER < 1910 + // MSVC 2015 bug: the above "noisy delete" isn't invoked (fixed in MSVC 2017) + static void operator delete(void *p) { py::print("noisy delete"); ::operator delete(p); } +#endif + }; + py::class_(m, "NoisyAlloc") + // Since these overloads have the same number of arguments, the dispatcher will try each of + // them until the arguments convert. Thus we can get a pre-allocation here when passing a + // single non-integer: + .def("__init__", [](NoisyAlloc *a, int i) { new (a) NoisyAlloc(i); }) // Regular constructor, runs first, requires preallocation + .def(py::init([](double d) { return new NoisyAlloc(d); })) + + // The two-argument version: first the factory pointer overload. 
+ .def(py::init([](int i, int) { return new NoisyAlloc(i); })) + // Return-by-value: + .def(py::init([](double d, int) { return NoisyAlloc(d); })) + // Old-style placement new init; requires preallocation + .def("__init__", [](NoisyAlloc &a, double d, double) { new (&a) NoisyAlloc(d); }) + // Requires deallocation of previous overload preallocated value: + .def(py::init([](int i, double) { return new NoisyAlloc(i); })) + // Regular again: requires yet another preallocation + .def("__init__", [](NoisyAlloc &a, int i, std::string) { new (&a) NoisyAlloc(i); }) + ; + + + + + // static_assert testing (the following def's should all fail with appropriate compilation errors): +#if 0 + struct BadF1Base {}; + struct BadF1 : BadF1Base {}; + struct PyBadF1 : BadF1 {}; + py::class_> bf1(m, "BadF1"); + // wrapped factory function must return a compatible pointer, holder, or value + bf1.def(py::init([]() { return 3; })); + // incompatible factory function pointer return type + bf1.def(py::init([]() { static int three = 3; return &three; })); + // incompatible factory function std::shared_ptr return type: cannot convert shared_ptr to holder + // (non-polymorphic base) + bf1.def(py::init([]() { return std::shared_ptr(new BadF1()); })); +#endif +} diff --git a/diffvg/pybind11/tests/test_factory_constructors.py b/diffvg/pybind11/tests/test_factory_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..6c4bed165f6575950b7f0f17ec65a88397e0ff54 --- /dev/null +++ b/diffvg/pybind11/tests/test_factory_constructors.py @@ -0,0 +1,465 @@ +# -*- coding: utf-8 -*- +import pytest +import re + +import env # noqa: F401 + +from pybind11_tests import factory_constructors as m +from pybind11_tests.factory_constructors import tag +from pybind11_tests import ConstructorStats + + +def test_init_factory_basic(): + """Tests py::init_factory() wrapper around various ways of returning the object""" + + cstats = [ConstructorStats.get(c) for c in [m.TestFactory1, m.TestFactory2, 
m.TestFactory3]] + cstats[0].alive() # force gc + n_inst = ConstructorStats.detail_reg_inst() + + x1 = m.TestFactory1(tag.unique_ptr, 3) + assert x1.value == "3" + y1 = m.TestFactory1(tag.pointer) + assert y1.value == "(empty)" + z1 = m.TestFactory1("hi!") + assert z1.value == "hi!" + + assert ConstructorStats.detail_reg_inst() == n_inst + 3 + + x2 = m.TestFactory2(tag.move) + assert x2.value == "(empty2)" + y2 = m.TestFactory2(tag.pointer, 7) + assert y2.value == "7" + z2 = m.TestFactory2(tag.unique_ptr, "hi again") + assert z2.value == "hi again" + + assert ConstructorStats.detail_reg_inst() == n_inst + 6 + + x3 = m.TestFactory3(tag.shared_ptr) + assert x3.value == "(empty3)" + y3 = m.TestFactory3(tag.pointer, 42) + assert y3.value == "42" + z3 = m.TestFactory3("bye") + assert z3.value == "bye" + + for null_ptr_kind in [tag.null_ptr, + tag.null_unique_ptr, + tag.null_shared_ptr]: + with pytest.raises(TypeError) as excinfo: + m.TestFactory3(null_ptr_kind) + assert str(excinfo.value) == "pybind11::init(): factory function returned nullptr" + + assert [i.alive() for i in cstats] == [3, 3, 3] + assert ConstructorStats.detail_reg_inst() == n_inst + 9 + + del x1, y2, y3, z3 + assert [i.alive() for i in cstats] == [2, 2, 1] + assert ConstructorStats.detail_reg_inst() == n_inst + 5 + del x2, x3, y1, z1, z2 + assert [i.alive() for i in cstats] == [0, 0, 0] + assert ConstructorStats.detail_reg_inst() == n_inst + + assert [i.values() for i in cstats] == [ + ["3", "hi!"], + ["7", "hi again"], + ["42", "bye"] + ] + assert [i.default_constructions for i in cstats] == [1, 1, 1] + + +def test_init_factory_signature(msg): + with pytest.raises(TypeError) as excinfo: + m.TestFactory1("invalid", "constructor", "arguments") + assert msg(excinfo.value) == """ + __init__(): incompatible constructor arguments. The following argument types are supported: + 1. m.factory_constructors.TestFactory1(arg0: m.factory_constructors.tag.unique_ptr_tag, arg1: int) + 2. 
m.factory_constructors.TestFactory1(arg0: str) + 3. m.factory_constructors.TestFactory1(arg0: m.factory_constructors.tag.pointer_tag) + 4. m.factory_constructors.TestFactory1(arg0: handle, arg1: int, arg2: handle) + + Invoked with: 'invalid', 'constructor', 'arguments' + """ # noqa: E501 line too long + + assert msg(m.TestFactory1.__init__.__doc__) == """ + __init__(*args, **kwargs) + Overloaded function. + + 1. __init__(self: m.factory_constructors.TestFactory1, arg0: m.factory_constructors.tag.unique_ptr_tag, arg1: int) -> None + + 2. __init__(self: m.factory_constructors.TestFactory1, arg0: str) -> None + + 3. __init__(self: m.factory_constructors.TestFactory1, arg0: m.factory_constructors.tag.pointer_tag) -> None + + 4. __init__(self: m.factory_constructors.TestFactory1, arg0: handle, arg1: int, arg2: handle) -> None + """ # noqa: E501 line too long + + +def test_init_factory_casting(): + """Tests py::init_factory() wrapper with various upcasting and downcasting returns""" + + cstats = [ConstructorStats.get(c) for c in [m.TestFactory3, m.TestFactory4, m.TestFactory5]] + cstats[0].alive() # force gc + n_inst = ConstructorStats.detail_reg_inst() + + # Construction from derived references: + a = m.TestFactory3(tag.pointer, tag.TF4, 4) + assert a.value == "4" + b = m.TestFactory3(tag.shared_ptr, tag.TF4, 5) + assert b.value == "5" + c = m.TestFactory3(tag.pointer, tag.TF5, 6) + assert c.value == "6" + d = m.TestFactory3(tag.shared_ptr, tag.TF5, 7) + assert d.value == "7" + + assert ConstructorStats.detail_reg_inst() == n_inst + 4 + + # Shared a lambda with TF3: + e = m.TestFactory4(tag.pointer, tag.TF4, 8) + assert e.value == "8" + + assert ConstructorStats.detail_reg_inst() == n_inst + 5 + assert [i.alive() for i in cstats] == [5, 3, 2] + + del a + assert [i.alive() for i in cstats] == [4, 2, 2] + assert ConstructorStats.detail_reg_inst() == n_inst + 4 + + del b, c, e + assert [i.alive() for i in cstats] == [1, 0, 1] + assert ConstructorStats.detail_reg_inst() == 
n_inst + 1 + + del d + assert [i.alive() for i in cstats] == [0, 0, 0] + assert ConstructorStats.detail_reg_inst() == n_inst + + assert [i.values() for i in cstats] == [ + ["4", "5", "6", "7", "8"], + ["4", "5", "8"], + ["6", "7"] + ] + + +def test_init_factory_alias(): + """Tests py::init_factory() wrapper with value conversions and alias types""" + + cstats = [m.TestFactory6.get_cstats(), m.TestFactory6.get_alias_cstats()] + cstats[0].alive() # force gc + n_inst = ConstructorStats.detail_reg_inst() + + a = m.TestFactory6(tag.base, 1) + assert a.get() == 1 + assert not a.has_alias() + b = m.TestFactory6(tag.alias, "hi there") + assert b.get() == 8 + assert b.has_alias() + c = m.TestFactory6(tag.alias, 3) + assert c.get() == 3 + assert c.has_alias() + d = m.TestFactory6(tag.alias, tag.pointer, 4) + assert d.get() == 4 + assert d.has_alias() + e = m.TestFactory6(tag.base, tag.pointer, 5) + assert e.get() == 5 + assert not e.has_alias() + f = m.TestFactory6(tag.base, tag.alias, tag.pointer, 6) + assert f.get() == 6 + assert f.has_alias() + + assert ConstructorStats.detail_reg_inst() == n_inst + 6 + assert [i.alive() for i in cstats] == [6, 4] + + del a, b, e + assert [i.alive() for i in cstats] == [3, 3] + assert ConstructorStats.detail_reg_inst() == n_inst + 3 + del f, c, d + assert [i.alive() for i in cstats] == [0, 0] + assert ConstructorStats.detail_reg_inst() == n_inst + + class MyTest(m.TestFactory6): + def __init__(self, *args): + m.TestFactory6.__init__(self, *args) + + def get(self): + return -5 + m.TestFactory6.get(self) + + # Return Class by value, moved into new alias: + z = MyTest(tag.base, 123) + assert z.get() == 118 + assert z.has_alias() + + # Return alias by value, moved into new alias: + y = MyTest(tag.alias, "why hello!") + assert y.get() == 5 + assert y.has_alias() + + # Return Class by pointer, moved into new alias then original destroyed: + x = MyTest(tag.base, tag.pointer, 47) + assert x.get() == 42 + assert x.has_alias() + + assert 
ConstructorStats.detail_reg_inst() == n_inst + 3 + assert [i.alive() for i in cstats] == [3, 3] + del x, y, z + assert [i.alive() for i in cstats] == [0, 0] + assert ConstructorStats.detail_reg_inst() == n_inst + + assert [i.values() for i in cstats] == [ + ["1", "8", "3", "4", "5", "6", "123", "10", "47"], + ["hi there", "3", "4", "6", "move", "123", "why hello!", "move", "47"] + ] + + +def test_init_factory_dual(): + """Tests init factory functions with dual main/alias factory functions""" + from pybind11_tests.factory_constructors import TestFactory7 + + cstats = [TestFactory7.get_cstats(), TestFactory7.get_alias_cstats()] + cstats[0].alive() # force gc + n_inst = ConstructorStats.detail_reg_inst() + + class PythFactory7(TestFactory7): + def get(self): + return 100 + TestFactory7.get(self) + + a1 = TestFactory7(1) + a2 = PythFactory7(2) + assert a1.get() == 1 + assert a2.get() == 102 + assert not a1.has_alias() + assert a2.has_alias() + + b1 = TestFactory7(tag.pointer, 3) + b2 = PythFactory7(tag.pointer, 4) + assert b1.get() == 3 + assert b2.get() == 104 + assert not b1.has_alias() + assert b2.has_alias() + + c1 = TestFactory7(tag.mixed, 5) + c2 = PythFactory7(tag.mixed, 6) + assert c1.get() == 5 + assert c2.get() == 106 + assert not c1.has_alias() + assert c2.has_alias() + + d1 = TestFactory7(tag.base, tag.pointer, 7) + d2 = PythFactory7(tag.base, tag.pointer, 8) + assert d1.get() == 7 + assert d2.get() == 108 + assert not d1.has_alias() + assert d2.has_alias() + + # Both return an alias; the second multiplies the value by 10: + e1 = TestFactory7(tag.alias, tag.pointer, 9) + e2 = PythFactory7(tag.alias, tag.pointer, 10) + assert e1.get() == 9 + assert e2.get() == 200 + assert e1.has_alias() + assert e2.has_alias() + + f1 = TestFactory7(tag.shared_ptr, tag.base, 11) + f2 = PythFactory7(tag.shared_ptr, tag.base, 12) + assert f1.get() == 11 + assert f2.get() == 112 + assert not f1.has_alias() + assert f2.has_alias() + + g1 = TestFactory7(tag.shared_ptr, 
tag.invalid_base, 13) + assert g1.get() == 13 + assert not g1.has_alias() + with pytest.raises(TypeError) as excinfo: + PythFactory7(tag.shared_ptr, tag.invalid_base, 14) + assert (str(excinfo.value) == + "pybind11::init(): construction failed: returned holder-wrapped instance is not an " + "alias instance") + + assert [i.alive() for i in cstats] == [13, 7] + assert ConstructorStats.detail_reg_inst() == n_inst + 13 + + del a1, a2, b1, d1, e1, e2 + assert [i.alive() for i in cstats] == [7, 4] + assert ConstructorStats.detail_reg_inst() == n_inst + 7 + del b2, c1, c2, d2, f1, f2, g1 + assert [i.alive() for i in cstats] == [0, 0] + assert ConstructorStats.detail_reg_inst() == n_inst + + assert [i.values() for i in cstats] == [ + ["1", "2", "3", "4", "5", "6", "7", "8", "9", "100", "11", "12", "13", "14"], + ["2", "4", "6", "8", "9", "100", "12"] + ] + + +def test_no_placement_new(capture): + """Prior to 2.2, `py::init<...>` relied on the type supporting placement + new; this tests a class without placement new support.""" + with capture: + a = m.NoPlacementNew(123) + + found = re.search(r'^operator new called, returning (\d+)\n$', str(capture)) + assert found + assert a.i == 123 + with capture: + del a + pytest.gc_collect() + assert capture == "operator delete called on " + found.group(1) + + with capture: + b = m.NoPlacementNew() + + found = re.search(r'^operator new called, returning (\d+)\n$', str(capture)) + assert found + assert b.i == 100 + with capture: + del b + pytest.gc_collect() + assert capture == "operator delete called on " + found.group(1) + + +def test_multiple_inheritance(): + class MITest(m.TestFactory1, m.TestFactory2): + def __init__(self): + m.TestFactory1.__init__(self, tag.unique_ptr, 33) + m.TestFactory2.__init__(self, tag.move) + + a = MITest() + assert m.TestFactory1.value.fget(a) == "33" + assert m.TestFactory2.value.fget(a) == "(empty2)" + + +def create_and_destroy(*args): + a = m.NoisyAlloc(*args) + print("---") + del a + 
pytest.gc_collect() + + +def strip_comments(s): + return re.sub(r'\s+#.*', '', s) + + +def test_reallocations(capture, msg): + """When the constructor is overloaded, previous overloads can require a preallocated value. + This test makes sure that such preallocated values only happen when they might be necessary, + and that they are deallocated properly""" + + pytest.gc_collect() + + with capture: + create_and_destroy(1) + assert msg(capture) == """ + noisy new + noisy placement new + NoisyAlloc(int 1) + --- + ~NoisyAlloc() + noisy delete + """ + with capture: + create_and_destroy(1.5) + assert msg(capture) == strip_comments(""" + noisy new # allocation required to attempt first overload + noisy delete # have to dealloc before considering factory init overload + noisy new # pointer factory calling "new", part 1: allocation + NoisyAlloc(double 1.5) # ... part two, invoking constructor + --- + ~NoisyAlloc() # Destructor + noisy delete # operator delete + """) + + with capture: + create_and_destroy(2, 3) + assert msg(capture) == strip_comments(""" + noisy new # pointer factory calling "new", allocation + NoisyAlloc(int 2) # constructor + --- + ~NoisyAlloc() # Destructor + noisy delete # operator delete + """) + + with capture: + create_and_destroy(2.5, 3) + assert msg(capture) == strip_comments(""" + NoisyAlloc(double 2.5) # construction (local func variable: operator_new not called) + noisy new # return-by-value "new" part 1: allocation + ~NoisyAlloc() # moved-away local func variable destruction + --- + ~NoisyAlloc() # Destructor + noisy delete # operator delete + """) + + with capture: + create_and_destroy(3.5, 4.5) + assert msg(capture) == strip_comments(""" + noisy new # preallocation needed before invoking placement-new overload + noisy placement new # Placement new + NoisyAlloc(double 3.5) # construction + --- + ~NoisyAlloc() # Destructor + noisy delete # operator delete + """) + + with capture: + create_and_destroy(4, 0.5) + assert msg(capture) == 
strip_comments(""" + noisy new # preallocation needed before invoking placement-new overload + noisy delete # deallocation of preallocated storage + noisy new # Factory pointer allocation + NoisyAlloc(int 4) # factory pointer construction + --- + ~NoisyAlloc() # Destructor + noisy delete # operator delete + """) + + with capture: + create_and_destroy(5, "hi") + assert msg(capture) == strip_comments(""" + noisy new # preallocation needed before invoking first placement new + noisy delete # delete before considering new-style constructor + noisy new # preallocation for second placement new + noisy placement new # Placement new in the second placement new overload + NoisyAlloc(int 5) # construction + --- + ~NoisyAlloc() # Destructor + noisy delete # operator delete + """) + + +@pytest.mark.skipif("env.PY2") +def test_invalid_self(): + """Tests invocation of the pybind-registered base class with an invalid `self` argument. You + can only actually do this on Python 3: Python 2 raises an exception itself if you try.""" + class NotPybindDerived(object): + pass + + # Attempts to initialize with an invalid type passed as `self`: + class BrokenTF1(m.TestFactory1): + def __init__(self, bad): + if bad == 1: + a = m.TestFactory2(tag.pointer, 1) + m.TestFactory1.__init__(a, tag.pointer) + elif bad == 2: + a = NotPybindDerived() + m.TestFactory1.__init__(a, tag.pointer) + + # Same as above, but for a class with an alias: + class BrokenTF6(m.TestFactory6): + def __init__(self, bad): + if bad == 1: + a = m.TestFactory2(tag.pointer, 1) + m.TestFactory6.__init__(a, tag.base, 1) + elif bad == 2: + a = m.TestFactory2(tag.pointer, 1) + m.TestFactory6.__init__(a, tag.alias, 1) + elif bad == 3: + m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.base, 1) + elif bad == 4: + m.TestFactory6.__init__(NotPybindDerived.__new__(NotPybindDerived), tag.alias, 1) + + for arg in (1, 2): + with pytest.raises(TypeError) as excinfo: + BrokenTF1(arg) + assert str(excinfo.value) == 
"__init__(self, ...) called with invalid `self` argument" + + for arg in (1, 2, 3, 4): + with pytest.raises(TypeError) as excinfo: + BrokenTF6(arg) + assert str(excinfo.value) == "__init__(self, ...) called with invalid `self` argument" diff --git a/diffvg/pybind11/tests/test_gil_scoped.cpp b/diffvg/pybind11/tests/test_gil_scoped.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dc9b7ed2243a832c19d6826836ac579456232441 --- /dev/null +++ b/diffvg/pybind11/tests/test_gil_scoped.cpp @@ -0,0 +1,54 @@ +/* + tests/test_gil_scoped.cpp -- acquire and release gil + + Copyright (c) 2017 Borja Zarco (Google LLC) + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include + + +class VirtClass { +public: + virtual ~VirtClass() = default; + VirtClass() = default; + VirtClass(const VirtClass&) = delete; + virtual void virtual_func() {} + virtual void pure_virtual_func() = 0; +}; + +class PyVirtClass : public VirtClass { + void virtual_func() override { + PYBIND11_OVERLOAD(void, VirtClass, virtual_func,); + } + void pure_virtual_func() override { + PYBIND11_OVERLOAD_PURE(void, VirtClass, pure_virtual_func,); + } +}; + +TEST_SUBMODULE(gil_scoped, m) { + py::class_(m, "VirtClass") + .def(py::init<>()) + .def("virtual_func", &VirtClass::virtual_func) + .def("pure_virtual_func", &VirtClass::pure_virtual_func); + + m.def("test_callback_py_obj", + [](py::object func) { func(); }); + m.def("test_callback_std_func", + [](const std::function &func) { func(); }); + m.def("test_callback_virtual_func", + [](VirtClass &virt) { virt.virtual_func(); }); + m.def("test_callback_pure_virtual_func", + [](VirtClass &virt) { virt.pure_virtual_func(); }); + m.def("test_cross_module_gil", + []() { + auto cm = py::module::import("cross_module_gil_utils"); + auto gil_acquire = reinterpret_cast( + PyLong_AsVoidPtr(cm.attr("gil_acquire_funcaddr").ptr())); + 
py::gil_scoped_release gil_release; + gil_acquire(); + }); +} diff --git a/diffvg/pybind11/tests/test_gil_scoped.py b/diffvg/pybind11/tests/test_gil_scoped.py new file mode 100644 index 0000000000000000000000000000000000000000..27122cca2818a3cd0b61f051d5c8ac631ba9d8fc --- /dev/null +++ b/diffvg/pybind11/tests/test_gil_scoped.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +import multiprocessing +import threading + +import pytest + +import env # noqa: F401 + +from pybind11_tests import gil_scoped as m + + +def _run_in_process(target, *args, **kwargs): + """Runs target in process and returns its exitcode after 10s (None if still alive).""" + process = multiprocessing.Process(target=target, args=args, kwargs=kwargs) + process.daemon = True + try: + process.start() + # Do not need to wait much, 10s should be more than enough. + process.join(timeout=10) + return process.exitcode + finally: + if process.is_alive(): + process.terminate() + + +def _python_to_cpp_to_python(): + """Calls different C++ functions that come back to Python.""" + class ExtendedVirtClass(m.VirtClass): + def virtual_func(self): + pass + + def pure_virtual_func(self): + pass + + extended = ExtendedVirtClass() + m.test_callback_py_obj(lambda: None) + m.test_callback_std_func(lambda: None) + m.test_callback_virtual_func(extended) + m.test_callback_pure_virtual_func(extended) + + +def _python_to_cpp_to_python_from_threads(num_threads, parallel=False): + """Calls different C++ functions that come back to Python, from Python threads.""" + threads = [] + for _ in range(num_threads): + thread = threading.Thread(target=_python_to_cpp_to_python) + thread.daemon = True + thread.start() + if parallel: + threads.append(thread) + else: + thread.join() + for thread in threads: + thread.join() + + +# TODO: FIXME, sometimes returns -11 instead of 0 +@pytest.mark.xfail("env.PY > (3,8) and env.MACOS", strict=False) +def test_python_to_cpp_to_python_from_thread(): + """Makes sure there is no GIL deadlock when running in 
a thread. + + It runs in a separate process to be able to stop and assert if it deadlocks. + """ + assert _run_in_process(_python_to_cpp_to_python_from_threads, 1) == 0 + + +# TODO: FIXME +@pytest.mark.xfail("env.PY > (3,8) and env.MACOS", strict=False) +def test_python_to_cpp_to_python_from_thread_multiple_parallel(): + """Makes sure there is no GIL deadlock when running in a thread multiple times in parallel. + + It runs in a separate process to be able to stop and assert if it deadlocks. + """ + assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=True) == 0 + + +# TODO: FIXME +@pytest.mark.xfail("env.PY > (3,8) and env.MACOS", strict=False) +def test_python_to_cpp_to_python_from_thread_multiple_sequential(): + """Makes sure there is no GIL deadlock when running in a thread multiple times sequentially. + + It runs in a separate process to be able to stop and assert if it deadlocks. + """ + assert _run_in_process(_python_to_cpp_to_python_from_threads, 8, parallel=False) == 0 + + +# TODO: FIXME +@pytest.mark.xfail("env.PY > (3,8) and env.MACOS", strict=False) +def test_python_to_cpp_to_python_from_process(): + """Makes sure there is no GIL deadlock when using processes. + + This test is for completion, but it was never an issue. + """ + assert _run_in_process(_python_to_cpp_to_python) == 0 + + +def test_cross_module_gil(): + """Makes sure that the GIL can be acquired by another module from a GIL-released state.""" + m.test_cross_module_gil() # Should not raise a SIGSEGV diff --git a/diffvg/pybind11/tests/test_iostream.cpp b/diffvg/pybind11/tests/test_iostream.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e67f88af5fd2d377221a6fcd6c890dec5344df48 --- /dev/null +++ b/diffvg/pybind11/tests/test_iostream.cpp @@ -0,0 +1,73 @@ +/* + tests/test_iostream.cpp -- Usage of scoped_output_redirect + + Copyright (c) 2017 Henry F. Schreiner + + All rights reserved. 
Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + + +#include +#include "pybind11_tests.h" +#include + + +void noisy_function(std::string msg, bool flush) { + + std::cout << msg; + if (flush) + std::cout << std::flush; +} + +void noisy_funct_dual(std::string msg, std::string emsg) { + std::cout << msg; + std::cerr << emsg; +} + +TEST_SUBMODULE(iostream, m) { + + add_ostream_redirect(m); + + // test_evals + + m.def("captured_output_default", [](std::string msg) { + py::scoped_ostream_redirect redir; + std::cout << msg << std::flush; + }); + + m.def("captured_output", [](std::string msg) { + py::scoped_ostream_redirect redir(std::cout, py::module::import("sys").attr("stdout")); + std::cout << msg << std::flush; + }); + + m.def("guard_output", &noisy_function, + py::call_guard(), + py::arg("msg"), py::arg("flush")=true); + + m.def("captured_err", [](std::string msg) { + py::scoped_ostream_redirect redir(std::cerr, py::module::import("sys").attr("stderr")); + std::cerr << msg << std::flush; + }); + + m.def("noisy_function", &noisy_function, py::arg("msg"), py::arg("flush") = true); + + m.def("dual_guard", &noisy_funct_dual, + py::call_guard(), + py::arg("msg"), py::arg("emsg")); + + m.def("raw_output", [](std::string msg) { + std::cout << msg << std::flush; + }); + + m.def("raw_err", [](std::string msg) { + std::cerr << msg << std::flush; + }); + + m.def("captured_dual", [](std::string msg, std::string emsg) { + py::scoped_ostream_redirect redirout(std::cout, py::module::import("sys").attr("stdout")); + py::scoped_ostream_redirect redirerr(std::cerr, py::module::import("sys").attr("stderr")); + std::cout << msg << std::flush; + std::cerr << emsg << std::flush; + }); +} diff --git a/diffvg/pybind11/tests/test_iostream.py b/diffvg/pybind11/tests/test_iostream.py new file mode 100644 index 0000000000000000000000000000000000000000..7ac4fcece0b089c03e240a0ae89e54c0c33feedf --- /dev/null +++ 
b/diffvg/pybind11/tests/test_iostream.py @@ -0,0 +1,215 @@ +# -*- coding: utf-8 -*- +from pybind11_tests import iostream as m +import sys + +from contextlib import contextmanager + +try: + # Python 3 + from io import StringIO +except ImportError: + # Python 2 + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + +try: + # Python 3.4 + from contextlib import redirect_stdout +except ImportError: + @contextmanager + def redirect_stdout(target): + original = sys.stdout + sys.stdout = target + yield + sys.stdout = original + +try: + # Python 3.5 + from contextlib import redirect_stderr +except ImportError: + @contextmanager + def redirect_stderr(target): + original = sys.stderr + sys.stderr = target + yield + sys.stderr = original + + +def test_captured(capsys): + msg = "I've been redirected to Python, I hope!" + m.captured_output(msg) + stdout, stderr = capsys.readouterr() + assert stdout == msg + assert stderr == '' + + m.captured_output_default(msg) + stdout, stderr = capsys.readouterr() + assert stdout == msg + assert stderr == '' + + m.captured_err(msg) + stdout, stderr = capsys.readouterr() + assert stdout == '' + assert stderr == msg + + +def test_captured_large_string(capsys): + # Make this bigger than the buffer used on the C++ side: 1024 chars + msg = "I've been redirected to Python, I hope!" + msg = msg * (1024 // len(msg) + 1) + + m.captured_output_default(msg) + stdout, stderr = capsys.readouterr() + assert stdout == msg + assert stderr == '' + + +def test_guard_capture(capsys): + msg = "I've been redirected to Python, I hope!" 
+ m.guard_output(msg) + stdout, stderr = capsys.readouterr() + assert stdout == msg + assert stderr == '' + + +def test_series_captured(capture): + with capture: + m.captured_output("a") + m.captured_output("b") + assert capture == "ab" + + +def test_flush(capfd): + msg = "(not flushed)" + msg2 = "(flushed)" + + with m.ostream_redirect(): + m.noisy_function(msg, flush=False) + stdout, stderr = capfd.readouterr() + assert stdout == '' + + m.noisy_function(msg2, flush=True) + stdout, stderr = capfd.readouterr() + assert stdout == msg + msg2 + + m.noisy_function(msg, flush=False) + + stdout, stderr = capfd.readouterr() + assert stdout == msg + + +def test_not_captured(capfd): + msg = "Something that should not show up in log" + stream = StringIO() + with redirect_stdout(stream): + m.raw_output(msg) + stdout, stderr = capfd.readouterr() + assert stdout == msg + assert stderr == '' + assert stream.getvalue() == '' + + stream = StringIO() + with redirect_stdout(stream): + m.captured_output(msg) + stdout, stderr = capfd.readouterr() + assert stdout == '' + assert stderr == '' + assert stream.getvalue() == msg + + +def test_err(capfd): + msg = "Something that should not show up in log" + stream = StringIO() + with redirect_stderr(stream): + m.raw_err(msg) + stdout, stderr = capfd.readouterr() + assert stdout == '' + assert stderr == msg + assert stream.getvalue() == '' + + stream = StringIO() + with redirect_stderr(stream): + m.captured_err(msg) + stdout, stderr = capfd.readouterr() + assert stdout == '' + assert stderr == '' + assert stream.getvalue() == msg + + +def test_multi_captured(capfd): + stream = StringIO() + with redirect_stdout(stream): + m.captured_output("a") + m.raw_output("b") + m.captured_output("c") + m.raw_output("d") + stdout, stderr = capfd.readouterr() + assert stdout == 'bd' + assert stream.getvalue() == 'ac' + + +def test_dual(capsys): + m.captured_dual("a", "b") + stdout, stderr = capsys.readouterr() + assert stdout == "a" + assert stderr == "b" + 
+ +def test_redirect(capfd): + msg = "Should not be in log!" + stream = StringIO() + with redirect_stdout(stream): + m.raw_output(msg) + stdout, stderr = capfd.readouterr() + assert stdout == msg + assert stream.getvalue() == '' + + stream = StringIO() + with redirect_stdout(stream): + with m.ostream_redirect(): + m.raw_output(msg) + stdout, stderr = capfd.readouterr() + assert stdout == '' + assert stream.getvalue() == msg + + stream = StringIO() + with redirect_stdout(stream): + m.raw_output(msg) + stdout, stderr = capfd.readouterr() + assert stdout == msg + assert stream.getvalue() == '' + + +def test_redirect_err(capfd): + msg = "StdOut" + msg2 = "StdErr" + + stream = StringIO() + with redirect_stderr(stream): + with m.ostream_redirect(stdout=False): + m.raw_output(msg) + m.raw_err(msg2) + stdout, stderr = capfd.readouterr() + assert stdout == msg + assert stderr == '' + assert stream.getvalue() == msg2 + + +def test_redirect_both(capfd): + msg = "StdOut" + msg2 = "StdErr" + + stream = StringIO() + stream2 = StringIO() + with redirect_stdout(stream): + with redirect_stderr(stream2): + with m.ostream_redirect(): + m.raw_output(msg) + m.raw_err(msg2) + stdout, stderr = capfd.readouterr() + assert stdout == '' + assert stderr == '' + assert stream.getvalue() == msg + assert stream2.getvalue() == msg2 diff --git a/diffvg/pybind11/tests/test_kwargs_and_defaults.cpp b/diffvg/pybind11/tests/test_kwargs_and_defaults.cpp new file mode 100644 index 0000000000000000000000000000000000000000..64bc2377b255350a5a4e0f22ce0e5a3b1e4082ea --- /dev/null +++ b/diffvg/pybind11/tests/test_kwargs_and_defaults.cpp @@ -0,0 +1,131 @@ +/* + tests/test_kwargs_and_defaults.cpp -- keyword arguments and default values + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" +#include + +TEST_SUBMODULE(kwargs_and_defaults, m) { + auto kw_func = [](int x, int y) { return "x=" + std::to_string(x) + ", y=" + std::to_string(y); }; + + // test_named_arguments + m.def("kw_func0", kw_func); + m.def("kw_func1", kw_func, py::arg("x"), py::arg("y")); + m.def("kw_func2", kw_func, py::arg("x") = 100, py::arg("y") = 200); + m.def("kw_func3", [](const char *) { }, py::arg("data") = std::string("Hello world!")); + + /* A fancier default argument */ + std::vector list{{13, 17}}; + m.def("kw_func4", [](const std::vector &entries) { + std::string ret = "{"; + for (int i : entries) + ret += std::to_string(i) + " "; + ret.back() = '}'; + return ret; + }, py::arg("myList") = list); + + m.def("kw_func_udl", kw_func, "x"_a, "y"_a=300); + m.def("kw_func_udl_z", kw_func, "x"_a, "y"_a=0); + + // test_args_and_kwargs + m.def("args_function", [](py::args args) -> py::tuple { + return std::move(args); + }); + m.def("args_kwargs_function", [](py::args args, py::kwargs kwargs) { + return py::make_tuple(args, kwargs); + }); + + // test_mixed_args_and_kwargs + m.def("mixed_plus_args", [](int i, double j, py::args args) { + return py::make_tuple(i, j, args); + }); + m.def("mixed_plus_kwargs", [](int i, double j, py::kwargs kwargs) { + return py::make_tuple(i, j, kwargs); + }); + auto mixed_plus_both = [](int i, double j, py::args args, py::kwargs kwargs) { + return py::make_tuple(i, j, args, kwargs); + }; + m.def("mixed_plus_args_kwargs", mixed_plus_both); + + m.def("mixed_plus_args_kwargs_defaults", mixed_plus_both, + py::arg("i") = 1, py::arg("j") = 3.14159); + + // test_args_refcount + // PyPy needs a garbage collection to get the reference count values to match CPython's behaviour + #ifdef PYPY_VERSION + #define GC_IF_NEEDED ConstructorStats::gc() + #else + #define GC_IF_NEEDED + #endif + m.def("arg_refcount_h", [](py::handle h) { GC_IF_NEEDED; return h.ref_count(); }); + m.def("arg_refcount_h", 
[](py::handle h, py::handle, py::handle) { GC_IF_NEEDED; return h.ref_count(); }); + m.def("arg_refcount_o", [](py::object o) { GC_IF_NEEDED; return o.ref_count(); }); + m.def("args_refcount", [](py::args a) { + GC_IF_NEEDED; + py::tuple t(a.size()); + for (size_t i = 0; i < a.size(); i++) + // Use raw Python API here to avoid an extra, intermediate incref on the tuple item: + t[i] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast(i))); + return t; + }); + m.def("mixed_args_refcount", [](py::object o, py::args a) { + GC_IF_NEEDED; + py::tuple t(a.size() + 1); + t[0] = o.ref_count(); + for (size_t i = 0; i < a.size(); i++) + // Use raw Python API here to avoid an extra, intermediate incref on the tuple item: + t[i + 1] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast(i))); + return t; + }); + + // pybind11 won't allow these to be bound: args and kwargs, if present, must be at the end. + // Uncomment these to test that the static_assert is indeed working: +// m.def("bad_args1", [](py::args, int) {}); +// m.def("bad_args2", [](py::kwargs, int) {}); +// m.def("bad_args3", [](py::kwargs, py::args) {}); +// m.def("bad_args4", [](py::args, int, py::kwargs) {}); +// m.def("bad_args5", [](py::args, py::kwargs, int) {}); +// m.def("bad_args6", [](py::args, py::args) {}); +// m.def("bad_args7", [](py::kwargs, py::kwargs) {}); + + // test_keyword_only_args + m.def("kwonly_all", [](int i, int j) { return py::make_tuple(i, j); }, + py::kwonly(), py::arg("i"), py::arg("j")); + m.def("kwonly_some", [](int i, int j, int k) { return py::make_tuple(i, j, k); }, + py::arg(), py::kwonly(), py::arg("j"), py::arg("k")); + m.def("kwonly_with_defaults", [](int i, int j, int k, int z) { return py::make_tuple(i, j, k, z); }, + py::arg() = 3, "j"_a = 4, py::kwonly(), "k"_a = 5, "z"_a); + m.def("kwonly_mixed", [](int i, int j) { return py::make_tuple(i, j); }, + "i"_a, py::kwonly(), "j"_a); + m.def("kwonly_plus_more", [](int i, int j, int k, py::kwargs kwargs) { + return 
py::make_tuple(i, j, k, kwargs); }, + py::arg() /* positional */, py::arg("j") = -1 /* both */, py::kwonly(), py::arg("k") /* kw-only */); + + m.def("register_invalid_kwonly", [](py::module m) { + m.def("bad_kwonly", [](int i, int j) { return py::make_tuple(i, j); }, + py::kwonly(), py::arg() /* invalid unnamed argument */, "j"_a); + }); + + // These should fail to compile: + // argument annotations are required when using kwonly +// m.def("bad_kwonly1", [](int) {}, py::kwonly()); + // can't specify both `py::kwonly` and a `py::args` argument +// m.def("bad_kwonly2", [](int i, py::args) {}, py::kwonly(), "i"_a); + + // test_function_signatures (along with most of the above) + struct KWClass { void foo(int, float) {} }; + py::class_(m, "KWClass") + .def("foo0", &KWClass::foo) + .def("foo1", &KWClass::foo, "x"_a, "y"_a); + + // Make sure a class (not an instance) can be used as a default argument. + // The return value doesn't matter, only that the module is importable. + m.def("class_default_argument", [](py::object a) { return py::repr(a); }, + "a"_a = py::module::import("decimal").attr("Decimal")); +} diff --git a/diffvg/pybind11/tests/test_kwargs_and_defaults.py b/diffvg/pybind11/tests/test_kwargs_and_defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..5257e0cd3061707f0dd1b79de54a0c6cdae81cd1 --- /dev/null +++ b/diffvg/pybind11/tests/test_kwargs_and_defaults.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +import pytest + +import env # noqa: F401 + +from pybind11_tests import kwargs_and_defaults as m + + +def test_function_signatures(doc): + assert doc(m.kw_func0) == "kw_func0(arg0: int, arg1: int) -> str" + assert doc(m.kw_func1) == "kw_func1(x: int, y: int) -> str" + assert doc(m.kw_func2) == "kw_func2(x: int = 100, y: int = 200) -> str" + assert doc(m.kw_func3) == "kw_func3(data: str = 'Hello world!') -> None" + assert doc(m.kw_func4) == "kw_func4(myList: List[int] = [13, 17]) -> str" + assert doc(m.kw_func_udl) == "kw_func_udl(x: 
int, y: int = 300) -> str" + assert doc(m.kw_func_udl_z) == "kw_func_udl_z(x: int, y: int = 0) -> str" + assert doc(m.args_function) == "args_function(*args) -> tuple" + assert doc(m.args_kwargs_function) == "args_kwargs_function(*args, **kwargs) -> tuple" + assert doc(m.KWClass.foo0) == \ + "foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None" + assert doc(m.KWClass.foo1) == \ + "foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None" + + +def test_named_arguments(msg): + assert m.kw_func0(5, 10) == "x=5, y=10" + + assert m.kw_func1(5, 10) == "x=5, y=10" + assert m.kw_func1(5, y=10) == "x=5, y=10" + assert m.kw_func1(y=10, x=5) == "x=5, y=10" + + assert m.kw_func2() == "x=100, y=200" + assert m.kw_func2(5) == "x=5, y=200" + assert m.kw_func2(x=5) == "x=5, y=200" + assert m.kw_func2(y=10) == "x=100, y=10" + assert m.kw_func2(5, 10) == "x=5, y=10" + assert m.kw_func2(x=5, y=10) == "x=5, y=10" + + with pytest.raises(TypeError) as excinfo: + # noinspection PyArgumentList + m.kw_func2(x=5, y=10, z=12) + assert excinfo.match( + r'(?s)^kw_func2\(\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))' + '{3}$') + + assert m.kw_func4() == "{13 17}" + assert m.kw_func4(myList=[1, 2, 3]) == "{1 2 3}" + + assert m.kw_func_udl(x=5, y=10) == "x=5, y=10" + assert m.kw_func_udl_z(x=5) == "x=5, y=0" + + +def test_arg_and_kwargs(): + args = 'arg1_value', 'arg2_value', 3 + assert m.args_function(*args) == args + + args = 'a1', 'a2' + kwargs = dict(arg3='a3', arg4=4) + assert m.args_kwargs_function(*args, **kwargs) == (args, kwargs) + + +def test_mixed_args_and_kwargs(msg): + mpa = m.mixed_plus_args + mpk = m.mixed_plus_kwargs + mpak = m.mixed_plus_args_kwargs + mpakd = m.mixed_plus_args_kwargs_defaults + + assert mpa(1, 2.5, 4, 99.5, None) == (1, 2.5, (4, 99.5, None)) + assert mpa(1, 2.5) == (1, 2.5, ()) + with pytest.raises(TypeError) as excinfo: + assert mpa(1) + assert msg(excinfo.value) == """ + mixed_plus_args(): incompatible function 
arguments. The following argument types are supported: + 1. (arg0: int, arg1: float, *args) -> tuple + + Invoked with: 1 + """ # noqa: E501 line too long + with pytest.raises(TypeError) as excinfo: + assert mpa() + assert msg(excinfo.value) == """ + mixed_plus_args(): incompatible function arguments. The following argument types are supported: + 1. (arg0: int, arg1: float, *args) -> tuple + + Invoked with: + """ # noqa: E501 line too long + + assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == (-2, 3.5, {'e': 2.71828, 'pi': 3.14159}) + assert mpak(7, 7.7, 7.77, 7.777, 7.7777, minusseven=-7) == ( + 7, 7.7, (7.77, 7.777, 7.7777), {'minusseven': -7}) + assert mpakd() == (1, 3.14159, (), {}) + assert mpakd(3) == (3, 3.14159, (), {}) + assert mpakd(j=2.71828) == (1, 2.71828, (), {}) + assert mpakd(k=42) == (1, 3.14159, (), {'k': 42}) + assert mpakd(1, 1, 2, 3, 5, 8, then=13, followedby=21) == ( + 1, 1, (2, 3, 5, 8), {'then': 13, 'followedby': 21}) + # Arguments specified both positionally and via kwargs should fail: + with pytest.raises(TypeError) as excinfo: + assert mpakd(1, i=1) + assert msg(excinfo.value) == """ + mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported: + 1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple + + Invoked with: 1; kwargs: i=1 + """ # noqa: E501 line too long + with pytest.raises(TypeError) as excinfo: + assert mpakd(1, 2, j=1) + assert msg(excinfo.value) == """ + mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported: + 1. 
(i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple + + Invoked with: 1, 2; kwargs: j=1 + """ # noqa: E501 line too long + + +def test_keyword_only_args(msg): + assert m.kwonly_all(i=1, j=2) == (1, 2) + assert m.kwonly_all(j=1, i=2) == (2, 1) + + with pytest.raises(TypeError) as excinfo: + assert m.kwonly_all(i=1) == (1,) + assert "incompatible function arguments" in str(excinfo.value) + + with pytest.raises(TypeError) as excinfo: + assert m.kwonly_all(1, 2) == (1, 2) + assert "incompatible function arguments" in str(excinfo.value) + + assert m.kwonly_some(1, k=3, j=2) == (1, 2, 3) + + assert m.kwonly_with_defaults(z=8) == (3, 4, 5, 8) + assert m.kwonly_with_defaults(2, z=8) == (2, 4, 5, 8) + assert m.kwonly_with_defaults(2, j=7, k=8, z=9) == (2, 7, 8, 9) + assert m.kwonly_with_defaults(2, 7, z=9, k=8) == (2, 7, 8, 9) + + assert m.kwonly_mixed(1, j=2) == (1, 2) + assert m.kwonly_mixed(j=2, i=3) == (3, 2) + assert m.kwonly_mixed(i=2, j=3) == (2, 3) + + assert m.kwonly_plus_more(4, 5, k=6, extra=7) == (4, 5, 6, {'extra': 7}) + assert m.kwonly_plus_more(3, k=5, j=4, extra=6) == (3, 4, 5, {'extra': 6}) + assert m.kwonly_plus_more(2, k=3, extra=4) == (2, -1, 3, {'extra': 4}) + + with pytest.raises(TypeError) as excinfo: + assert m.kwonly_mixed(i=1) == (1,) + assert "incompatible function arguments" in str(excinfo.value) + + with pytest.raises(RuntimeError) as excinfo: + m.register_invalid_kwonly(m) + assert msg(excinfo.value) == """ + arg(): cannot specify an unnamed argument after an kwonly() annotation + """ + + +@pytest.mark.xfail("env.PYPY and env.PY2", reason="PyPy2 doesn't double count") +def test_args_refcount(): + """Issue/PR #1216 - py::args elements get double-inc_ref()ed when combined with regular + arguments""" + refcount = m.arg_refcount_h + + myval = 54321 + expected = refcount(myval) + assert m.arg_refcount_h(myval) == expected + assert m.arg_refcount_o(myval) == expected + 1 + assert m.arg_refcount_h(myval) == expected + assert refcount(myval) == 
expected + + assert m.mixed_plus_args(1, 2.0, "a", myval) == (1, 2.0, ("a", myval)) + assert refcount(myval) == expected + + assert m.mixed_plus_kwargs(3, 4.0, a=1, b=myval) == (3, 4.0, {"a": 1, "b": myval}) + assert refcount(myval) == expected + + assert m.args_function(-1, myval) == (-1, myval) + assert refcount(myval) == expected + + assert m.mixed_plus_args_kwargs(5, 6.0, myval, a=myval) == (5, 6.0, (myval,), {"a": myval}) + assert refcount(myval) == expected + + assert m.args_kwargs_function(7, 8, myval, a=1, b=myval) == \ + ((7, 8, myval), {"a": 1, "b": myval}) + assert refcount(myval) == expected + + exp3 = refcount(myval, myval, myval) + assert m.args_refcount(myval, myval, myval) == (exp3, exp3, exp3) + assert refcount(myval) == expected + + # This function takes the first arg as a `py::object` and the rest as a `py::args`. Unlike the + # previous case, when we have both positional and `py::args` we need to construct a new tuple + # for the `py::args`; in the previous case, we could simply inc_ref and pass on Python's input + # tuple without having to inc_ref the individual elements, but here we can't, hence the extra + # refs. + assert m.mixed_args_refcount(myval, myval, myval) == (exp3 + 3, exp3 + 3, exp3 + 3) + + assert m.class_default_argument() == "" diff --git a/diffvg/pybind11/tests/test_local_bindings.cpp b/diffvg/pybind11/tests/test_local_bindings.cpp new file mode 100644 index 0000000000000000000000000000000000000000..97c02dbeb567c3699aa48f150bd8ec9dd3cd951f --- /dev/null +++ b/diffvg/pybind11/tests/test_local_bindings.cpp @@ -0,0 +1,101 @@ +/* + tests/test_local_bindings.cpp -- tests the py::module_local class feature which makes a class + binding local to the module in which it is defined. + + Copyright (c) 2017 Jason Rhinelander + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include "local_bindings.h" +#include +#include +#include + +TEST_SUBMODULE(local_bindings, m) { + // test_load_external + m.def("load_external1", [](ExternalType1 &e) { return e.i; }); + m.def("load_external2", [](ExternalType2 &e) { return e.i; }); + + // test_local_bindings + // Register a class with py::module_local: + bind_local(m, "LocalType", py::module_local()) + .def("get3", [](LocalType &t) { return t.i + 3; }) + ; + + m.def("local_value", [](LocalType &l) { return l.i; }); + + // test_nonlocal_failure + // The main pybind11 test module is loaded first, so this registration will succeed (the second + // one, in pybind11_cross_module_tests.cpp, is designed to fail): + bind_local(m, "NonLocalType") + .def(py::init()) + .def("get", [](LocalType &i) { return i.i; }) + ; + + // test_duplicate_local + // py::module_local declarations should be visible across compilation units that get linked together; + // this tries to register a duplicate local. It depends on a definition in test_class.cpp and + // should raise a runtime error from the duplicate definition attempt. If test_class isn't + // available it *also* throws a runtime error (with "test_class not enabled" as value). 
+ m.def("register_local_external", [m]() { + auto main = py::module::import("pybind11_tests"); + if (py::hasattr(main, "class_")) { + bind_local(m, "LocalExternal", py::module_local()); + } + else throw std::runtime_error("test_class not enabled"); + }); + + // test_stl_bind_local + // stl_bind.h binders defaults to py::module_local if the types are local or converting: + py::bind_vector(m, "LocalVec"); + py::bind_map(m, "LocalMap"); + // and global if the type (or one of the types, for the map) is global: + py::bind_vector(m, "NonLocalVec"); + py::bind_map(m, "NonLocalMap"); + + // test_stl_bind_global + // They can, however, be overridden to global using `py::module_local(false)`: + bind_local(m, "NonLocal2"); + py::bind_vector(m, "LocalVec2", py::module_local()); + py::bind_map(m, "NonLocalMap2", py::module_local(false)); + + // test_mixed_local_global + // We try this both with the global type registered first and vice versa (the order shouldn't + // matter). + m.def("register_mixed_global", [m]() { + bind_local(m, "MixedGlobalLocal", py::module_local(false)); + }); + m.def("register_mixed_local", [m]() { + bind_local(m, "MixedLocalGlobal", py::module_local()); + }); + m.def("get_mixed_gl", [](int i) { return MixedGlobalLocal(i); }); + m.def("get_mixed_lg", [](int i) { return MixedLocalGlobal(i); }); + + // test_internal_locals_differ + m.def("local_cpp_types_addr", []() { return (uintptr_t) &py::detail::registered_local_types_cpp(); }); + + // test_stl_caster_vs_stl_bind + m.def("load_vector_via_caster", [](std::vector v) { + return std::accumulate(v.begin(), v.end(), 0); + }); + + // test_cross_module_calls + m.def("return_self", [](LocalVec *v) { return v; }); + m.def("return_copy", [](const LocalVec &v) { return LocalVec(v); }); + + class Cat : public pets::Pet { public: Cat(std::string name) : Pet(name) {}; }; + py::class_(m, "Pet", py::module_local()) + .def("get_name", &pets::Pet::name); + // Binding for local extending class: + py::class_(m, "Cat") + 
.def(py::init()); + m.def("pet_name", [](pets::Pet &p) { return p.name(); }); + + py::class_(m, "MixGL").def(py::init()); + m.def("get_gl_value", [](MixGL &o) { return o.i + 10; }); + + py::class_(m, "MixGL2").def(py::init()); +} diff --git a/diffvg/pybind11/tests/test_local_bindings.py b/diffvg/pybind11/tests/test_local_bindings.py new file mode 100644 index 0000000000000000000000000000000000000000..5460727e1d7ad840f5f2817e9ffbb4e10920b583 --- /dev/null +++ b/diffvg/pybind11/tests/test_local_bindings.py @@ -0,0 +1,230 @@ +# -*- coding: utf-8 -*- +import pytest + +import env # noqa: F401 + +from pybind11_tests import local_bindings as m + + +def test_load_external(): + """Load a `py::module_local` type that's only registered in an external module""" + import pybind11_cross_module_tests as cm + + assert m.load_external1(cm.ExternalType1(11)) == 11 + assert m.load_external2(cm.ExternalType2(22)) == 22 + + with pytest.raises(TypeError) as excinfo: + assert m.load_external2(cm.ExternalType1(21)) == 21 + assert "incompatible function arguments" in str(excinfo.value) + + with pytest.raises(TypeError) as excinfo: + assert m.load_external1(cm.ExternalType2(12)) == 12 + assert "incompatible function arguments" in str(excinfo.value) + + +def test_local_bindings(): + """Tests that duplicate `py::module_local` class bindings work across modules""" + + # Make sure we can load the second module with the conflicting (but local) definition: + import pybind11_cross_module_tests as cm + + i1 = m.LocalType(5) + assert i1.get() == 4 + assert i1.get3() == 8 + + i2 = cm.LocalType(10) + assert i2.get() == 11 + assert i2.get2() == 12 + + assert not hasattr(i1, 'get2') + assert not hasattr(i2, 'get3') + + # Loading within the local module + assert m.local_value(i1) == 5 + assert cm.local_value(i2) == 10 + + # Cross-module loading works as well (on failure, the type loader looks for + # external module-local converters): + assert m.local_value(i2) == 10 + assert cm.local_value(i1) == 5 + + 
+def test_nonlocal_failure(): + """Tests that attempting to register a non-local type in multiple modules fails""" + import pybind11_cross_module_tests as cm + + with pytest.raises(RuntimeError) as excinfo: + cm.register_nonlocal() + assert str(excinfo.value) == 'generic_type: type "NonLocalType" is already registered!' + + +def test_duplicate_local(): + """Tests expected failure when registering a class twice with py::local in the same module""" + with pytest.raises(RuntimeError) as excinfo: + m.register_local_external() + import pybind11_tests + assert str(excinfo.value) == ( + 'generic_type: type "LocalExternal" is already registered!' + if hasattr(pybind11_tests, 'class_') else 'test_class not enabled') + + +def test_stl_bind_local(): + import pybind11_cross_module_tests as cm + + v1, v2 = m.LocalVec(), cm.LocalVec() + v1.append(m.LocalType(1)) + v1.append(m.LocalType(2)) + v2.append(cm.LocalType(1)) + v2.append(cm.LocalType(2)) + + # Cross module value loading: + v1.append(cm.LocalType(3)) + v2.append(m.LocalType(3)) + + assert [i.get() for i in v1] == [0, 1, 2] + assert [i.get() for i in v2] == [2, 3, 4] + + v3, v4 = m.NonLocalVec(), cm.NonLocalVec2() + v3.append(m.NonLocalType(1)) + v3.append(m.NonLocalType(2)) + v4.append(m.NonLocal2(3)) + v4.append(m.NonLocal2(4)) + + assert [i.get() for i in v3] == [1, 2] + assert [i.get() for i in v4] == [13, 14] + + d1, d2 = m.LocalMap(), cm.LocalMap() + d1["a"] = v1[0] + d1["b"] = v1[1] + d2["c"] = v2[0] + d2["d"] = v2[1] + assert {i: d1[i].get() for i in d1} == {'a': 0, 'b': 1} + assert {i: d2[i].get() for i in d2} == {'c': 2, 'd': 3} + + +def test_stl_bind_global(): + import pybind11_cross_module_tests as cm + + with pytest.raises(RuntimeError) as excinfo: + cm.register_nonlocal_map() + assert str(excinfo.value) == 'generic_type: type "NonLocalMap" is already registered!' 
+ + with pytest.raises(RuntimeError) as excinfo: + cm.register_nonlocal_vec() + assert str(excinfo.value) == 'generic_type: type "NonLocalVec" is already registered!' + + with pytest.raises(RuntimeError) as excinfo: + cm.register_nonlocal_map2() + assert str(excinfo.value) == 'generic_type: type "NonLocalMap2" is already registered!' + + +def test_mixed_local_global(): + """Local types take precedence over globally registered types: a module with a `module_local` + type can be registered even if the type is already registered globally. With the module, + casting will go to the local type; outside the module casting goes to the global type.""" + import pybind11_cross_module_tests as cm + m.register_mixed_global() + m.register_mixed_local() + + a = [] + a.append(m.MixedGlobalLocal(1)) + a.append(m.MixedLocalGlobal(2)) + a.append(m.get_mixed_gl(3)) + a.append(m.get_mixed_lg(4)) + + assert [x.get() for x in a] == [101, 1002, 103, 1004] + + cm.register_mixed_global_local() + cm.register_mixed_local_global() + a.append(m.MixedGlobalLocal(5)) + a.append(m.MixedLocalGlobal(6)) + a.append(cm.MixedGlobalLocal(7)) + a.append(cm.MixedLocalGlobal(8)) + a.append(m.get_mixed_gl(9)) + a.append(m.get_mixed_lg(10)) + a.append(cm.get_mixed_gl(11)) + a.append(cm.get_mixed_lg(12)) + + assert [x.get() for x in a] == \ + [101, 1002, 103, 1004, 105, 1006, 207, 2008, 109, 1010, 211, 2012] + + +def test_internal_locals_differ(): + """Makes sure the internal local type map differs across the two modules""" + import pybind11_cross_module_tests as cm + assert m.local_cpp_types_addr() != cm.local_cpp_types_addr() + + +@pytest.mark.xfail("env.PYPY") +def test_stl_caster_vs_stl_bind(msg): + """One module uses a generic vector caster from `` while the other + exports `std::vector` via `py:bind_vector` and `py::module_local`""" + import pybind11_cross_module_tests as cm + + v1 = cm.VectorInt([1, 2, 3]) + assert m.load_vector_via_caster(v1) == 6 + assert cm.load_vector_via_binding(v1) == 6 + + v2 = 
[1, 2, 3] + assert m.load_vector_via_caster(v2) == 6 + with pytest.raises(TypeError) as excinfo: + cm.load_vector_via_binding(v2) == 6 + assert msg(excinfo.value) == """ + load_vector_via_binding(): incompatible function arguments. The following argument types are supported: + 1. (arg0: pybind11_cross_module_tests.VectorInt) -> int + + Invoked with: [1, 2, 3] + """ # noqa: E501 line too long + + +def test_cross_module_calls(): + import pybind11_cross_module_tests as cm + + v1 = m.LocalVec() + v1.append(m.LocalType(1)) + v2 = cm.LocalVec() + v2.append(cm.LocalType(2)) + + # Returning the self pointer should get picked up as returning an existing + # instance (even when that instance is of a foreign, non-local type). + assert m.return_self(v1) is v1 + assert cm.return_self(v2) is v2 + assert m.return_self(v2) is v2 + assert cm.return_self(v1) is v1 + + assert m.LocalVec is not cm.LocalVec + # Returning a copy, on the other hand, always goes to the local type, + # regardless of where the source type came from. 
+ assert type(m.return_copy(v1)) is m.LocalVec + assert type(m.return_copy(v2)) is m.LocalVec + assert type(cm.return_copy(v1)) is cm.LocalVec + assert type(cm.return_copy(v2)) is cm.LocalVec + + # Test the example given in the documentation (which also tests inheritance casting): + mycat = m.Cat("Fluffy") + mydog = cm.Dog("Rover") + assert mycat.get_name() == "Fluffy" + assert mydog.name() == "Rover" + assert m.Cat.__base__.__name__ == "Pet" + assert cm.Dog.__base__.__name__ == "Pet" + assert m.Cat.__base__ is not cm.Dog.__base__ + assert m.pet_name(mycat) == "Fluffy" + assert m.pet_name(mydog) == "Rover" + assert cm.pet_name(mycat) == "Fluffy" + assert cm.pet_name(mydog) == "Rover" + + assert m.MixGL is not cm.MixGL + a = m.MixGL(1) + b = cm.MixGL(2) + assert m.get_gl_value(a) == 11 + assert m.get_gl_value(b) == 12 + assert cm.get_gl_value(a) == 101 + assert cm.get_gl_value(b) == 102 + + c, d = m.MixGL2(3), cm.MixGL2(4) + with pytest.raises(TypeError) as excinfo: + m.get_gl_value(c) + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.get_gl_value(d) + assert "incompatible function arguments" in str(excinfo.value) diff --git a/diffvg/pybind11/tests/test_methods_and_attributes.cpp b/diffvg/pybind11/tests/test_methods_and_attributes.cpp new file mode 100644 index 0000000000000000000000000000000000000000..11d4e7b3501a8bb37b829af6c4aa5d4a4e094f8e --- /dev/null +++ b/diffvg/pybind11/tests/test_methods_and_attributes.cpp @@ -0,0 +1,372 @@ +/* + tests/test_methods_and_attributes.cpp -- constructors, deconstructors, attribute access, + __str__, argument and return value conventions + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" + +#if !defined(PYBIND11_OVERLOAD_CAST) +template +using overload_cast_ = pybind11::detail::overload_cast_impl; +#endif + +class ExampleMandA { +public: + ExampleMandA() { print_default_created(this); } + ExampleMandA(int value) : value(value) { print_created(this, value); } + ExampleMandA(const ExampleMandA &e) : value(e.value) { print_copy_created(this); } + ExampleMandA(std::string&&) {} + ExampleMandA(ExampleMandA &&e) : value(e.value) { print_move_created(this); } + ~ExampleMandA() { print_destroyed(this); } + + std::string toString() { + return "ExampleMandA[value=" + std::to_string(value) + "]"; + } + + void operator=(const ExampleMandA &e) { print_copy_assigned(this); value = e.value; } + void operator=(ExampleMandA &&e) { print_move_assigned(this); value = e.value; } + + void add1(ExampleMandA other) { value += other.value; } // passing by value + void add2(ExampleMandA &other) { value += other.value; } // passing by reference + void add3(const ExampleMandA &other) { value += other.value; } // passing by const reference + void add4(ExampleMandA *other) { value += other->value; } // passing by pointer + void add5(const ExampleMandA *other) { value += other->value; } // passing by const pointer + + void add6(int other) { value += other; } // passing by value + void add7(int &other) { value += other; } // passing by reference + void add8(const int &other) { value += other; } // passing by const reference + void add9(int *other) { value += *other; } // passing by pointer + void add10(const int *other) { value += *other; } // passing by const pointer + + void consume_str(std::string&&) {} + + ExampleMandA self1() { return *this; } // return by value + ExampleMandA &self2() { return *this; } // return by reference + const ExampleMandA &self3() { return *this; } // return by const reference + ExampleMandA *self4() { return this; } // return by pointer + const ExampleMandA *self5() { return this; } 
// return by const pointer + + int internal1() { return value; } // return by value + int &internal2() { return value; } // return by reference + const int &internal3() { return value; } // return by const reference + int *internal4() { return &value; } // return by pointer + const int *internal5() { return &value; } // return by const pointer + + py::str overloaded() { return "()"; } + py::str overloaded(int) { return "(int)"; } + py::str overloaded(int, float) { return "(int, float)"; } + py::str overloaded(float, int) { return "(float, int)"; } + py::str overloaded(int, int) { return "(int, int)"; } + py::str overloaded(float, float) { return "(float, float)"; } + py::str overloaded(int) const { return "(int) const"; } + py::str overloaded(int, float) const { return "(int, float) const"; } + py::str overloaded(float, int) const { return "(float, int) const"; } + py::str overloaded(int, int) const { return "(int, int) const"; } + py::str overloaded(float, float) const { return "(float, float) const"; } + + static py::str overloaded(float) { return "static float"; } + + int value = 0; +}; + +struct TestProperties { + int value = 1; + static int static_value; + + int get() const { return value; } + void set(int v) { value = v; } + + static int static_get() { return static_value; } + static void static_set(int v) { static_value = v; } +}; +int TestProperties::static_value = 1; + +struct TestPropertiesOverride : TestProperties { + int value = 99; + static int static_value; +}; +int TestPropertiesOverride::static_value = 99; + +struct TestPropRVP { + UserType v1{1}; + UserType v2{1}; + static UserType sv1; + static UserType sv2; + + const UserType &get1() const { return v1; } + const UserType &get2() const { return v2; } + UserType get_rvalue() const { return v2; } + void set1(int v) { v1.set(v); } + void set2(int v) { v2.set(v); } +}; +UserType TestPropRVP::sv1(1); +UserType TestPropRVP::sv2(1); + +// Test None-allowed py::arg argument policy +class NoneTester { 
public: int answer = 42; }; +int none1(const NoneTester &obj) { return obj.answer; } +int none2(NoneTester *obj) { return obj ? obj->answer : -1; } +int none3(std::shared_ptr &obj) { return obj ? obj->answer : -1; } +int none4(std::shared_ptr *obj) { return obj && *obj ? (*obj)->answer : -1; } +int none5(std::shared_ptr obj) { return obj ? obj->answer : -1; } + +struct StrIssue { + int val = -1; + + StrIssue() = default; + StrIssue(int i) : val{i} {} +}; + +// Issues #854, #910: incompatible function args when member function/pointer is in unregistered base class +class UnregisteredBase { +public: + void do_nothing() const {} + void increase_value() { rw_value++; ro_value += 0.25; } + void set_int(int v) { rw_value = v; } + int get_int() const { return rw_value; } + double get_double() const { return ro_value; } + int rw_value = 42; + double ro_value = 1.25; +}; +class RegisteredDerived : public UnregisteredBase { +public: + using UnregisteredBase::UnregisteredBase; + double sum() const { return rw_value + ro_value; } +}; + +// Test explicit lvalue ref-qualification +struct RefQualified { + int value = 0; + + void refQualified(int other) & { value += other; } + int constRefQualified(int other) const & { return value + other; } +}; + +TEST_SUBMODULE(methods_and_attributes, m) { + // test_methods_and_attributes + py::class_ emna(m, "ExampleMandA"); + emna.def(py::init<>()) + .def(py::init()) + .def(py::init()) + .def(py::init()) + .def("add1", &ExampleMandA::add1) + .def("add2", &ExampleMandA::add2) + .def("add3", &ExampleMandA::add3) + .def("add4", &ExampleMandA::add4) + .def("add5", &ExampleMandA::add5) + .def("add6", &ExampleMandA::add6) + .def("add7", &ExampleMandA::add7) + .def("add8", &ExampleMandA::add8) + .def("add9", &ExampleMandA::add9) + .def("add10", &ExampleMandA::add10) + .def("consume_str", &ExampleMandA::consume_str) + .def("self1", &ExampleMandA::self1) + .def("self2", &ExampleMandA::self2) + .def("self3", &ExampleMandA::self3) + .def("self4", 
&ExampleMandA::self4) + .def("self5", &ExampleMandA::self5) + .def("internal1", &ExampleMandA::internal1) + .def("internal2", &ExampleMandA::internal2) + .def("internal3", &ExampleMandA::internal3) + .def("internal4", &ExampleMandA::internal4) + .def("internal5", &ExampleMandA::internal5) +#if defined(PYBIND11_OVERLOAD_CAST) + .def("overloaded", py::overload_cast<>(&ExampleMandA::overloaded)) + .def("overloaded", py::overload_cast(&ExampleMandA::overloaded)) + .def("overloaded", py::overload_cast(&ExampleMandA::overloaded)) + .def("overloaded", py::overload_cast(&ExampleMandA::overloaded)) + .def("overloaded", py::overload_cast(&ExampleMandA::overloaded)) + .def("overloaded", py::overload_cast(&ExampleMandA::overloaded)) + .def("overloaded_float", py::overload_cast(&ExampleMandA::overloaded)) + .def("overloaded_const", py::overload_cast(&ExampleMandA::overloaded, py::const_)) + .def("overloaded_const", py::overload_cast(&ExampleMandA::overloaded, py::const_)) + .def("overloaded_const", py::overload_cast(&ExampleMandA::overloaded, py::const_)) + .def("overloaded_const", py::overload_cast(&ExampleMandA::overloaded, py::const_)) + .def("overloaded_const", py::overload_cast(&ExampleMandA::overloaded, py::const_)) +#else + // Use both the traditional static_cast method and the C++11 compatible overload_cast_ + .def("overloaded", overload_cast_<>()(&ExampleMandA::overloaded)) + .def("overloaded", overload_cast_()(&ExampleMandA::overloaded)) + .def("overloaded", overload_cast_()(&ExampleMandA::overloaded)) + .def("overloaded", static_cast(&ExampleMandA::overloaded)) + .def("overloaded", static_cast(&ExampleMandA::overloaded)) + .def("overloaded", static_cast(&ExampleMandA::overloaded)) + .def("overloaded_float", overload_cast_()(&ExampleMandA::overloaded)) + .def("overloaded_const", overload_cast_()(&ExampleMandA::overloaded, py::const_)) + .def("overloaded_const", overload_cast_()(&ExampleMandA::overloaded, py::const_)) + .def("overloaded_const", 
static_cast(&ExampleMandA::overloaded)) + .def("overloaded_const", static_cast(&ExampleMandA::overloaded)) + .def("overloaded_const", static_cast(&ExampleMandA::overloaded)) +#endif + // test_no_mixed_overloads + // Raise error if trying to mix static/non-static overloads on the same name: + .def_static("add_mixed_overloads1", []() { + auto emna = py::reinterpret_borrow>(py::module::import("pybind11_tests.methods_and_attributes").attr("ExampleMandA")); + emna.def ("overload_mixed1", static_cast(&ExampleMandA::overloaded)) + .def_static("overload_mixed1", static_cast(&ExampleMandA::overloaded)); + }) + .def_static("add_mixed_overloads2", []() { + auto emna = py::reinterpret_borrow>(py::module::import("pybind11_tests.methods_and_attributes").attr("ExampleMandA")); + emna.def_static("overload_mixed2", static_cast(&ExampleMandA::overloaded)) + .def ("overload_mixed2", static_cast(&ExampleMandA::overloaded)); + }) + .def("__str__", &ExampleMandA::toString) + .def_readwrite("value", &ExampleMandA::value); + + // test_copy_method + // Issue #443: can't call copied methods in Python 3 + emna.attr("add2b") = emna.attr("add2"); + + // test_properties, test_static_properties, test_static_cls + py::class_(m, "TestProperties") + .def(py::init<>()) + .def_readonly("def_readonly", &TestProperties::value) + .def_readwrite("def_readwrite", &TestProperties::value) + .def_property("def_writeonly", nullptr, + [](TestProperties& s,int v) { s.value = v; } ) + .def_property("def_property_writeonly", nullptr, &TestProperties::set) + .def_property_readonly("def_property_readonly", &TestProperties::get) + .def_property("def_property", &TestProperties::get, &TestProperties::set) + .def_property("def_property_impossible", nullptr, nullptr) + .def_readonly_static("def_readonly_static", &TestProperties::static_value) + .def_readwrite_static("def_readwrite_static", &TestProperties::static_value) + .def_property_static("def_writeonly_static", nullptr, + [](py::object, int v) { 
TestProperties::static_value = v; }) + .def_property_readonly_static("def_property_readonly_static", + [](py::object) { return TestProperties::static_get(); }) + .def_property_static("def_property_writeonly_static", nullptr, + [](py::object, int v) { return TestProperties::static_set(v); }) + .def_property_static("def_property_static", + [](py::object) { return TestProperties::static_get(); }, + [](py::object, int v) { TestProperties::static_set(v); }) + .def_property_static("static_cls", + [](py::object cls) { return cls; }, + [](py::object cls, py::function f) { f(cls); }); + + py::class_(m, "TestPropertiesOverride") + .def(py::init<>()) + .def_readonly("def_readonly", &TestPropertiesOverride::value) + .def_readonly_static("def_readonly_static", &TestPropertiesOverride::static_value); + + auto static_get1 = [](py::object) -> const UserType & { return TestPropRVP::sv1; }; + auto static_get2 = [](py::object) -> const UserType & { return TestPropRVP::sv2; }; + auto static_set1 = [](py::object, int v) { TestPropRVP::sv1.set(v); }; + auto static_set2 = [](py::object, int v) { TestPropRVP::sv2.set(v); }; + auto rvp_copy = py::return_value_policy::copy; + + // test_property_return_value_policies + py::class_(m, "TestPropRVP") + .def(py::init<>()) + .def_property_readonly("ro_ref", &TestPropRVP::get1) + .def_property_readonly("ro_copy", &TestPropRVP::get2, rvp_copy) + .def_property_readonly("ro_func", py::cpp_function(&TestPropRVP::get2, rvp_copy)) + .def_property("rw_ref", &TestPropRVP::get1, &TestPropRVP::set1) + .def_property("rw_copy", &TestPropRVP::get2, &TestPropRVP::set2, rvp_copy) + .def_property("rw_func", py::cpp_function(&TestPropRVP::get2, rvp_copy), &TestPropRVP::set2) + .def_property_readonly_static("static_ro_ref", static_get1) + .def_property_readonly_static("static_ro_copy", static_get2, rvp_copy) + .def_property_readonly_static("static_ro_func", py::cpp_function(static_get2, rvp_copy)) + .def_property_static("static_rw_ref", static_get1, static_set1) + 
.def_property_static("static_rw_copy", static_get2, static_set2, rvp_copy) + .def_property_static("static_rw_func", py::cpp_function(static_get2, rvp_copy), static_set2) + // test_property_rvalue_policy + .def_property_readonly("rvalue", &TestPropRVP::get_rvalue) + .def_property_readonly_static("static_rvalue", [](py::object) { return UserType(1); }); + + // test_metaclass_override + struct MetaclassOverride { }; + py::class_(m, "MetaclassOverride", py::metaclass((PyObject *) &PyType_Type)) + .def_property_readonly_static("readonly", [](py::object) { return 1; }); + +#if !defined(PYPY_VERSION) + // test_dynamic_attributes + class DynamicClass { + public: + DynamicClass() { print_default_created(this); } + DynamicClass(const DynamicClass&) = delete; + ~DynamicClass() { print_destroyed(this); } + }; + py::class_(m, "DynamicClass", py::dynamic_attr()) + .def(py::init()); + + class CppDerivedDynamicClass : public DynamicClass { }; + py::class_(m, "CppDerivedDynamicClass") + .def(py::init()); +#endif + + // test_bad_arg_default + // Issue/PR #648: bad arg default debugging output +#if !defined(NDEBUG) + m.attr("debug_enabled") = true; +#else + m.attr("debug_enabled") = false; +#endif + m.def("bad_arg_def_named", []{ + auto m = py::module::import("pybind11_tests"); + m.def("should_fail", [](int, UnregisteredType) {}, py::arg(), py::arg("a") = UnregisteredType()); + }); + m.def("bad_arg_def_unnamed", []{ + auto m = py::module::import("pybind11_tests"); + m.def("should_fail", [](int, UnregisteredType) {}, py::arg(), py::arg() = UnregisteredType()); + }); + + // test_accepts_none + py::class_>(m, "NoneTester") + .def(py::init<>()); + m.def("no_none1", &none1, py::arg().none(false)); + m.def("no_none2", &none2, py::arg().none(false)); + m.def("no_none3", &none3, py::arg().none(false)); + m.def("no_none4", &none4, py::arg().none(false)); + m.def("no_none5", &none5, py::arg().none(false)); + m.def("ok_none1", &none1); + m.def("ok_none2", &none2, py::arg().none(true)); + 
m.def("ok_none3", &none3); + m.def("ok_none4", &none4, py::arg().none(true)); + m.def("ok_none5", &none5); + + // test_str_issue + // Issue #283: __str__ called on uninitialized instance when constructor arguments invalid + py::class_(m, "StrIssue") + .def(py::init()) + .def(py::init<>()) + .def("__str__", [](const StrIssue &si) { + return "StrIssue[" + std::to_string(si.val) + "]"; } + ); + + // test_unregistered_base_implementations + // + // Issues #854/910: incompatible function args when member function/pointer is in unregistered + // base class The methods and member pointers below actually resolve to members/pointers in + // UnregisteredBase; before this test/fix they would be registered via lambda with a first + // argument of an unregistered type, and thus uncallable. + py::class_(m, "RegisteredDerived") + .def(py::init<>()) + .def("do_nothing", &RegisteredDerived::do_nothing) + .def("increase_value", &RegisteredDerived::increase_value) + .def_readwrite("rw_value", &RegisteredDerived::rw_value) + .def_readonly("ro_value", &RegisteredDerived::ro_value) + // These should trigger a static_assert if uncommented + //.def_readwrite("fails", &UserType::value) // should trigger a static_assert if uncommented + //.def_readonly("fails", &UserType::value) // should trigger a static_assert if uncommented + .def_property("rw_value_prop", &RegisteredDerived::get_int, &RegisteredDerived::set_int) + .def_property_readonly("ro_value_prop", &RegisteredDerived::get_double) + // This one is in the registered class: + .def("sum", &RegisteredDerived::sum) + ; + + using Adapted = decltype(py::method_adaptor(&RegisteredDerived::do_nothing)); + static_assert(std::is_same::value, ""); + + // test_methods_and_attributes + py::class_(m, "RefQualified") + .def(py::init<>()) + .def_readonly("value", &RefQualified::value) + .def("refQualified", &RefQualified::refQualified) + .def("constRefQualified", &RefQualified::constRefQualified); +} diff --git 
a/diffvg/pybind11/tests/test_methods_and_attributes.py b/diffvg/pybind11/tests/test_methods_and_attributes.py new file mode 100644 index 0000000000000000000000000000000000000000..c296b6868d64f75085493e6def6d319860851b44 --- /dev/null +++ b/diffvg/pybind11/tests/test_methods_and_attributes.py @@ -0,0 +1,440 @@ +# -*- coding: utf-8 -*- +import pytest + +import env # noqa: F401 + +from pybind11_tests import methods_and_attributes as m +from pybind11_tests import ConstructorStats + + +def test_methods_and_attributes(): + instance1 = m.ExampleMandA() + instance2 = m.ExampleMandA(32) + + instance1.add1(instance2) + instance1.add2(instance2) + instance1.add3(instance2) + instance1.add4(instance2) + instance1.add5(instance2) + instance1.add6(32) + instance1.add7(32) + instance1.add8(32) + instance1.add9(32) + instance1.add10(32) + + assert str(instance1) == "ExampleMandA[value=320]" + assert str(instance2) == "ExampleMandA[value=32]" + assert str(instance1.self1()) == "ExampleMandA[value=320]" + assert str(instance1.self2()) == "ExampleMandA[value=320]" + assert str(instance1.self3()) == "ExampleMandA[value=320]" + assert str(instance1.self4()) == "ExampleMandA[value=320]" + assert str(instance1.self5()) == "ExampleMandA[value=320]" + + assert instance1.internal1() == 320 + assert instance1.internal2() == 320 + assert instance1.internal3() == 320 + assert instance1.internal4() == 320 + assert instance1.internal5() == 320 + + assert instance1.overloaded() == "()" + assert instance1.overloaded(0) == "(int)" + assert instance1.overloaded(1, 1.0) == "(int, float)" + assert instance1.overloaded(2.0, 2) == "(float, int)" + assert instance1.overloaded(3, 3) == "(int, int)" + assert instance1.overloaded(4., 4.) 
== "(float, float)" + assert instance1.overloaded_const(-3) == "(int) const" + assert instance1.overloaded_const(5, 5.0) == "(int, float) const" + assert instance1.overloaded_const(6.0, 6) == "(float, int) const" + assert instance1.overloaded_const(7, 7) == "(int, int) const" + assert instance1.overloaded_const(8., 8.) == "(float, float) const" + assert instance1.overloaded_float(1, 1) == "(float, float)" + assert instance1.overloaded_float(1, 1.) == "(float, float)" + assert instance1.overloaded_float(1., 1) == "(float, float)" + assert instance1.overloaded_float(1., 1.) == "(float, float)" + + assert instance1.value == 320 + instance1.value = 100 + assert str(instance1) == "ExampleMandA[value=100]" + + cstats = ConstructorStats.get(m.ExampleMandA) + assert cstats.alive() == 2 + del instance1, instance2 + assert cstats.alive() == 0 + assert cstats.values() == ["32"] + assert cstats.default_constructions == 1 + assert cstats.copy_constructions == 2 + assert cstats.move_constructions >= 2 + assert cstats.copy_assignments == 0 + assert cstats.move_assignments == 0 + + +def test_copy_method(): + """Issue #443: calling copied methods fails in Python 3""" + + m.ExampleMandA.add2c = m.ExampleMandA.add2 + m.ExampleMandA.add2d = m.ExampleMandA.add2b + a = m.ExampleMandA(123) + assert a.value == 123 + a.add2(m.ExampleMandA(-100)) + assert a.value == 23 + a.add2b(m.ExampleMandA(20)) + assert a.value == 43 + a.add2c(m.ExampleMandA(6)) + assert a.value == 49 + a.add2d(m.ExampleMandA(-7)) + assert a.value == 42 + + +def test_properties(): + instance = m.TestProperties() + + assert instance.def_readonly == 1 + with pytest.raises(AttributeError): + instance.def_readonly = 2 + + instance.def_readwrite = 2 + assert instance.def_readwrite == 2 + + assert instance.def_property_readonly == 2 + with pytest.raises(AttributeError): + instance.def_property_readonly = 3 + + instance.def_property = 3 + assert instance.def_property == 3 + + with pytest.raises(AttributeError) as excinfo: + 
dummy = instance.def_property_writeonly # noqa: F841 unused var + assert "unreadable attribute" in str(excinfo.value) + + instance.def_property_writeonly = 4 + assert instance.def_property_readonly == 4 + + with pytest.raises(AttributeError) as excinfo: + dummy = instance.def_property_impossible # noqa: F841 unused var + assert "unreadable attribute" in str(excinfo.value) + + with pytest.raises(AttributeError) as excinfo: + instance.def_property_impossible = 5 + assert "can't set attribute" in str(excinfo.value) + + +def test_static_properties(): + assert m.TestProperties.def_readonly_static == 1 + with pytest.raises(AttributeError) as excinfo: + m.TestProperties.def_readonly_static = 2 + assert "can't set attribute" in str(excinfo.value) + + m.TestProperties.def_readwrite_static = 2 + assert m.TestProperties.def_readwrite_static == 2 + + with pytest.raises(AttributeError) as excinfo: + dummy = m.TestProperties.def_writeonly_static # noqa: F841 unused var + assert "unreadable attribute" in str(excinfo.value) + + m.TestProperties.def_writeonly_static = 3 + assert m.TestProperties.def_readonly_static == 3 + + assert m.TestProperties.def_property_readonly_static == 3 + with pytest.raises(AttributeError) as excinfo: + m.TestProperties.def_property_readonly_static = 99 + assert "can't set attribute" in str(excinfo.value) + + m.TestProperties.def_property_static = 4 + assert m.TestProperties.def_property_static == 4 + + with pytest.raises(AttributeError) as excinfo: + dummy = m.TestProperties.def_property_writeonly_static + assert "unreadable attribute" in str(excinfo.value) + + m.TestProperties.def_property_writeonly_static = 5 + assert m.TestProperties.def_property_static == 5 + + # Static property read and write via instance + instance = m.TestProperties() + + m.TestProperties.def_readwrite_static = 0 + assert m.TestProperties.def_readwrite_static == 0 + assert instance.def_readwrite_static == 0 + + instance.def_readwrite_static = 2 + assert 
m.TestProperties.def_readwrite_static == 2 + assert instance.def_readwrite_static == 2 + + with pytest.raises(AttributeError) as excinfo: + dummy = instance.def_property_writeonly_static # noqa: F841 unused var + assert "unreadable attribute" in str(excinfo.value) + + instance.def_property_writeonly_static = 4 + assert instance.def_property_static == 4 + + # It should be possible to override properties in derived classes + assert m.TestPropertiesOverride().def_readonly == 99 + assert m.TestPropertiesOverride.def_readonly_static == 99 + + +def test_static_cls(): + """Static property getter and setters expect the type object as the their only argument""" + + instance = m.TestProperties() + assert m.TestProperties.static_cls is m.TestProperties + assert instance.static_cls is m.TestProperties + + def check_self(self): + assert self is m.TestProperties + + m.TestProperties.static_cls = check_self + instance.static_cls = check_self + + +def test_metaclass_override(): + """Overriding pybind11's default metaclass changes the behavior of `static_property`""" + + assert type(m.ExampleMandA).__name__ == "pybind11_type" + assert type(m.MetaclassOverride).__name__ == "type" + + assert m.MetaclassOverride.readonly == 1 + assert type(m.MetaclassOverride.__dict__["readonly"]).__name__ == "pybind11_static_property" + + # Regular `type` replaces the property instead of calling `__set__()` + m.MetaclassOverride.readonly = 2 + assert m.MetaclassOverride.readonly == 2 + assert isinstance(m.MetaclassOverride.__dict__["readonly"], int) + + +def test_no_mixed_overloads(): + from pybind11_tests import debug_enabled + + with pytest.raises(RuntimeError) as excinfo: + m.ExampleMandA.add_mixed_overloads1() + assert (str(excinfo.value) == + "overloading a method with both static and instance methods is not supported; " + + ("compile in debug mode for more details" if not debug_enabled else + "error while attempting to bind static method ExampleMandA.overload_mixed1" + "(arg0: float) -> str") + 
) + + with pytest.raises(RuntimeError) as excinfo: + m.ExampleMandA.add_mixed_overloads2() + assert (str(excinfo.value) == + "overloading a method with both static and instance methods is not supported; " + + ("compile in debug mode for more details" if not debug_enabled else + "error while attempting to bind instance method ExampleMandA.overload_mixed2" + "(self: pybind11_tests.methods_and_attributes.ExampleMandA, arg0: int, arg1: int)" + " -> str") + ) + + +@pytest.mark.parametrize("access", ["ro", "rw", "static_ro", "static_rw"]) +def test_property_return_value_policies(access): + if not access.startswith("static"): + obj = m.TestPropRVP() + else: + obj = m.TestPropRVP + + ref = getattr(obj, access + "_ref") + assert ref.value == 1 + ref.value = 2 + assert getattr(obj, access + "_ref").value == 2 + ref.value = 1 # restore original value for static properties + + copy = getattr(obj, access + "_copy") + assert copy.value == 1 + copy.value = 2 + assert getattr(obj, access + "_copy").value == 1 + + copy = getattr(obj, access + "_func") + assert copy.value == 1 + copy.value = 2 + assert getattr(obj, access + "_func").value == 1 + + +def test_property_rvalue_policy(): + """When returning an rvalue, the return value policy is automatically changed from + `reference(_internal)` to `move`. 
The following would not work otherwise.""" + + instance = m.TestPropRVP() + o = instance.rvalue + assert o.value == 1 + + os = m.TestPropRVP.static_rvalue + assert os.value == 1 + + +# https://foss.heptapod.net/pypy/pypy/-/issues/2447 +@pytest.mark.xfail("env.PYPY") +def test_dynamic_attributes(): + instance = m.DynamicClass() + assert not hasattr(instance, "foo") + assert "foo" not in dir(instance) + + # Dynamically add attribute + instance.foo = 42 + assert hasattr(instance, "foo") + assert instance.foo == 42 + assert "foo" in dir(instance) + + # __dict__ should be accessible and replaceable + assert "foo" in instance.__dict__ + instance.__dict__ = {"bar": True} + assert not hasattr(instance, "foo") + assert hasattr(instance, "bar") + + with pytest.raises(TypeError) as excinfo: + instance.__dict__ = [] + assert str(excinfo.value) == "__dict__ must be set to a dictionary, not a 'list'" + + cstats = ConstructorStats.get(m.DynamicClass) + assert cstats.alive() == 1 + del instance + assert cstats.alive() == 0 + + # Derived classes should work as well + class PythonDerivedDynamicClass(m.DynamicClass): + pass + + for cls in m.CppDerivedDynamicClass, PythonDerivedDynamicClass: + derived = cls() + derived.foobar = 100 + assert derived.foobar == 100 + + assert cstats.alive() == 1 + del derived + assert cstats.alive() == 0 + + +# https://foss.heptapod.net/pypy/pypy/-/issues/2447 +@pytest.mark.xfail("env.PYPY") +def test_cyclic_gc(): + # One object references itself + instance = m.DynamicClass() + instance.circular_reference = instance + + cstats = ConstructorStats.get(m.DynamicClass) + assert cstats.alive() == 1 + del instance + assert cstats.alive() == 0 + + # Two object reference each other + i1 = m.DynamicClass() + i2 = m.DynamicClass() + i1.cycle = i2 + i2.cycle = i1 + + assert cstats.alive() == 2 + del i1, i2 + assert cstats.alive() == 0 + + +def test_bad_arg_default(msg): + from pybind11_tests import debug_enabled + + with pytest.raises(RuntimeError) as excinfo: + 
m.bad_arg_def_named() + assert msg(excinfo.value) == ( + "arg(): could not convert default argument 'a: UnregisteredType' in function " + "'should_fail' into a Python object (type not registered yet?)" + if debug_enabled else + "arg(): could not convert default argument into a Python object (type not registered " + "yet?). Compile in debug mode for more information." + ) + + with pytest.raises(RuntimeError) as excinfo: + m.bad_arg_def_unnamed() + assert msg(excinfo.value) == ( + "arg(): could not convert default argument 'UnregisteredType' in function " + "'should_fail' into a Python object (type not registered yet?)" + if debug_enabled else + "arg(): could not convert default argument into a Python object (type not registered " + "yet?). Compile in debug mode for more information." + ) + + +def test_accepts_none(msg): + a = m.NoneTester() + assert m.no_none1(a) == 42 + assert m.no_none2(a) == 42 + assert m.no_none3(a) == 42 + assert m.no_none4(a) == 42 + assert m.no_none5(a) == 42 + assert m.ok_none1(a) == 42 + assert m.ok_none2(a) == 42 + assert m.ok_none3(a) == 42 + assert m.ok_none4(a) == 42 + assert m.ok_none5(a) == 42 + + with pytest.raises(TypeError) as excinfo: + m.no_none1(None) + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.no_none2(None) + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.no_none3(None) + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.no_none4(None) + assert "incompatible function arguments" in str(excinfo.value) + with pytest.raises(TypeError) as excinfo: + m.no_none5(None) + assert "incompatible function arguments" in str(excinfo.value) + + # The first one still raises because you can't pass None as a lvalue reference arg: + with pytest.raises(TypeError) as excinfo: + assert m.ok_none1(None) == -1 + assert msg(excinfo.value) == """ + ok_none1(): 
incompatible function arguments. The following argument types are supported: + 1. (arg0: m.methods_and_attributes.NoneTester) -> int + + Invoked with: None + """ + + # The rest take the argument as pointer or holder, and accept None: + assert m.ok_none2(None) == -1 + assert m.ok_none3(None) == -1 + assert m.ok_none4(None) == -1 + assert m.ok_none5(None) == -1 + + +def test_str_issue(msg): + """#283: __str__ called on uninitialized instance when constructor arguments invalid""" + + assert str(m.StrIssue(3)) == "StrIssue[3]" + + with pytest.raises(TypeError) as excinfo: + str(m.StrIssue("no", "such", "constructor")) + assert msg(excinfo.value) == """ + __init__(): incompatible constructor arguments. The following argument types are supported: + 1. m.methods_and_attributes.StrIssue(arg0: int) + 2. m.methods_and_attributes.StrIssue() + + Invoked with: 'no', 'such', 'constructor' + """ + + +def test_unregistered_base_implementations(): + a = m.RegisteredDerived() + a.do_nothing() + assert a.rw_value == 42 + assert a.ro_value == 1.25 + a.rw_value += 5 + assert a.sum() == 48.25 + a.increase_value() + assert a.rw_value == 48 + assert a.ro_value == 1.5 + assert a.sum() == 49.5 + assert a.rw_value_prop == 48 + a.rw_value_prop += 1 + assert a.rw_value_prop == 49 + a.increase_value() + assert a.ro_value_prop == 1.75 + + +def test_ref_qualified(): + """Tests that explicit lvalue ref-qualified methods can be called just like their + non ref-qualified counterparts.""" + + r = m.RefQualified() + assert r.value == 0 + r.refQualified(17) + assert r.value == 17 + assert r.constRefQualified(23) == 40 diff --git a/diffvg/pybind11/tests/test_modules.cpp b/diffvg/pybind11/tests/test_modules.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c1475fa62357b9b2f2b31b844b2479557665f152 --- /dev/null +++ b/diffvg/pybind11/tests/test_modules.cpp @@ -0,0 +1,98 @@ +/* + tests/test_modules.cpp -- nested modules, importing modules, and + internal references + + Copyright (c) 
2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" + +TEST_SUBMODULE(modules, m) { + // test_nested_modules + py::module m_sub = m.def_submodule("subsubmodule"); + m_sub.def("submodule_func", []() { return "submodule_func()"; }); + + // test_reference_internal + class A { + public: + A(int v) : v(v) { print_created(this, v); } + ~A() { print_destroyed(this); } + A(const A&) { print_copy_created(this); } + A& operator=(const A ©) { print_copy_assigned(this); v = copy.v; return *this; } + std::string toString() { return "A[" + std::to_string(v) + "]"; } + private: + int v; + }; + py::class_(m_sub, "A") + .def(py::init()) + .def("__repr__", &A::toString); + + class B { + public: + B() { print_default_created(this); } + ~B() { print_destroyed(this); } + B(const B&) { print_copy_created(this); } + B& operator=(const B ©) { print_copy_assigned(this); a1 = copy.a1; a2 = copy.a2; return *this; } + A &get_a1() { return a1; } + A &get_a2() { return a2; } + + A a1{1}; + A a2{2}; + }; + py::class_(m_sub, "B") + .def(py::init<>()) + .def("get_a1", &B::get_a1, "Return the internal A 1", py::return_value_policy::reference_internal) + .def("get_a2", &B::get_a2, "Return the internal A 2", py::return_value_policy::reference_internal) + .def_readwrite("a1", &B::a1) // def_readonly uses an internal reference return policy by default + .def_readwrite("a2", &B::a2); + + m.attr("OD") = py::module::import("collections").attr("OrderedDict"); + + // test_duplicate_registration + // Registering two things with the same name + m.def("duplicate_registration", []() { + class Dupe1 { }; + class Dupe2 { }; + class Dupe3 { }; + class DupeException { }; + + auto dm = py::module("dummy"); + auto failures = py::list(); + + py::class_(dm, "Dupe1"); + py::class_(dm, "Dupe2"); + dm.def("dupe1_factory", []() { return Dupe1(); }); + 
py::exception(dm, "DupeException"); + + try { + py::class_(dm, "Dupe1"); + failures.append("Dupe1 class"); + } catch (std::runtime_error &) {} + try { + dm.def("Dupe1", []() { return Dupe1(); }); + failures.append("Dupe1 function"); + } catch (std::runtime_error &) {} + try { + py::class_(dm, "dupe1_factory"); + failures.append("dupe1_factory"); + } catch (std::runtime_error &) {} + try { + py::exception(dm, "Dupe2"); + failures.append("Dupe2"); + } catch (std::runtime_error &) {} + try { + dm.def("DupeException", []() { return 30; }); + failures.append("DupeException1"); + } catch (std::runtime_error &) {} + try { + py::class_(dm, "DupeException"); + failures.append("DupeException2"); + } catch (std::runtime_error &) {} + + return failures; + }); +} diff --git a/diffvg/pybind11/tests/test_modules.py b/diffvg/pybind11/tests/test_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..7e2100524506b13a5d3189a3fabb9dead628c2a5 --- /dev/null +++ b/diffvg/pybind11/tests/test_modules.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +from pybind11_tests import modules as m +from pybind11_tests.modules import subsubmodule as ms +from pybind11_tests import ConstructorStats + + +def test_nested_modules(): + import pybind11_tests + assert pybind11_tests.__name__ == "pybind11_tests" + assert pybind11_tests.modules.__name__ == "pybind11_tests.modules" + assert pybind11_tests.modules.subsubmodule.__name__ == "pybind11_tests.modules.subsubmodule" + assert m.__name__ == "pybind11_tests.modules" + assert ms.__name__ == "pybind11_tests.modules.subsubmodule" + + assert ms.submodule_func() == "submodule_func()" + + +def test_reference_internal(): + b = ms.B() + assert str(b.get_a1()) == "A[1]" + assert str(b.a1) == "A[1]" + assert str(b.get_a2()) == "A[2]" + assert str(b.a2) == "A[2]" + + b.a1 = ms.A(42) + b.a2 = ms.A(43) + assert str(b.get_a1()) == "A[42]" + assert str(b.a1) == "A[42]" + assert str(b.get_a2()) == "A[43]" + assert str(b.a2) == "A[43]" + + astats, 
bstats = ConstructorStats.get(ms.A), ConstructorStats.get(ms.B) + assert astats.alive() == 2 + assert bstats.alive() == 1 + del b + assert astats.alive() == 0 + assert bstats.alive() == 0 + assert astats.values() == ['1', '2', '42', '43'] + assert bstats.values() == [] + assert astats.default_constructions == 0 + assert bstats.default_constructions == 1 + assert astats.copy_constructions == 0 + assert bstats.copy_constructions == 0 + # assert astats.move_constructions >= 0 # Don't invoke any + # assert bstats.move_constructions >= 0 # Don't invoke any + assert astats.copy_assignments == 2 + assert bstats.copy_assignments == 0 + assert astats.move_assignments == 0 + assert bstats.move_assignments == 0 + + +def test_importing(): + from pybind11_tests.modules import OD + from collections import OrderedDict + + assert OD is OrderedDict + assert str(OD([(1, 'a'), (2, 'b')])) == "OrderedDict([(1, 'a'), (2, 'b')])" + + +def test_pydoc(): + """Pydoc needs to be able to provide help() for everything inside a pybind11 module""" + import pybind11_tests + import pydoc + + assert pybind11_tests.__name__ == "pybind11_tests" + assert pybind11_tests.__doc__ == "pybind11 test module" + assert pydoc.text.docmodule(pybind11_tests) + + +def test_duplicate_registration(): + """Registering two things with the same name""" + + assert m.duplicate_registration() == [] diff --git a/diffvg/pybind11/tests/test_multiple_inheritance.cpp b/diffvg/pybind11/tests/test_multiple_inheritance.cpp new file mode 100644 index 0000000000000000000000000000000000000000..70e34178540d210770fc862b5520a3b3c9d91a5c --- /dev/null +++ b/diffvg/pybind11/tests/test_multiple_inheritance.cpp @@ -0,0 +1,220 @@ +/* + tests/test_multiple_inheritance.cpp -- multiple inheritance, + implicit MI casts + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" + +// Many bases for testing that multiple inheritance from many classes (i.e. requiring extra +// space for holder constructed flags) works. +template struct BaseN { + BaseN(int i) : i(i) { } + int i; +}; + +// test_mi_static_properties +struct Vanilla { + std::string vanilla() { return "Vanilla"; }; +}; +struct WithStatic1 { + static std::string static_func1() { return "WithStatic1"; }; + static int static_value1; +}; +struct WithStatic2 { + static std::string static_func2() { return "WithStatic2"; }; + static int static_value2; +}; +struct VanillaStaticMix1 : Vanilla, WithStatic1, WithStatic2 { + static std::string static_func() { return "VanillaStaticMix1"; } + static int static_value; +}; +struct VanillaStaticMix2 : WithStatic1, Vanilla, WithStatic2 { + static std::string static_func() { return "VanillaStaticMix2"; } + static int static_value; +}; +int WithStatic1::static_value1 = 1; +int WithStatic2::static_value2 = 2; +int VanillaStaticMix1::static_value = 12; +int VanillaStaticMix2::static_value = 12; + +TEST_SUBMODULE(multiple_inheritance, m) { + + // test_multiple_inheritance_mix1 + // test_multiple_inheritance_mix2 + struct Base1 { + Base1(int i) : i(i) { } + int foo() { return i; } + int i; + }; + py::class_ b1(m, "Base1"); + b1.def(py::init()) + .def("foo", &Base1::foo); + + struct Base2 { + Base2(int i) : i(i) { } + int bar() { return i; } + int i; + }; + py::class_ b2(m, "Base2"); + b2.def(py::init()) + .def("bar", &Base2::bar); + + + // test_multiple_inheritance_cpp + struct Base12 : Base1, Base2 { + Base12(int i, int j) : Base1(i), Base2(j) { } + }; + struct MIType : Base12 { + MIType(int i, int j) : Base12(i, j) { } + }; + py::class_(m, "Base12"); + py::class_(m, "MIType") + .def(py::init()); + + + // test_multiple_inheritance_python_many_bases + #define PYBIND11_BASEN(N) py::class_>(m, "BaseN" #N).def(py::init()).def("f" #N, [](BaseN &b) { return b.i + N; }) + PYBIND11_BASEN( 1); 
PYBIND11_BASEN( 2); PYBIND11_BASEN( 3); PYBIND11_BASEN( 4); + PYBIND11_BASEN( 5); PYBIND11_BASEN( 6); PYBIND11_BASEN( 7); PYBIND11_BASEN( 8); + PYBIND11_BASEN( 9); PYBIND11_BASEN(10); PYBIND11_BASEN(11); PYBIND11_BASEN(12); + PYBIND11_BASEN(13); PYBIND11_BASEN(14); PYBIND11_BASEN(15); PYBIND11_BASEN(16); + PYBIND11_BASEN(17); + + // Uncommenting this should result in a compile time failure (MI can only be specified via + // template parameters because pybind has to know the types involved; see discussion in #742 for + // details). +// struct Base12v2 : Base1, Base2 { +// Base12v2(int i, int j) : Base1(i), Base2(j) { } +// }; +// py::class_(m, "Base12v2", b1, b2) +// .def(py::init()); + + + // test_multiple_inheritance_virtbase + // Test the case where not all base classes are specified, and where pybind11 requires the + // py::multiple_inheritance flag to perform proper casting between types. + struct Base1a { + Base1a(int i) : i(i) { } + int foo() { return i; } + int i; + }; + py::class_>(m, "Base1a") + .def(py::init()) + .def("foo", &Base1a::foo); + + struct Base2a { + Base2a(int i) : i(i) { } + int bar() { return i; } + int i; + }; + py::class_>(m, "Base2a") + .def(py::init()) + .def("bar", &Base2a::bar); + + struct Base12a : Base1a, Base2a { + Base12a(int i, int j) : Base1a(i), Base2a(j) { } + }; + py::class_>(m, "Base12a", py::multiple_inheritance()) + .def(py::init()); + + m.def("bar_base2a", [](Base2a *b) { return b->bar(); }); + m.def("bar_base2a_sharedptr", [](std::shared_ptr b) { return b->bar(); }); + + // test_mi_unaligned_base + // test_mi_base_return + // Issue #801: invalid casting to derived type with MI bases + struct I801B1 { int a = 1; I801B1() = default; I801B1(const I801B1 &) = default; virtual ~I801B1() = default; }; + struct I801B2 { int b = 2; I801B2() = default; I801B2(const I801B2 &) = default; virtual ~I801B2() = default; }; + struct I801C : I801B1, I801B2 {}; + struct I801D : I801C {}; // Indirect MI + // Unregistered classes: + struct 
I801B3 { int c = 3; virtual ~I801B3() = default; }; + struct I801E : I801B3, I801D {}; + + py::class_>(m, "I801B1").def(py::init<>()).def_readonly("a", &I801B1::a); + py::class_>(m, "I801B2").def(py::init<>()).def_readonly("b", &I801B2::b); + py::class_>(m, "I801C").def(py::init<>()); + py::class_>(m, "I801D").def(py::init<>()); + + // Two separate issues here: first, we want to recognize a pointer to a base type as being a + // known instance even when the pointer value is unequal (i.e. due to a non-first + // multiple-inheritance base class): + m.def("i801b1_c", [](I801C *c) { return static_cast(c); }); + m.def("i801b2_c", [](I801C *c) { return static_cast(c); }); + m.def("i801b1_d", [](I801D *d) { return static_cast(d); }); + m.def("i801b2_d", [](I801D *d) { return static_cast(d); }); + + // Second, when returned a base class pointer to a derived instance, we cannot assume that the + // pointer is `reinterpret_cast`able to the derived pointer because, like above, the base class + // pointer could be offset. 
+ m.def("i801c_b1", []() -> I801B1 * { return new I801C(); }); + m.def("i801c_b2", []() -> I801B2 * { return new I801C(); }); + m.def("i801d_b1", []() -> I801B1 * { return new I801D(); }); + m.def("i801d_b2", []() -> I801B2 * { return new I801D(); }); + + // Return a base class pointer to a pybind-registered type when the actual derived type + // isn't pybind-registered (and uses multiple-inheritance to offset the pybind base) + m.def("i801e_c", []() -> I801C * { return new I801E(); }); + m.def("i801e_b2", []() -> I801B2 * { return new I801E(); }); + + + // test_mi_static_properties + py::class_(m, "Vanilla") + .def(py::init<>()) + .def("vanilla", &Vanilla::vanilla); + + py::class_(m, "WithStatic1") + .def(py::init<>()) + .def_static("static_func1", &WithStatic1::static_func1) + .def_readwrite_static("static_value1", &WithStatic1::static_value1); + + py::class_(m, "WithStatic2") + .def(py::init<>()) + .def_static("static_func2", &WithStatic2::static_func2) + .def_readwrite_static("static_value2", &WithStatic2::static_value2); + + py::class_( + m, "VanillaStaticMix1") + .def(py::init<>()) + .def_static("static_func", &VanillaStaticMix1::static_func) + .def_readwrite_static("static_value", &VanillaStaticMix1::static_value); + + py::class_( + m, "VanillaStaticMix2") + .def(py::init<>()) + .def_static("static_func", &VanillaStaticMix2::static_func) + .def_readwrite_static("static_value", &VanillaStaticMix2::static_value); + + +#if !(defined(PYPY_VERSION) && (PYPY_VERSION_NUM < 0x06000000)) + struct WithDict { }; + struct VanillaDictMix1 : Vanilla, WithDict { }; + struct VanillaDictMix2 : WithDict, Vanilla { }; + py::class_(m, "WithDict", py::dynamic_attr()).def(py::init<>()); + py::class_(m, "VanillaDictMix1").def(py::init<>()); + py::class_(m, "VanillaDictMix2").def(py::init<>()); +#endif + + // test_diamond_inheritance + // Issue #959: segfault when constructing diamond inheritance instance + // All of these have int members so that there will be various unequal 
pointers involved. + struct B { int b; B() = default; B(const B&) = default; virtual ~B() = default; }; + struct C0 : public virtual B { int c0; }; + struct C1 : public virtual B { int c1; }; + struct D : public C0, public C1 { int d; }; + py::class_(m, "B") + .def("b", [](B *self) { return self; }); + py::class_(m, "C0") + .def("c0", [](C0 *self) { return self; }); + py::class_(m, "C1") + .def("c1", [](C1 *self) { return self; }); + py::class_(m, "D") + .def(py::init<>()); +} diff --git a/diffvg/pybind11/tests/test_multiple_inheritance.py b/diffvg/pybind11/tests/test_multiple_inheritance.py new file mode 100644 index 0000000000000000000000000000000000000000..7a0259d2148f14aafeac67a43d3c906a0b5719d0 --- /dev/null +++ b/diffvg/pybind11/tests/test_multiple_inheritance.py @@ -0,0 +1,356 @@ +# -*- coding: utf-8 -*- +import pytest + +import env # noqa: F401 + +from pybind11_tests import ConstructorStats +from pybind11_tests import multiple_inheritance as m + + +def test_multiple_inheritance_cpp(): + mt = m.MIType(3, 4) + + assert mt.foo() == 3 + assert mt.bar() == 4 + + +@pytest.mark.skipif("env.PYPY and env.PY2") +@pytest.mark.xfail("env.PYPY and not env.PY2") +def test_multiple_inheritance_mix1(): + class Base1: + def __init__(self, i): + self.i = i + + def foo(self): + return self.i + + class MITypePy(Base1, m.Base2): + def __init__(self, i, j): + Base1.__init__(self, i) + m.Base2.__init__(self, j) + + mt = MITypePy(3, 4) + + assert mt.foo() == 3 + assert mt.bar() == 4 + + +def test_multiple_inheritance_mix2(): + class Base2: + def __init__(self, i): + self.i = i + + def bar(self): + return self.i + + class MITypePy(m.Base1, Base2): + def __init__(self, i, j): + m.Base1.__init__(self, i) + Base2.__init__(self, j) + + mt = MITypePy(3, 4) + + assert mt.foo() == 3 + assert mt.bar() == 4 + + +@pytest.mark.skipif("env.PYPY and env.PY2") +@pytest.mark.xfail("env.PYPY and not env.PY2") +def test_multiple_inheritance_python(): + + class MI1(m.Base1, m.Base2): + def 
__init__(self, i, j): + m.Base1.__init__(self, i) + m.Base2.__init__(self, j) + + class B1(object): + def v(self): + return 1 + + class MI2(B1, m.Base1, m.Base2): + def __init__(self, i, j): + B1.__init__(self) + m.Base1.__init__(self, i) + m.Base2.__init__(self, j) + + class MI3(MI2): + def __init__(self, i, j): + MI2.__init__(self, i, j) + + class MI4(MI3, m.Base2): + def __init__(self, i, j): + MI3.__init__(self, i, j) + # This should be ignored (Base2 is already initialized via MI2): + m.Base2.__init__(self, i + 100) + + class MI5(m.Base2, B1, m.Base1): + def __init__(self, i, j): + B1.__init__(self) + m.Base1.__init__(self, i) + m.Base2.__init__(self, j) + + class MI6(m.Base2, B1): + def __init__(self, i): + m.Base2.__init__(self, i) + B1.__init__(self) + + class B2(B1): + def v(self): + return 2 + + class B3(object): + def v(self): + return 3 + + class B4(B3, B2): + def v(self): + return 4 + + class MI7(B4, MI6): + def __init__(self, i): + B4.__init__(self) + MI6.__init__(self, i) + + class MI8(MI6, B3): + def __init__(self, i): + MI6.__init__(self, i) + B3.__init__(self) + + class MI8b(B3, MI6): + def __init__(self, i): + B3.__init__(self) + MI6.__init__(self, i) + + mi1 = MI1(1, 2) + assert mi1.foo() == 1 + assert mi1.bar() == 2 + + mi2 = MI2(3, 4) + assert mi2.v() == 1 + assert mi2.foo() == 3 + assert mi2.bar() == 4 + + mi3 = MI3(5, 6) + assert mi3.v() == 1 + assert mi3.foo() == 5 + assert mi3.bar() == 6 + + mi4 = MI4(7, 8) + assert mi4.v() == 1 + assert mi4.foo() == 7 + assert mi4.bar() == 8 + + mi5 = MI5(10, 11) + assert mi5.v() == 1 + assert mi5.foo() == 10 + assert mi5.bar() == 11 + + mi6 = MI6(12) + assert mi6.v() == 1 + assert mi6.bar() == 12 + + mi7 = MI7(13) + assert mi7.v() == 4 + assert mi7.bar() == 13 + + mi8 = MI8(14) + assert mi8.v() == 1 + assert mi8.bar() == 14 + + mi8b = MI8b(15) + assert mi8b.v() == 3 + assert mi8b.bar() == 15 + + +def test_multiple_inheritance_python_many_bases(): + + class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, 
m.BaseN4): + def __init__(self): + m.BaseN1.__init__(self, 1) + m.BaseN2.__init__(self, 2) + m.BaseN3.__init__(self, 3) + m.BaseN4.__init__(self, 4) + + class MIMany58(m.BaseN5, m.BaseN6, m.BaseN7, m.BaseN8): + def __init__(self): + m.BaseN5.__init__(self, 5) + m.BaseN6.__init__(self, 6) + m.BaseN7.__init__(self, 7) + m.BaseN8.__init__(self, 8) + + class MIMany916(m.BaseN9, m.BaseN10, m.BaseN11, m.BaseN12, m.BaseN13, m.BaseN14, m.BaseN15, + m.BaseN16): + def __init__(self): + m.BaseN9.__init__(self, 9) + m.BaseN10.__init__(self, 10) + m.BaseN11.__init__(self, 11) + m.BaseN12.__init__(self, 12) + m.BaseN13.__init__(self, 13) + m.BaseN14.__init__(self, 14) + m.BaseN15.__init__(self, 15) + m.BaseN16.__init__(self, 16) + + class MIMany19(MIMany14, MIMany58, m.BaseN9): + def __init__(self): + MIMany14.__init__(self) + MIMany58.__init__(self) + m.BaseN9.__init__(self, 9) + + class MIMany117(MIMany14, MIMany58, MIMany916, m.BaseN17): + def __init__(self): + MIMany14.__init__(self) + MIMany58.__init__(self) + MIMany916.__init__(self) + m.BaseN17.__init__(self, 17) + + # Inherits from 4 registered C++ classes: can fit in one pointer on any modern arch: + a = MIMany14() + for i in range(1, 4): + assert getattr(a, "f" + str(i))() == 2 * i + + # Inherits from 8: requires 1/2 pointers worth of holder flags on 32/64-bit arch: + b = MIMany916() + for i in range(9, 16): + assert getattr(b, "f" + str(i))() == 2 * i + + # Inherits from 9: requires >= 2 pointers worth of holder flags + c = MIMany19() + for i in range(1, 9): + assert getattr(c, "f" + str(i))() == 2 * i + + # Inherits from 17: requires >= 3 pointers worth of holder flags + d = MIMany117() + for i in range(1, 17): + assert getattr(d, "f" + str(i))() == 2 * i + + +def test_multiple_inheritance_virtbase(): + + class MITypePy(m.Base12a): + def __init__(self, i, j): + m.Base12a.__init__(self, i, j) + + mt = MITypePy(3, 4) + assert mt.bar() == 4 + assert m.bar_base2a(mt) == 4 + assert m.bar_base2a_sharedptr(mt) == 4 + + +def 
test_mi_static_properties(): + """Mixing bases with and without static properties should be possible + and the result should be independent of base definition order""" + + for d in (m.VanillaStaticMix1(), m.VanillaStaticMix2()): + assert d.vanilla() == "Vanilla" + assert d.static_func1() == "WithStatic1" + assert d.static_func2() == "WithStatic2" + assert d.static_func() == d.__class__.__name__ + + m.WithStatic1.static_value1 = 1 + m.WithStatic2.static_value2 = 2 + assert d.static_value1 == 1 + assert d.static_value2 == 2 + assert d.static_value == 12 + + d.static_value1 = 0 + assert d.static_value1 == 0 + d.static_value2 = 0 + assert d.static_value2 == 0 + d.static_value = 0 + assert d.static_value == 0 + + +# Requires PyPy 6+ +def test_mi_dynamic_attributes(): + """Mixing bases with and without dynamic attribute support""" + + for d in (m.VanillaDictMix1(), m.VanillaDictMix2()): + d.dynamic = 1 + assert d.dynamic == 1 + + +def test_mi_unaligned_base(): + """Returning an offset (non-first MI) base class pointer should recognize the instance""" + + n_inst = ConstructorStats.detail_reg_inst() + + c = m.I801C() + d = m.I801D() + # + 4 below because we have the two instances, and each instance has offset base I801B2 + assert ConstructorStats.detail_reg_inst() == n_inst + 4 + b1c = m.i801b1_c(c) + assert b1c is c + b2c = m.i801b2_c(c) + assert b2c is c + b1d = m.i801b1_d(d) + assert b1d is d + b2d = m.i801b2_d(d) + assert b2d is d + + assert ConstructorStats.detail_reg_inst() == n_inst + 4 # no extra instances + del c, b1c, b2c + assert ConstructorStats.detail_reg_inst() == n_inst + 2 + del d, b1d, b2d + assert ConstructorStats.detail_reg_inst() == n_inst + + +def test_mi_base_return(): + """Tests returning an offset (non-first MI) base class pointer to a derived instance""" + + n_inst = ConstructorStats.detail_reg_inst() + + c1 = m.i801c_b1() + assert type(c1) is m.I801C + assert c1.a == 1 + assert c1.b == 2 + + d1 = m.i801d_b1() + assert type(d1) is m.I801D + assert 
d1.a == 1 + assert d1.b == 2 + + assert ConstructorStats.detail_reg_inst() == n_inst + 4 + + c2 = m.i801c_b2() + assert type(c2) is m.I801C + assert c2.a == 1 + assert c2.b == 2 + + d2 = m.i801d_b2() + assert type(d2) is m.I801D + assert d2.a == 1 + assert d2.b == 2 + + assert ConstructorStats.detail_reg_inst() == n_inst + 8 + + del c2 + assert ConstructorStats.detail_reg_inst() == n_inst + 6 + del c1, d1, d2 + assert ConstructorStats.detail_reg_inst() == n_inst + + # Returning an unregistered derived type with a registered base; we won't + # pick up the derived type, obviously, but should still work (as an object + # of whatever type was returned). + e1 = m.i801e_c() + assert type(e1) is m.I801C + assert e1.a == 1 + assert e1.b == 2 + + e2 = m.i801e_b2() + assert type(e2) is m.I801B2 + assert e2.b == 2 + + +def test_diamond_inheritance(): + """Tests that diamond inheritance works as expected (issue #959)""" + + # Issue #959: this shouldn't segfault: + d = m.D() + + # Make sure all the various distinct pointers are all recognized as registered instances: + assert d is d.c0() + assert d is d.c1() + assert d is d.b() + assert d is d.c0().b() + assert d is d.c1().b() + assert d is d.c0().c1().b().c0().b() diff --git a/diffvg/pybind11/tests/test_numpy_array.cpp b/diffvg/pybind11/tests/test_numpy_array.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e37beb5a5c22661f39bd1651d41dac594d3ac2ba --- /dev/null +++ b/diffvg/pybind11/tests/test_numpy_array.cpp @@ -0,0 +1,388 @@ +/* + tests/test_numpy_array.cpp -- test core array functionality + + Copyright (c) 2016 Ivan Smirnov + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" + +#include +#include + +#include + +// Size / dtype checks. 
+struct DtypeCheck { + py::dtype numpy{}; + py::dtype pybind11{}; +}; + +template +DtypeCheck get_dtype_check(const char* name) { + py::module np = py::module::import("numpy"); + DtypeCheck check{}; + check.numpy = np.attr("dtype")(np.attr(name)); + check.pybind11 = py::dtype::of(); + return check; +} + +std::vector get_concrete_dtype_checks() { + return { + // Normalization + get_dtype_check("int8"), + get_dtype_check("uint8"), + get_dtype_check("int16"), + get_dtype_check("uint16"), + get_dtype_check("int32"), + get_dtype_check("uint32"), + get_dtype_check("int64"), + get_dtype_check("uint64") + }; +} + +struct DtypeSizeCheck { + std::string name{}; + int size_cpp{}; + int size_numpy{}; + // For debugging. + py::dtype dtype{}; +}; + +template +DtypeSizeCheck get_dtype_size_check() { + DtypeSizeCheck check{}; + check.name = py::type_id(); + check.size_cpp = sizeof(T); + check.dtype = py::dtype::of(); + check.size_numpy = check.dtype.attr("itemsize").template cast(); + return check; +} + +std::vector get_platform_dtype_size_checks() { + return { + get_dtype_size_check(), + get_dtype_size_check(), + get_dtype_size_check(), + get_dtype_size_check(), + get_dtype_size_check(), + get_dtype_size_check(), + get_dtype_size_check(), + get_dtype_size_check(), + }; +} + +// Arrays. +using arr = py::array; +using arr_t = py::array_t; +static_assert(std::is_same::value, ""); + +template arr data(const arr& a, Ix... index) { + return arr(a.nbytes() - a.offset_at(index...), (const uint8_t *) a.data(index...)); +} + +template arr data_t(const arr_t& a, Ix... index) { + return arr(a.size() - a.index_at(index...), a.data(index...)); +} + +template arr& mutate_data(arr& a, Ix... index) { + auto ptr = (uint8_t *) a.mutable_data(index...); + for (ssize_t i = 0; i < a.nbytes() - a.offset_at(index...); i++) + ptr[i] = (uint8_t) (ptr[i] * 2); + return a; +} + +template arr_t& mutate_data_t(arr_t& a, Ix... 
index) { + auto ptr = a.mutable_data(index...); + for (ssize_t i = 0; i < a.size() - a.index_at(index...); i++) + ptr[i]++; + return a; +} + +template ssize_t index_at(const arr& a, Ix... idx) { return a.index_at(idx...); } +template ssize_t index_at_t(const arr_t& a, Ix... idx) { return a.index_at(idx...); } +template ssize_t offset_at(const arr& a, Ix... idx) { return a.offset_at(idx...); } +template ssize_t offset_at_t(const arr_t& a, Ix... idx) { return a.offset_at(idx...); } +template ssize_t at_t(const arr_t& a, Ix... idx) { return a.at(idx...); } +template arr_t& mutate_at_t(arr_t& a, Ix... idx) { a.mutable_at(idx...)++; return a; } + +#define def_index_fn(name, type) \ + sm.def(#name, [](type a) { return name(a); }); \ + sm.def(#name, [](type a, int i) { return name(a, i); }); \ + sm.def(#name, [](type a, int i, int j) { return name(a, i, j); }); \ + sm.def(#name, [](type a, int i, int j, int k) { return name(a, i, j, k); }); + +template py::handle auxiliaries(T &&r, T2 &&r2) { + if (r.ndim() != 2) throw std::domain_error("error: ndim != 2"); + py::list l; + l.append(*r.data(0, 0)); + l.append(*r2.mutable_data(0, 0)); + l.append(r.data(0, 1) == r2.mutable_data(0, 1)); + l.append(r.ndim()); + l.append(r.itemsize()); + l.append(r.shape(0)); + l.append(r.shape(1)); + l.append(r.size()); + l.append(r.nbytes()); + return l.release(); +} + +// note: declaration at local scope would create a dangling reference! +static int data_i = 42; + +TEST_SUBMODULE(numpy_array, sm) { + try { py::module::import("numpy"); } + catch (...) 
{ return; } + + // test_dtypes + py::class_(sm, "DtypeCheck") + .def_readonly("numpy", &DtypeCheck::numpy) + .def_readonly("pybind11", &DtypeCheck::pybind11) + .def("__repr__", [](const DtypeCheck& self) { + return py::str("").format( + self.numpy, self.pybind11); + }); + sm.def("get_concrete_dtype_checks", &get_concrete_dtype_checks); + + py::class_(sm, "DtypeSizeCheck") + .def_readonly("name", &DtypeSizeCheck::name) + .def_readonly("size_cpp", &DtypeSizeCheck::size_cpp) + .def_readonly("size_numpy", &DtypeSizeCheck::size_numpy) + .def("__repr__", [](const DtypeSizeCheck& self) { + return py::str("").format( + self.name, self.size_cpp, self.size_numpy, self.dtype); + }); + sm.def("get_platform_dtype_size_checks", &get_platform_dtype_size_checks); + + // test_array_attributes + sm.def("ndim", [](const arr& a) { return a.ndim(); }); + sm.def("shape", [](const arr& a) { return arr(a.ndim(), a.shape()); }); + sm.def("shape", [](const arr& a, ssize_t dim) { return a.shape(dim); }); + sm.def("strides", [](const arr& a) { return arr(a.ndim(), a.strides()); }); + sm.def("strides", [](const arr& a, ssize_t dim) { return a.strides(dim); }); + sm.def("writeable", [](const arr& a) { return a.writeable(); }); + sm.def("size", [](const arr& a) { return a.size(); }); + sm.def("itemsize", [](const arr& a) { return a.itemsize(); }); + sm.def("nbytes", [](const arr& a) { return a.nbytes(); }); + sm.def("owndata", [](const arr& a) { return a.owndata(); }); + + // test_index_offset + def_index_fn(index_at, const arr&); + def_index_fn(index_at_t, const arr_t&); + def_index_fn(offset_at, const arr&); + def_index_fn(offset_at_t, const arr_t&); + // test_data + def_index_fn(data, const arr&); + def_index_fn(data_t, const arr_t&); + // test_mutate_data, test_mutate_readonly + def_index_fn(mutate_data, arr&); + def_index_fn(mutate_data_t, arr_t&); + def_index_fn(at_t, const arr_t&); + def_index_fn(mutate_at_t, arr_t&); + + // test_make_c_f_array + sm.def("make_f_array", [] { return 
py::array_t({ 2, 2 }, { 4, 8 }); }); + sm.def("make_c_array", [] { return py::array_t({ 2, 2 }, { 8, 4 }); }); + + // test_empty_shaped_array + sm.def("make_empty_shaped_array", [] { return py::array(py::dtype("f"), {}, {}); }); + // test numpy scalars (empty shape, ndim==0) + sm.def("scalar_int", []() { return py::array(py::dtype("i"), {}, {}, &data_i); }); + + // test_wrap + sm.def("wrap", [](py::array a) { + return py::array( + a.dtype(), + {a.shape(), a.shape() + a.ndim()}, + {a.strides(), a.strides() + a.ndim()}, + a.data(), + a + ); + }); + + // test_numpy_view + struct ArrayClass { + int data[2] = { 1, 2 }; + ArrayClass() { py::print("ArrayClass()"); } + ~ArrayClass() { py::print("~ArrayClass()"); } + }; + py::class_(sm, "ArrayClass") + .def(py::init<>()) + .def("numpy_view", [](py::object &obj) { + py::print("ArrayClass::numpy_view()"); + ArrayClass &a = obj.cast(); + return py::array_t({2}, {4}, a.data, obj); + } + ); + + // test_cast_numpy_int64_to_uint64 + sm.def("function_taking_uint64", [](uint64_t) { }); + + // test_isinstance + sm.def("isinstance_untyped", [](py::object yes, py::object no) { + return py::isinstance(yes) && !py::isinstance(no); + }); + sm.def("isinstance_typed", [](py::object o) { + return py::isinstance>(o) && !py::isinstance>(o); + }); + + // test_constructors + sm.def("default_constructors", []() { + return py::dict( + "array"_a=py::array(), + "array_t"_a=py::array_t(), + "array_t"_a=py::array_t() + ); + }); + sm.def("converting_constructors", [](py::object o) { + return py::dict( + "array"_a=py::array(o), + "array_t"_a=py::array_t(o), + "array_t"_a=py::array_t(o) + ); + }); + + // test_overload_resolution + sm.def("overloaded", [](py::array_t) { return "double"; }); + sm.def("overloaded", [](py::array_t) { return "float"; }); + sm.def("overloaded", [](py::array_t) { return "int"; }); + sm.def("overloaded", [](py::array_t) { return "unsigned short"; }); + sm.def("overloaded", [](py::array_t) { return "long long"; }); + 
sm.def("overloaded", [](py::array_t>) { return "double complex"; }); + sm.def("overloaded", [](py::array_t>) { return "float complex"; }); + + sm.def("overloaded2", [](py::array_t>) { return "double complex"; }); + sm.def("overloaded2", [](py::array_t) { return "double"; }); + sm.def("overloaded2", [](py::array_t>) { return "float complex"; }); + sm.def("overloaded2", [](py::array_t) { return "float"; }); + + // Only accept the exact types: + sm.def("overloaded3", [](py::array_t) { return "int"; }, py::arg().noconvert()); + sm.def("overloaded3", [](py::array_t) { return "double"; }, py::arg().noconvert()); + + // Make sure we don't do unsafe coercion (e.g. float to int) when not using forcecast, but + // rather that float gets converted via the safe (conversion to double) overload: + sm.def("overloaded4", [](py::array_t) { return "long long"; }); + sm.def("overloaded4", [](py::array_t) { return "double"; }); + + // But we do allow conversion to int if forcecast is enabled (but only if no overload matches + // without conversion) + sm.def("overloaded5", [](py::array_t) { return "unsigned int"; }); + sm.def("overloaded5", [](py::array_t) { return "double"; }); + + // test_greedy_string_overload + // Issue 685: ndarray shouldn't go to std::string overload + sm.def("issue685", [](std::string) { return "string"; }); + sm.def("issue685", [](py::array) { return "array"; }); + sm.def("issue685", [](py::object) { return "other"; }); + + // test_array_unchecked_fixed_dims + sm.def("proxy_add2", [](py::array_t a, double v) { + auto r = a.mutable_unchecked<2>(); + for (ssize_t i = 0; i < r.shape(0); i++) + for (ssize_t j = 0; j < r.shape(1); j++) + r(i, j) += v; + }, py::arg().noconvert(), py::arg()); + + sm.def("proxy_init3", [](double start) { + py::array_t a({ 3, 3, 3 }); + auto r = a.mutable_unchecked<3>(); + for (ssize_t i = 0; i < r.shape(0); i++) + for (ssize_t j = 0; j < r.shape(1); j++) + for (ssize_t k = 0; k < r.shape(2); k++) + r(i, j, k) = start++; + return a; + 
}); + sm.def("proxy_init3F", [](double start) { + py::array_t a({ 3, 3, 3 }); + auto r = a.mutable_unchecked<3>(); + for (ssize_t k = 0; k < r.shape(2); k++) + for (ssize_t j = 0; j < r.shape(1); j++) + for (ssize_t i = 0; i < r.shape(0); i++) + r(i, j, k) = start++; + return a; + }); + sm.def("proxy_squared_L2_norm", [](py::array_t a) { + auto r = a.unchecked<1>(); + double sumsq = 0; + for (ssize_t i = 0; i < r.shape(0); i++) + sumsq += r[i] * r(i); // Either notation works for a 1D array + return sumsq; + }); + + sm.def("proxy_auxiliaries2", [](py::array_t a) { + auto r = a.unchecked<2>(); + auto r2 = a.mutable_unchecked<2>(); + return auxiliaries(r, r2); + }); + + // test_array_unchecked_dyn_dims + // Same as the above, but without a compile-time dimensions specification: + sm.def("proxy_add2_dyn", [](py::array_t a, double v) { + auto r = a.mutable_unchecked(); + if (r.ndim() != 2) throw std::domain_error("error: ndim != 2"); + for (ssize_t i = 0; i < r.shape(0); i++) + for (ssize_t j = 0; j < r.shape(1); j++) + r(i, j) += v; + }, py::arg().noconvert(), py::arg()); + sm.def("proxy_init3_dyn", [](double start) { + py::array_t a({ 3, 3, 3 }); + auto r = a.mutable_unchecked(); + if (r.ndim() != 3) throw std::domain_error("error: ndim != 3"); + for (ssize_t i = 0; i < r.shape(0); i++) + for (ssize_t j = 0; j < r.shape(1); j++) + for (ssize_t k = 0; k < r.shape(2); k++) + r(i, j, k) = start++; + return a; + }); + sm.def("proxy_auxiliaries2_dyn", [](py::array_t a) { + return auxiliaries(a.unchecked(), a.mutable_unchecked()); + }); + + sm.def("array_auxiliaries2", [](py::array_t a) { + return auxiliaries(a, a); + }); + + // test_array_failures + // Issue #785: Uninformative "Unknown internal error" exception when constructing array from empty object: + sm.def("array_fail_test", []() { return py::array(py::object()); }); + sm.def("array_t_fail_test", []() { return py::array_t(py::object()); }); + // Make sure the error from numpy is being passed through: + 
sm.def("array_fail_test_negative_size", []() { int c = 0; return py::array(-1, &c); }); + + // test_initializer_list + // Issue (unnumbered; reported in #788): regression: initializer lists can be ambiguous + sm.def("array_initializer_list1", []() { return py::array_t(1); }); // { 1 } also works, but clang warns about it + sm.def("array_initializer_list2", []() { return py::array_t({ 1, 2 }); }); + sm.def("array_initializer_list3", []() { return py::array_t({ 1, 2, 3 }); }); + sm.def("array_initializer_list4", []() { return py::array_t({ 1, 2, 3, 4 }); }); + + // test_array_resize + // reshape array to 2D without changing size + sm.def("array_reshape2", [](py::array_t a) { + const ssize_t dim_sz = (ssize_t)std::sqrt(a.size()); + if (dim_sz * dim_sz != a.size()) + throw std::domain_error("array_reshape2: input array total size is not a squared integer"); + a.resize({dim_sz, dim_sz}); + }); + + // resize to 3D array with each dimension = N + sm.def("array_resize3", [](py::array_t a, size_t N, bool refcheck) { + a.resize({N, N, N}, refcheck); + }); + + // test_array_create_and_resize + // return 2D array with Nrows = Ncols = N + sm.def("create_and_resize", [](size_t N) { + py::array_t a; + a.resize({N, N}); + std::fill(a.mutable_data(), a.mutable_data() + a.size(), 42.); + return a; + }); + + sm.def("index_using_ellipsis", [](py::array a) { + return a[py::make_tuple(0, py::ellipsis(), 0)]; + }); +} diff --git a/diffvg/pybind11/tests/test_numpy_array.py b/diffvg/pybind11/tests/test_numpy_array.py new file mode 100644 index 0000000000000000000000000000000000000000..ad3ca58c1af53e2b65ffed341a0512ebb5c20815 --- /dev/null +++ b/diffvg/pybind11/tests/test_numpy_array.py @@ -0,0 +1,446 @@ +# -*- coding: utf-8 -*- +import pytest + +import env # noqa: F401 + +from pybind11_tests import numpy_array as m + +np = pytest.importorskip("numpy") + + +def test_dtypes(): + # See issue #1328. + # - Platform-dependent sizes. 
+ for size_check in m.get_platform_dtype_size_checks(): + print(size_check) + assert size_check.size_cpp == size_check.size_numpy, size_check + # - Concrete sizes. + for check in m.get_concrete_dtype_checks(): + print(check) + assert check.numpy == check.pybind11, check + if check.numpy.num != check.pybind11.num: + print("NOTE: typenum mismatch for {}: {} != {}".format( + check, check.numpy.num, check.pybind11.num)) + + +@pytest.fixture(scope='function') +def arr(): + return np.array([[1, 2, 3], [4, 5, 6]], '=u2') + + +def test_array_attributes(): + a = np.array(0, 'f8') + assert m.ndim(a) == 0 + assert all(m.shape(a) == []) + assert all(m.strides(a) == []) + with pytest.raises(IndexError) as excinfo: + m.shape(a, 0) + assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)' + with pytest.raises(IndexError) as excinfo: + m.strides(a, 0) + assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)' + assert m.writeable(a) + assert m.size(a) == 1 + assert m.itemsize(a) == 8 + assert m.nbytes(a) == 8 + assert m.owndata(a) + + a = np.array([[1, 2, 3], [4, 5, 6]], 'u2').view() + a.flags.writeable = False + assert m.ndim(a) == 2 + assert all(m.shape(a) == [2, 3]) + assert m.shape(a, 0) == 2 + assert m.shape(a, 1) == 3 + assert all(m.strides(a) == [6, 2]) + assert m.strides(a, 0) == 6 + assert m.strides(a, 1) == 2 + with pytest.raises(IndexError) as excinfo: + m.shape(a, 2) + assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)' + with pytest.raises(IndexError) as excinfo: + m.strides(a, 2) + assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)' + assert not m.writeable(a) + assert m.size(a) == 6 + assert m.itemsize(a) == 2 + assert m.nbytes(a) == 12 + assert not m.owndata(a) + + +@pytest.mark.parametrize('args, ret', [([], 0), ([0], 0), ([1], 3), ([0, 1], 1), ([1, 2], 5)]) +def test_index_offset(arr, args, ret): + assert m.index_at(arr, *args) == ret + assert m.index_at_t(arr, *args) == ret + assert m.offset_at(arr, *args) == ret * arr.dtype.itemsize + assert 
m.offset_at_t(arr, *args) == ret * arr.dtype.itemsize + + +def test_dim_check_fail(arr): + for func in (m.index_at, m.index_at_t, m.offset_at, m.offset_at_t, m.data, m.data_t, + m.mutate_data, m.mutate_data_t): + with pytest.raises(IndexError) as excinfo: + func(arr, 1, 2, 3) + assert str(excinfo.value) == 'too many indices for an array: 3 (ndim = 2)' + + +@pytest.mark.parametrize('args, ret', + [([], [1, 2, 3, 4, 5, 6]), + ([1], [4, 5, 6]), + ([0, 1], [2, 3, 4, 5, 6]), + ([1, 2], [6])]) +def test_data(arr, args, ret): + from sys import byteorder + assert all(m.data_t(arr, *args) == ret) + assert all(m.data(arr, *args)[(0 if byteorder == 'little' else 1)::2] == ret) + assert all(m.data(arr, *args)[(1 if byteorder == 'little' else 0)::2] == 0) + + +@pytest.mark.parametrize('dim', [0, 1, 3]) +def test_at_fail(arr, dim): + for func in m.at_t, m.mutate_at_t: + with pytest.raises(IndexError) as excinfo: + func(arr, *([0] * dim)) + assert str(excinfo.value) == 'index dimension mismatch: {} (ndim = 2)'.format(dim) + + +def test_at(arr): + assert m.at_t(arr, 0, 2) == 3 + assert m.at_t(arr, 1, 0) == 4 + + assert all(m.mutate_at_t(arr, 0, 2).ravel() == [1, 2, 4, 4, 5, 6]) + assert all(m.mutate_at_t(arr, 1, 0).ravel() == [1, 2, 4, 5, 5, 6]) + + +def test_mutate_readonly(arr): + arr.flags.writeable = False + for func, args in (m.mutate_data, ()), (m.mutate_data_t, ()), (m.mutate_at_t, (0, 0)): + with pytest.raises(ValueError) as excinfo: + func(arr, *args) + assert str(excinfo.value) == 'array is not writeable' + + +def test_mutate_data(arr): + assert all(m.mutate_data(arr).ravel() == [2, 4, 6, 8, 10, 12]) + assert all(m.mutate_data(arr).ravel() == [4, 8, 12, 16, 20, 24]) + assert all(m.mutate_data(arr, 1).ravel() == [4, 8, 12, 32, 40, 48]) + assert all(m.mutate_data(arr, 0, 1).ravel() == [4, 16, 24, 64, 80, 96]) + assert all(m.mutate_data(arr, 1, 2).ravel() == [4, 16, 24, 64, 80, 192]) + + assert all(m.mutate_data_t(arr).ravel() == [5, 17, 25, 65, 81, 193]) + assert 
all(m.mutate_data_t(arr).ravel() == [6, 18, 26, 66, 82, 194]) + assert all(m.mutate_data_t(arr, 1).ravel() == [6, 18, 26, 67, 83, 195]) + assert all(m.mutate_data_t(arr, 0, 1).ravel() == [6, 19, 27, 68, 84, 196]) + assert all(m.mutate_data_t(arr, 1, 2).ravel() == [6, 19, 27, 68, 84, 197]) + + +def test_bounds_check(arr): + for func in (m.index_at, m.index_at_t, m.data, m.data_t, + m.mutate_data, m.mutate_data_t, m.at_t, m.mutate_at_t): + with pytest.raises(IndexError) as excinfo: + func(arr, 2, 0) + assert str(excinfo.value) == 'index 2 is out of bounds for axis 0 with size 2' + with pytest.raises(IndexError) as excinfo: + func(arr, 0, 4) + assert str(excinfo.value) == 'index 4 is out of bounds for axis 1 with size 3' + + +def test_make_c_f_array(): + assert m.make_c_array().flags.c_contiguous + assert not m.make_c_array().flags.f_contiguous + assert m.make_f_array().flags.f_contiguous + assert not m.make_f_array().flags.c_contiguous + + +def test_make_empty_shaped_array(): + m.make_empty_shaped_array() + + # empty shape means numpy scalar, PEP 3118 + assert m.scalar_int().ndim == 0 + assert m.scalar_int().shape == () + assert m.scalar_int() == 42 + + +def test_wrap(): + def assert_references(a, b, base=None): + from distutils.version import LooseVersion + if base is None: + base = a + assert a is not b + assert a.__array_interface__['data'][0] == b.__array_interface__['data'][0] + assert a.shape == b.shape + assert a.strides == b.strides + assert a.flags.c_contiguous == b.flags.c_contiguous + assert a.flags.f_contiguous == b.flags.f_contiguous + assert a.flags.writeable == b.flags.writeable + assert a.flags.aligned == b.flags.aligned + if LooseVersion(np.__version__) >= LooseVersion("1.14.0"): + assert a.flags.writebackifcopy == b.flags.writebackifcopy + else: + assert a.flags.updateifcopy == b.flags.updateifcopy + assert np.all(a == b) + assert not b.flags.owndata + assert b.base is base + if a.flags.writeable and a.ndim == 2: + a[0, 0] = 1234 + assert b[0, 0] == 
1234 + + a1 = np.array([1, 2], dtype=np.int16) + assert a1.flags.owndata and a1.base is None + a2 = m.wrap(a1) + assert_references(a1, a2) + + a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='F') + assert a1.flags.owndata and a1.base is None + a2 = m.wrap(a1) + assert_references(a1, a2) + + a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='C') + a1.flags.writeable = False + a2 = m.wrap(a1) + assert_references(a1, a2) + + a1 = np.random.random((4, 4, 4)) + a2 = m.wrap(a1) + assert_references(a1, a2) + + a1t = a1.transpose() + a2 = m.wrap(a1t) + assert_references(a1t, a2, a1) + + a1d = a1.diagonal() + a2 = m.wrap(a1d) + assert_references(a1d, a2, a1) + + a1m = a1[::-1, ::-1, ::-1] + a2 = m.wrap(a1m) + assert_references(a1m, a2, a1) + + +def test_numpy_view(capture): + with capture: + ac = m.ArrayClass() + ac_view_1 = ac.numpy_view() + ac_view_2 = ac.numpy_view() + assert np.all(ac_view_1 == np.array([1, 2], dtype=np.int32)) + del ac + pytest.gc_collect() + assert capture == """ + ArrayClass() + ArrayClass::numpy_view() + ArrayClass::numpy_view() + """ + ac_view_1[0] = 4 + ac_view_1[1] = 3 + assert ac_view_2[0] == 4 + assert ac_view_2[1] == 3 + with capture: + del ac_view_1 + del ac_view_2 + pytest.gc_collect() + pytest.gc_collect() + assert capture == """ + ~ArrayClass() + """ + + +def test_cast_numpy_int64_to_uint64(): + m.function_taking_uint64(123) + m.function_taking_uint64(np.uint64(123)) + + +def test_isinstance(): + assert m.isinstance_untyped(np.array([1, 2, 3]), "not an array") + assert m.isinstance_typed(np.array([1.0, 2.0, 3.0])) + + +def test_constructors(): + defaults = m.default_constructors() + for a in defaults.values(): + assert a.size == 0 + assert defaults["array"].dtype == np.array([]).dtype + assert defaults["array_t"].dtype == np.int32 + assert defaults["array_t"].dtype == np.float64 + + results = m.converting_constructors([1, 2, 3]) + for a in results.values(): + np.testing.assert_array_equal(a, [1, 2, 3]) + assert 
results["array"].dtype == np.int_ + assert results["array_t"].dtype == np.int32 + assert results["array_t"].dtype == np.float64 + + +def test_overload_resolution(msg): + # Exact overload matches: + assert m.overloaded(np.array([1], dtype='float64')) == 'double' + assert m.overloaded(np.array([1], dtype='float32')) == 'float' + assert m.overloaded(np.array([1], dtype='ushort')) == 'unsigned short' + assert m.overloaded(np.array([1], dtype='intc')) == 'int' + assert m.overloaded(np.array([1], dtype='longlong')) == 'long long' + assert m.overloaded(np.array([1], dtype='complex')) == 'double complex' + assert m.overloaded(np.array([1], dtype='csingle')) == 'float complex' + + # No exact match, should call first convertible version: + assert m.overloaded(np.array([1], dtype='uint8')) == 'double' + + with pytest.raises(TypeError) as excinfo: + m.overloaded("not an array") + assert msg(excinfo.value) == """ + overloaded(): incompatible function arguments. The following argument types are supported: + 1. (arg0: numpy.ndarray[numpy.float64]) -> str + 2. (arg0: numpy.ndarray[numpy.float32]) -> str + 3. (arg0: numpy.ndarray[numpy.int32]) -> str + 4. (arg0: numpy.ndarray[numpy.uint16]) -> str + 5. (arg0: numpy.ndarray[numpy.int64]) -> str + 6. (arg0: numpy.ndarray[numpy.complex128]) -> str + 7. (arg0: numpy.ndarray[numpy.complex64]) -> str + + Invoked with: 'not an array' + """ + + assert m.overloaded2(np.array([1], dtype='float64')) == 'double' + assert m.overloaded2(np.array([1], dtype='float32')) == 'float' + assert m.overloaded2(np.array([1], dtype='complex64')) == 'float complex' + assert m.overloaded2(np.array([1], dtype='complex128')) == 'double complex' + assert m.overloaded2(np.array([1], dtype='float32')) == 'float' + + assert m.overloaded3(np.array([1], dtype='float64')) == 'double' + assert m.overloaded3(np.array([1], dtype='intc')) == 'int' + expected_exc = """ + overloaded3(): incompatible function arguments. The following argument types are supported: + 1. 
(arg0: numpy.ndarray[numpy.int32]) -> str + 2. (arg0: numpy.ndarray[numpy.float64]) -> str + + Invoked with: """ + + with pytest.raises(TypeError) as excinfo: + m.overloaded3(np.array([1], dtype='uintc')) + assert msg(excinfo.value) == expected_exc + repr(np.array([1], dtype='uint32')) + with pytest.raises(TypeError) as excinfo: + m.overloaded3(np.array([1], dtype='float32')) + assert msg(excinfo.value) == expected_exc + repr(np.array([1.], dtype='float32')) + with pytest.raises(TypeError) as excinfo: + m.overloaded3(np.array([1], dtype='complex')) + assert msg(excinfo.value) == expected_exc + repr(np.array([1. + 0.j])) + + # Exact matches: + assert m.overloaded4(np.array([1], dtype='double')) == 'double' + assert m.overloaded4(np.array([1], dtype='longlong')) == 'long long' + # Non-exact matches requiring conversion. Since float to integer isn't a + # save conversion, it should go to the double overload, but short can go to + # either (and so should end up on the first-registered, the long long). 
+ assert m.overloaded4(np.array([1], dtype='float32')) == 'double' + assert m.overloaded4(np.array([1], dtype='short')) == 'long long' + + assert m.overloaded5(np.array([1], dtype='double')) == 'double' + assert m.overloaded5(np.array([1], dtype='uintc')) == 'unsigned int' + assert m.overloaded5(np.array([1], dtype='float32')) == 'unsigned int' + + +def test_greedy_string_overload(): + """Tests fix for #685 - ndarray shouldn't go to std::string overload""" + + assert m.issue685("abc") == "string" + assert m.issue685(np.array([97, 98, 99], dtype='b')) == "array" + assert m.issue685(123) == "other" + + +def test_array_unchecked_fixed_dims(msg): + z1 = np.array([[1, 2], [3, 4]], dtype='float64') + m.proxy_add2(z1, 10) + assert np.all(z1 == [[11, 12], [13, 14]]) + + with pytest.raises(ValueError) as excinfo: + m.proxy_add2(np.array([1., 2, 3]), 5.0) + assert msg(excinfo.value) == "array has incorrect number of dimensions: 1; expected 2" + + expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int') + assert np.all(m.proxy_init3(3.0) == expect_c) + expect_f = np.transpose(expect_c) + assert np.all(m.proxy_init3F(3.0) == expect_f) + + assert m.proxy_squared_L2_norm(np.array(range(6))) == 55 + assert m.proxy_squared_L2_norm(np.array(range(6), dtype="float64")) == 55 + + assert m.proxy_auxiliaries2(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32] + assert m.proxy_auxiliaries2(z1) == m.array_auxiliaries2(z1) + + +def test_array_unchecked_dyn_dims(msg): + z1 = np.array([[1, 2], [3, 4]], dtype='float64') + m.proxy_add2_dyn(z1, 10) + assert np.all(z1 == [[11, 12], [13, 14]]) + + expect_c = np.ndarray(shape=(3, 3, 3), buffer=np.array(range(3, 30)), dtype='int') + assert np.all(m.proxy_init3_dyn(3.0) == expect_c) + + assert m.proxy_auxiliaries2_dyn(z1) == [11, 11, True, 2, 8, 2, 2, 4, 32] + assert m.proxy_auxiliaries2_dyn(z1) == m.array_auxiliaries2(z1) + + +def test_array_failure(): + with pytest.raises(ValueError) as excinfo: + m.array_fail_test() + assert 
str(excinfo.value) == 'cannot create a pybind11::array from a nullptr' + + with pytest.raises(ValueError) as excinfo: + m.array_t_fail_test() + assert str(excinfo.value) == 'cannot create a pybind11::array_t from a nullptr' + + with pytest.raises(ValueError) as excinfo: + m.array_fail_test_negative_size() + assert str(excinfo.value) == 'negative dimensions are not allowed' + + +def test_initializer_list(): + assert m.array_initializer_list1().shape == (1,) + assert m.array_initializer_list2().shape == (1, 2) + assert m.array_initializer_list3().shape == (1, 2, 3) + assert m.array_initializer_list4().shape == (1, 2, 3, 4) + + +def test_array_resize(msg): + a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float64') + m.array_reshape2(a) + assert(a.size == 9) + assert(np.all(a == [[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + + # total size change should succced with refcheck off + m.array_resize3(a, 4, False) + assert(a.size == 64) + # ... and fail with refcheck on + try: + m.array_resize3(a, 3, True) + except ValueError as e: + assert(str(e).startswith("cannot resize an array")) + # transposed array doesn't own data + b = a.transpose() + try: + m.array_resize3(b, 3, False) + except ValueError as e: + assert(str(e).startswith("cannot resize this array: it does not own its data")) + # ... 
but reshape should be fine + m.array_reshape2(b) + assert(b.shape == (8, 8)) + + +@pytest.mark.xfail("env.PYPY") +def test_array_create_and_resize(msg): + a = m.create_and_resize(2) + assert(a.size == 4) + assert(np.all(a == 42.)) + + +def test_index_using_ellipsis(): + a = m.index_using_ellipsis(np.zeros((5, 6, 7))) + assert a.shape == (6,) + + +@pytest.mark.xfail("env.PYPY") +def test_dtype_refcount_leak(): + from sys import getrefcount + dtype = np.dtype(np.float_) + a = np.array([1], dtype=dtype) + before = getrefcount(dtype) + m.ndim(a) + after = getrefcount(dtype) + assert after == before diff --git a/diffvg/pybind11/tests/test_numpy_dtypes.cpp b/diffvg/pybind11/tests/test_numpy_dtypes.cpp new file mode 100644 index 0000000000000000000000000000000000000000..467e0253f7eb422da4fff3b4db7e4836fc2c11f2 --- /dev/null +++ b/diffvg/pybind11/tests/test_numpy_dtypes.cpp @@ -0,0 +1,474 @@ +/* + tests/test_numpy_dtypes.cpp -- Structured and compound NumPy dtypes + + Copyright (c) 2016 Ivan Smirnov + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include + +#ifdef __GNUC__ +#define PYBIND11_PACKED(cls) cls __attribute__((__packed__)) +#else +#define PYBIND11_PACKED(cls) __pragma(pack(push, 1)) cls __pragma(pack(pop)) +#endif + +namespace py = pybind11; + +struct SimpleStruct { + bool bool_; + uint32_t uint_; + float float_; + long double ldbl_; +}; + +std::ostream& operator<<(std::ostream& os, const SimpleStruct& v) { + return os << "s:" << v.bool_ << "," << v.uint_ << "," << v.float_ << "," << v.ldbl_; +} + +struct SimpleStructReordered { + bool bool_; + float float_; + uint32_t uint_; + long double ldbl_; +}; + +PYBIND11_PACKED(struct PackedStruct { + bool bool_; + uint32_t uint_; + float float_; + long double ldbl_; +}); + +std::ostream& operator<<(std::ostream& os, const PackedStruct& v) { + return os << "p:" << v.bool_ << "," << v.uint_ << "," << v.float_ << "," << v.ldbl_; +} + +PYBIND11_PACKED(struct NestedStruct { + SimpleStruct a; + PackedStruct b; +}); + +std::ostream& operator<<(std::ostream& os, const NestedStruct& v) { + return os << "n:a=" << v.a << ";b=" << v.b; +} + +struct PartialStruct { + bool bool_; + uint32_t uint_; + float float_; + uint64_t dummy2; + long double ldbl_; +}; + +struct PartialNestedStruct { + uint64_t dummy1; + PartialStruct a; + uint64_t dummy2; +}; + +struct UnboundStruct { }; + +struct StringStruct { + char a[3]; + std::array b; +}; + +struct ComplexStruct { + std::complex cflt; + std::complex cdbl; +}; + +std::ostream& operator<<(std::ostream& os, const ComplexStruct& v) { + return os << "c:" << v.cflt << "," << v.cdbl; +} + +struct ArrayStruct { + char a[3][4]; + int32_t b[2]; + std::array c; + std::array d[4]; +}; + +PYBIND11_PACKED(struct StructWithUglyNames { + int8_t __x__; + uint64_t __y__; +}); + +enum class E1 : int64_t { A = -1, B = 1 }; +enum E2 : uint8_t { X = 1, Y = 2 }; + +PYBIND11_PACKED(struct EnumStruct { + E1 e1; + E2 e2; +}); + +std::ostream& operator<<(std::ostream& os, const StringStruct& v) { + os << "a='"; + 
for (size_t i = 0; i < 3 && v.a[i]; i++) os << v.a[i]; + os << "',b='"; + for (size_t i = 0; i < 3 && v.b[i]; i++) os << v.b[i]; + return os << "'"; +} + +std::ostream& operator<<(std::ostream& os, const ArrayStruct& v) { + os << "a={"; + for (int i = 0; i < 3; i++) { + if (i > 0) + os << ','; + os << '{'; + for (int j = 0; j < 3; j++) + os << v.a[i][j] << ','; + os << v.a[i][3] << '}'; + } + os << "},b={" << v.b[0] << ',' << v.b[1]; + os << "},c={" << int(v.c[0]) << ',' << int(v.c[1]) << ',' << int(v.c[2]); + os << "},d={"; + for (int i = 0; i < 4; i++) { + if (i > 0) + os << ','; + os << '{' << v.d[i][0] << ',' << v.d[i][1] << '}'; + } + return os << '}'; +} + +std::ostream& operator<<(std::ostream& os, const EnumStruct& v) { + return os << "e1=" << (v.e1 == E1::A ? "A" : "B") << ",e2=" << (v.e2 == E2::X ? "X" : "Y"); +} + +template +py::array mkarray_via_buffer(size_t n) { + return py::array(py::buffer_info(nullptr, sizeof(T), + py::format_descriptor::format(), + 1, { n }, { sizeof(T) })); +} + +#define SET_TEST_VALS(s, i) do { \ + s.bool_ = (i) % 2 != 0; \ + s.uint_ = (uint32_t) (i); \ + s.float_ = (float) (i) * 1.5f; \ + s.ldbl_ = (long double) (i) * -2.5L; } while (0) + +template +py::array_t create_recarray(size_t n) { + auto arr = mkarray_via_buffer(n); + auto req = arr.request(); + auto ptr = static_cast(req.ptr); + for (size_t i = 0; i < n; i++) { + SET_TEST_VALS(ptr[i], i); + } + return arr; +} + +template +py::list print_recarray(py::array_t arr) { + const auto req = arr.request(); + const auto ptr = static_cast(req.ptr); + auto l = py::list(); + for (ssize_t i = 0; i < req.size; i++) { + std::stringstream ss; + ss << ptr[i]; + l.append(py::str(ss.str())); + } + return l; +} + +py::array_t test_array_ctors(int i) { + using arr_t = py::array_t; + + std::vector data { 1, 2, 3, 4, 5, 6 }; + std::vector shape { 3, 2 }; + std::vector strides { 8, 4 }; + + auto ptr = data.data(); + auto vptr = (void *) ptr; + auto dtype = py::dtype("int32"); + + 
py::buffer_info buf_ndim1(vptr, 4, "i", 6); + py::buffer_info buf_ndim1_null(nullptr, 4, "i", 6); + py::buffer_info buf_ndim2(vptr, 4, "i", 2, shape, strides); + py::buffer_info buf_ndim2_null(nullptr, 4, "i", 2, shape, strides); + + auto fill = [](py::array arr) { + auto req = arr.request(); + for (int i = 0; i < 6; i++) ((int32_t *) req.ptr)[i] = i + 1; + return arr; + }; + + switch (i) { + // shape: (3, 2) + case 10: return arr_t(shape, strides, ptr); + case 11: return py::array(shape, strides, ptr); + case 12: return py::array(dtype, shape, strides, vptr); + case 13: return arr_t(shape, ptr); + case 14: return py::array(shape, ptr); + case 15: return py::array(dtype, shape, vptr); + case 16: return arr_t(buf_ndim2); + case 17: return py::array(buf_ndim2); + // shape: (3, 2) - post-fill + case 20: return fill(arr_t(shape, strides)); + case 21: return py::array(shape, strides, ptr); // can't have nullptr due to templated ctor + case 22: return fill(py::array(dtype, shape, strides)); + case 23: return fill(arr_t(shape)); + case 24: return py::array(shape, ptr); // can't have nullptr due to templated ctor + case 25: return fill(py::array(dtype, shape)); + case 26: return fill(arr_t(buf_ndim2_null)); + case 27: return fill(py::array(buf_ndim2_null)); + // shape: (6, ) + case 30: return arr_t(6, ptr); + case 31: return py::array(6, ptr); + case 32: return py::array(dtype, 6, vptr); + case 33: return arr_t(buf_ndim1); + case 34: return py::array(buf_ndim1); + // shape: (6, ) + case 40: return fill(arr_t(6)); + case 41: return py::array(6, ptr); // can't have nullptr due to templated ctor + case 42: return fill(py::array(dtype, 6)); + case 43: return fill(arr_t(buf_ndim1_null)); + case 44: return fill(py::array(buf_ndim1_null)); + } + return arr_t(); +} + +py::list test_dtype_ctors() { + py::list list; + list.append(py::dtype("int32")); + list.append(py::dtype(std::string("float64"))); + list.append(py::dtype::from_args(py::str("bool"))); + py::list names, offsets, 
formats; + py::dict dict; + names.append(py::str("a")); names.append(py::str("b")); dict["names"] = names; + offsets.append(py::int_(1)); offsets.append(py::int_(10)); dict["offsets"] = offsets; + formats.append(py::dtype("int32")); formats.append(py::dtype("float64")); dict["formats"] = formats; + dict["itemsize"] = py::int_(20); + list.append(py::dtype::from_args(dict)); + list.append(py::dtype(names, formats, offsets, 20)); + list.append(py::dtype(py::buffer_info((void *) 0, sizeof(unsigned int), "I", 1))); + list.append(py::dtype(py::buffer_info((void *) 0, 0, "T{i:a:f:b:}", 1))); + return list; +} + +struct A {}; +struct B {}; + +TEST_SUBMODULE(numpy_dtypes, m) { + try { py::module::import("numpy"); } + catch (...) { return; } + + // typeinfo may be registered before the dtype descriptor for scalar casts to work... + py::class_(m, "SimpleStruct"); + + PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_); + PYBIND11_NUMPY_DTYPE(SimpleStructReordered, bool_, uint_, float_, ldbl_); + PYBIND11_NUMPY_DTYPE(PackedStruct, bool_, uint_, float_, ldbl_); + PYBIND11_NUMPY_DTYPE(NestedStruct, a, b); + PYBIND11_NUMPY_DTYPE(PartialStruct, bool_, uint_, float_, ldbl_); + PYBIND11_NUMPY_DTYPE(PartialNestedStruct, a); + PYBIND11_NUMPY_DTYPE(StringStruct, a, b); + PYBIND11_NUMPY_DTYPE(ArrayStruct, a, b, c, d); + PYBIND11_NUMPY_DTYPE(EnumStruct, e1, e2); + PYBIND11_NUMPY_DTYPE(ComplexStruct, cflt, cdbl); + + // ... or after + py::class_(m, "PackedStruct"); + + PYBIND11_NUMPY_DTYPE_EX(StructWithUglyNames, __x__, "x", __y__, "y"); + + // If uncommented, this should produce a static_assert failure telling the user that the struct + // is not a POD type +// struct NotPOD { std::string v; NotPOD() : v("hi") {}; }; +// PYBIND11_NUMPY_DTYPE(NotPOD, v); + + // Check that dtypes can be registered programmatically, both from + // initializer lists of field descriptors and from other containers. 
+ py::detail::npy_format_descriptor::register_dtype( + {} + ); + py::detail::npy_format_descriptor::register_dtype( + std::vector{} + ); + + // test_recarray, test_scalar_conversion + m.def("create_rec_simple", &create_recarray); + m.def("create_rec_packed", &create_recarray); + m.def("create_rec_nested", [](size_t n) { // test_signature + py::array_t arr = mkarray_via_buffer(n); + auto req = arr.request(); + auto ptr = static_cast(req.ptr); + for (size_t i = 0; i < n; i++) { + SET_TEST_VALS(ptr[i].a, i); + SET_TEST_VALS(ptr[i].b, i + 1); + } + return arr; + }); + m.def("create_rec_partial", &create_recarray); + m.def("create_rec_partial_nested", [](size_t n) { + py::array_t arr = mkarray_via_buffer(n); + auto req = arr.request(); + auto ptr = static_cast(req.ptr); + for (size_t i = 0; i < n; i++) { + SET_TEST_VALS(ptr[i].a, i); + } + return arr; + }); + m.def("print_rec_simple", &print_recarray); + m.def("print_rec_packed", &print_recarray); + m.def("print_rec_nested", &print_recarray); + + // test_format_descriptors + m.def("get_format_unbound", []() { return py::format_descriptor::format(); }); + m.def("print_format_descriptors", []() { + py::list l; + for (const auto &fmt : { + py::format_descriptor::format(), + py::format_descriptor::format(), + py::format_descriptor::format(), + py::format_descriptor::format(), + py::format_descriptor::format(), + py::format_descriptor::format(), + py::format_descriptor::format(), + py::format_descriptor::format(), + py::format_descriptor::format() + }) { + l.append(py::cast(fmt)); + } + return l; + }); + + // test_dtype + m.def("print_dtypes", []() { + py::list l; + for (const py::handle &d : { + py::dtype::of(), + py::dtype::of(), + py::dtype::of(), + py::dtype::of(), + py::dtype::of(), + py::dtype::of(), + py::dtype::of(), + py::dtype::of(), + py::dtype::of(), + py::dtype::of() + }) + l.append(py::str(d)); + return l; + }); + m.def("test_dtype_ctors", &test_dtype_ctors); + m.def("test_dtype_methods", []() { + py::list 
list; + auto dt1 = py::dtype::of(); + auto dt2 = py::dtype::of(); + list.append(dt1); list.append(dt2); + list.append(py::bool_(dt1.has_fields())); list.append(py::bool_(dt2.has_fields())); + list.append(py::int_(dt1.itemsize())); list.append(py::int_(dt2.itemsize())); + return list; + }); + struct TrailingPaddingStruct { + int32_t a; + char b; + }; + PYBIND11_NUMPY_DTYPE(TrailingPaddingStruct, a, b); + m.def("trailing_padding_dtype", []() { return py::dtype::of(); }); + + // test_string_array + m.def("create_string_array", [](bool non_empty) { + py::array_t arr = mkarray_via_buffer(non_empty ? 4 : 0); + if (non_empty) { + auto req = arr.request(); + auto ptr = static_cast(req.ptr); + for (ssize_t i = 0; i < req.size * req.itemsize; i++) + static_cast(req.ptr)[i] = 0; + ptr[1].a[0] = 'a'; ptr[1].b[0] = 'a'; + ptr[2].a[0] = 'a'; ptr[2].b[0] = 'a'; + ptr[3].a[0] = 'a'; ptr[3].b[0] = 'a'; + + ptr[2].a[1] = 'b'; ptr[2].b[1] = 'b'; + ptr[3].a[1] = 'b'; ptr[3].b[1] = 'b'; + + ptr[3].a[2] = 'c'; ptr[3].b[2] = 'c'; + } + return arr; + }); + m.def("print_string_array", &print_recarray); + + // test_array_array + m.def("create_array_array", [](size_t n) { + py::array_t arr = mkarray_via_buffer(n); + auto ptr = (ArrayStruct *) arr.mutable_data(); + for (size_t i = 0; i < n; i++) { + for (size_t j = 0; j < 3; j++) + for (size_t k = 0; k < 4; k++) + ptr[i].a[j][k] = char('A' + (i * 100 + j * 10 + k) % 26); + for (size_t j = 0; j < 2; j++) + ptr[i].b[j] = int32_t(i * 1000 + j); + for (size_t j = 0; j < 3; j++) + ptr[i].c[j] = uint8_t(i * 10 + j); + for (size_t j = 0; j < 4; j++) + for (size_t k = 0; k < 2; k++) + ptr[i].d[j][k] = float(i) * 100.0f + float(j) * 10.0f + float(k); + } + return arr; + }); + m.def("print_array_array", &print_recarray); + + // test_enum_array + m.def("create_enum_array", [](size_t n) { + py::array_t arr = mkarray_via_buffer(n); + auto ptr = (EnumStruct *) arr.mutable_data(); + for (size_t i = 0; i < n; i++) { + ptr[i].e1 = static_cast(-1 + ((int) i % 
2) * 2); + ptr[i].e2 = static_cast(1 + (i % 2)); + } + return arr; + }); + m.def("print_enum_array", &print_recarray); + + // test_complex_array + m.def("create_complex_array", [](size_t n) { + py::array_t arr = mkarray_via_buffer(n); + auto ptr = (ComplexStruct *) arr.mutable_data(); + for (size_t i = 0; i < n; i++) { + ptr[i].cflt.real(float(i)); + ptr[i].cflt.imag(float(i) + 0.25f); + ptr[i].cdbl.real(double(i) + 0.5); + ptr[i].cdbl.imag(double(i) + 0.75); + } + return arr; + }); + m.def("print_complex_array", &print_recarray); + + // test_array_constructors + m.def("test_array_ctors", &test_array_ctors); + + // test_compare_buffer_info + struct CompareStruct { + bool x; + uint32_t y; + float z; + }; + PYBIND11_NUMPY_DTYPE(CompareStruct, x, y, z); + m.def("compare_buffer_info", []() { + py::list list; + list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(float), "f", 1)))); + list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(int), "I", 1)))); + list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(long), "l", 1)))); + list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(long), sizeof(long) == sizeof(int) ? 
"i" : "q", 1)))); + list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(CompareStruct), "T{?:x:3xI:y:f:z:}", 1)))); + return list; + }); + m.def("buffer_to_dtype", [](py::buffer& buf) { return py::dtype(buf.request()); }); + + // test_scalar_conversion + m.def("f_simple", [](SimpleStruct s) { return s.uint_ * 10; }); + m.def("f_packed", [](PackedStruct s) { return s.uint_ * 10; }); + m.def("f_nested", [](NestedStruct s) { return s.a.uint_ * 10; }); + + // test_register_dtype + m.def("register_dtype", []() { PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_); }); + + // test_str_leak + m.def("dtype_wrapper", [](py::object d) { return py::dtype::from_args(std::move(d)); }); +} diff --git a/diffvg/pybind11/tests/test_numpy_dtypes.py b/diffvg/pybind11/tests/test_numpy_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..417d6f1cffbbd3a08857797c5c22f555d6f2dd33 --- /dev/null +++ b/diffvg/pybind11/tests/test_numpy_dtypes.py @@ -0,0 +1,312 @@ +# -*- coding: utf-8 -*- +import re + +import pytest + +import env # noqa: F401 + +from pybind11_tests import numpy_dtypes as m + +np = pytest.importorskip("numpy") + + +@pytest.fixture(scope='module') +def simple_dtype(): + ld = np.dtype('longdouble') + return np.dtype({'names': ['bool_', 'uint_', 'float_', 'ldbl_'], + 'formats': ['?', 'u4', 'f4', 'f{}'.format(ld.itemsize)], + 'offsets': [0, 4, 8, (16 if ld.alignment > 4 else 12)]}) + + +@pytest.fixture(scope='module') +def packed_dtype(): + return np.dtype([('bool_', '?'), ('uint_', 'u4'), ('float_', 'f4'), ('ldbl_', 'g')]) + + +def dt_fmt(): + from sys import byteorder + e = '<' if byteorder == 'little' else '>' + return ("{{'names':['bool_','uint_','float_','ldbl_']," + " 'formats':['?','" + e + "u4','" + e + "f4','" + e + "f{}']," + " 'offsets':[0,4,8,{}], 'itemsize':{}}}") + + +def simple_dtype_fmt(): + ld = np.dtype('longdouble') + simple_ld_off = 12 + 4 * (ld.alignment > 4) + return 
dt_fmt().format(ld.itemsize, simple_ld_off, simple_ld_off + ld.itemsize) + + +def packed_dtype_fmt(): + from sys import byteorder + return "[('bool_', '?'), ('uint_', '{e}u4'), ('float_', '{e}f4'), ('ldbl_', '{e}f{}')]".format( + np.dtype('longdouble').itemsize, e='<' if byteorder == 'little' else '>') + + +def partial_ld_offset(): + return 12 + 4 * (np.dtype('uint64').alignment > 4) + 8 + 8 * ( + np.dtype('longdouble').alignment > 8) + + +def partial_dtype_fmt(): + ld = np.dtype('longdouble') + partial_ld_off = partial_ld_offset() + return dt_fmt().format(ld.itemsize, partial_ld_off, partial_ld_off + ld.itemsize) + + +def partial_nested_fmt(): + ld = np.dtype('longdouble') + partial_nested_off = 8 + 8 * (ld.alignment > 8) + partial_ld_off = partial_ld_offset() + partial_nested_size = partial_nested_off * 2 + partial_ld_off + ld.itemsize + return "{{'names':['a'], 'formats':[{}], 'offsets':[{}], 'itemsize':{}}}".format( + partial_dtype_fmt(), partial_nested_off, partial_nested_size) + + +def assert_equal(actual, expected_data, expected_dtype): + np.testing.assert_equal(actual, np.array(expected_data, dtype=expected_dtype)) + + +def test_format_descriptors(): + with pytest.raises(RuntimeError) as excinfo: + m.get_format_unbound() + assert re.match('^NumPy type info missing for .*UnboundStruct.*$', str(excinfo.value)) + + ld = np.dtype('longdouble') + ldbl_fmt = ('4x' if ld.alignment > 4 else '') + ld.char + ss_fmt = "^T{?:bool_:3xI:uint_:f:float_:" + ldbl_fmt + ":ldbl_:}" + dbl = np.dtype('double') + partial_fmt = ("^T{?:bool_:3xI:uint_:f:float_:" + + str(4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8)) + + "xg:ldbl_:}") + nested_extra = str(max(8, ld.alignment)) + assert m.print_format_descriptors() == [ + ss_fmt, + "^T{?:bool_:I:uint_:f:float_:g:ldbl_:}", + "^T{" + ss_fmt + ":a:^T{?:bool_:I:uint_:f:float_:g:ldbl_:}:b:}", + partial_fmt, + "^T{" + nested_extra + "x" + partial_fmt + ":a:" + nested_extra + "x}", + "^T{3s:a:3s:b:}", + 
"^T{(3)4s:a:(2)i:b:(3)B:c:1x(4, 2)f:d:}", + '^T{q:e1:B:e2:}', + '^T{Zf:cflt:Zd:cdbl:}' + ] + + +def test_dtype(simple_dtype): + from sys import byteorder + e = '<' if byteorder == 'little' else '>' + + assert m.print_dtypes() == [ + simple_dtype_fmt(), + packed_dtype_fmt(), + "[('a', {}), ('b', {})]".format(simple_dtype_fmt(), packed_dtype_fmt()), + partial_dtype_fmt(), + partial_nested_fmt(), + "[('a', 'S3'), ('b', 'S3')]", + ("{{'names':['a','b','c','d'], " + + "'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('" + e + "f4', (4, 2))], " + + "'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e), + "[('e1', '" + e + "i8'), ('e2', 'u1')]", + "[('x', 'i1'), ('y', '" + e + "u8')]", + "[('cflt', '" + e + "c8'), ('cdbl', '" + e + "c16')]" + ] + + d1 = np.dtype({'names': ['a', 'b'], 'formats': ['int32', 'float64'], + 'offsets': [1, 10], 'itemsize': 20}) + d2 = np.dtype([('a', 'i4'), ('b', 'f4')]) + assert m.test_dtype_ctors() == [np.dtype('int32'), np.dtype('float64'), + np.dtype('bool'), d1, d1, np.dtype('uint32'), d2] + + assert m.test_dtype_methods() == [np.dtype('int32'), simple_dtype, False, True, + np.dtype('int32').itemsize, simple_dtype.itemsize] + + assert m.trailing_padding_dtype() == m.buffer_to_dtype(np.zeros(1, m.trailing_padding_dtype())) + + +def test_recarray(simple_dtype, packed_dtype): + elements = [(False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)] + + for func, dtype in [(m.create_rec_simple, simple_dtype), (m.create_rec_packed, packed_dtype)]: + arr = func(0) + assert arr.dtype == dtype + assert_equal(arr, [], simple_dtype) + assert_equal(arr, [], packed_dtype) + + arr = func(3) + assert arr.dtype == dtype + assert_equal(arr, elements, simple_dtype) + assert_equal(arr, elements, packed_dtype) + + if dtype == simple_dtype: + assert m.print_rec_simple(arr) == [ + "s:0,0,0,-0", + "s:1,1,1.5,-2.5", + "s:0,2,3,-5" + ] + else: + assert m.print_rec_packed(arr) == [ + "p:0,0,0,-0", + "p:1,1,1.5,-2.5", + "p:0,2,3,-5" + ] + + 
nested_dtype = np.dtype([('a', simple_dtype), ('b', packed_dtype)]) + + arr = m.create_rec_nested(0) + assert arr.dtype == nested_dtype + assert_equal(arr, [], nested_dtype) + + arr = m.create_rec_nested(3) + assert arr.dtype == nested_dtype + assert_equal(arr, [((False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5)), + ((True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)), + ((False, 2, 3.0, -5.0), (True, 3, 4.5, -7.5))], nested_dtype) + assert m.print_rec_nested(arr) == [ + "n:a=s:0,0,0,-0;b=p:1,1,1.5,-2.5", + "n:a=s:1,1,1.5,-2.5;b=p:0,2,3,-5", + "n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5" + ] + + arr = m.create_rec_partial(3) + assert str(arr.dtype) == partial_dtype_fmt() + partial_dtype = arr.dtype + assert '' not in arr.dtype.fields + assert partial_dtype.itemsize > simple_dtype.itemsize + assert_equal(arr, elements, simple_dtype) + assert_equal(arr, elements, packed_dtype) + + arr = m.create_rec_partial_nested(3) + assert str(arr.dtype) == partial_nested_fmt() + assert '' not in arr.dtype.fields + assert '' not in arr.dtype.fields['a'][0].fields + assert arr.dtype.itemsize > partial_dtype.itemsize + np.testing.assert_equal(arr['a'], m.create_rec_partial(3)) + + +def test_array_constructors(): + data = np.arange(1, 7, dtype='int32') + for i in range(8): + np.testing.assert_array_equal(m.test_array_ctors(10 + i), data.reshape((3, 2))) + np.testing.assert_array_equal(m.test_array_ctors(20 + i), data.reshape((3, 2))) + for i in range(5): + np.testing.assert_array_equal(m.test_array_ctors(30 + i), data) + np.testing.assert_array_equal(m.test_array_ctors(40 + i), data) + + +def test_string_array(): + arr = m.create_string_array(True) + assert str(arr.dtype) == "[('a', 'S3'), ('b', 'S3')]" + assert m.print_string_array(arr) == [ + "a='',b=''", + "a='a',b='a'", + "a='ab',b='ab'", + "a='abc',b='abc'" + ] + dtype = arr.dtype + assert arr['a'].tolist() == [b'', b'a', b'ab', b'abc'] + assert arr['b'].tolist() == [b'', b'a', b'ab', b'abc'] + arr = m.create_string_array(False) + assert dtype == 
arr.dtype + + +def test_array_array(): + from sys import byteorder + e = '<' if byteorder == 'little' else '>' + + arr = m.create_array_array(3) + assert str(arr.dtype) == ( + "{{'names':['a','b','c','d'], " + + "'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], " + + "'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e) + assert m.print_array_array(arr) == [ + "a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1}," + + "c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}", + "a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001}," + + "c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}", + "a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001}," + + "c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}", + ] + assert arr['a'].tolist() == [[b'ABCD', b'KLMN', b'UVWX'], + [b'WXYZ', b'GHIJ', b'QRST'], + [b'STUV', b'CDEF', b'MNOP']] + assert arr['b'].tolist() == [[0, 1], [1000, 1001], [2000, 2001]] + assert m.create_array_array(0).dtype == arr.dtype + + +def test_enum_array(): + from sys import byteorder + e = '<' if byteorder == 'little' else '>' + + arr = m.create_enum_array(3) + dtype = arr.dtype + assert dtype == np.dtype([('e1', e + 'i8'), ('e2', 'u1')]) + assert m.print_enum_array(arr) == [ + "e1=A,e2=X", + "e1=B,e2=Y", + "e1=A,e2=X" + ] + assert arr['e1'].tolist() == [-1, 1, -1] + assert arr['e2'].tolist() == [1, 2, 1] + assert m.create_enum_array(0).dtype == dtype + + +def test_complex_array(): + from sys import byteorder + e = '<' if byteorder == 'little' else '>' + + arr = m.create_complex_array(3) + dtype = arr.dtype + assert dtype == np.dtype([('cflt', e + 'c8'), ('cdbl', e + 'c16')]) + assert m.print_complex_array(arr) == [ + "c:(0,0.25),(0.5,0.75)", + "c:(1,1.25),(1.5,1.75)", + "c:(2,2.25),(2.5,2.75)" + ] + assert arr['cflt'].tolist() == [0.0 + 0.25j, 1.0 + 1.25j, 2.0 + 2.25j] + assert arr['cdbl'].tolist() == [0.5 + 0.75j, 1.5 + 1.75j, 2.5 + 2.75j] + assert m.create_complex_array(0).dtype == dtype + + +def test_signature(doc): + assert 
doc(m.create_rec_nested) == \ + "create_rec_nested(arg0: int) -> numpy.ndarray[NestedStruct]" + + +def test_scalar_conversion(): + n = 3 + arrays = [m.create_rec_simple(n), m.create_rec_packed(n), + m.create_rec_nested(n), m.create_enum_array(n)] + funcs = [m.f_simple, m.f_packed, m.f_nested] + + for i, func in enumerate(funcs): + for j, arr in enumerate(arrays): + if i == j and i < 2: + assert [func(arr[k]) for k in range(n)] == [k * 10 for k in range(n)] + else: + with pytest.raises(TypeError) as excinfo: + func(arr[0]) + assert 'incompatible function arguments' in str(excinfo.value) + + +def test_register_dtype(): + with pytest.raises(RuntimeError) as excinfo: + m.register_dtype() + assert 'dtype is already registered' in str(excinfo.value) + + +@pytest.mark.xfail("env.PYPY") +def test_str_leak(): + from sys import getrefcount + fmt = "f4" + pytest.gc_collect() + start = getrefcount(fmt) + d = m.dtype_wrapper(fmt) + assert d is np.dtype("f4") + del d + pytest.gc_collect() + assert getrefcount(fmt) == start + + +def test_compare_buffer_info(): + assert all(m.compare_buffer_info()) diff --git a/diffvg/pybind11/tests/test_numpy_vectorize.cpp b/diffvg/pybind11/tests/test_numpy_vectorize.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a875a74b99e95285ad5733616ad3f2ff1d0b2900 --- /dev/null +++ b/diffvg/pybind11/tests/test_numpy_vectorize.cpp @@ -0,0 +1,89 @@ +/* + tests/test_numpy_vectorize.cpp -- auto-vectorize functions over NumPy array + arguments + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include + +double my_func(int x, float y, double z) { + py::print("my_func(x:int={}, y:float={:.0f}, z:float={:.0f})"_s.format(x, y, z)); + return (float) x*y*z; +} + +TEST_SUBMODULE(numpy_vectorize, m) { + try { py::module::import("numpy"); } + catch (...) 
{ return; } + + // test_vectorize, test_docs, test_array_collapse + // Vectorize all arguments of a function (though non-vector arguments are also allowed) + m.def("vectorized_func", py::vectorize(my_func)); + + // Vectorize a lambda function with a capture object (e.g. to exclude some arguments from the vectorization) + m.def("vectorized_func2", + [](py::array_t x, py::array_t y, float z) { + return py::vectorize([z](int x, float y) { return my_func(x, y, z); })(x, y); + } + ); + + // Vectorize a complex-valued function + m.def("vectorized_func3", py::vectorize( + [](std::complex c) { return c * std::complex(2.f); } + )); + + // test_type_selection + // Numpy function which only accepts specific data types + m.def("selective_func", [](py::array_t) { return "Int branch taken."; }); + m.def("selective_func", [](py::array_t) { return "Float branch taken."; }); + m.def("selective_func", [](py::array_t, py::array::c_style>) { return "Complex float branch taken."; }); + + + // test_passthrough_arguments + // Passthrough test: references and non-pod types should be automatically passed through (in the + // function definition below, only `b`, `d`, and `g` are vectorized): + struct NonPODClass { + NonPODClass(int v) : value{v} {} + int value; + }; + py::class_(m, "NonPODClass").def(py::init()); + m.def("vec_passthrough", py::vectorize( + [](double *a, double b, py::array_t c, const int &d, int &e, NonPODClass f, const double g) { + return *a + b + c.at(0) + d + e + f.value + g; + } + )); + + // test_method_vectorization + struct VectorizeTestClass { + VectorizeTestClass(int v) : value{v} {}; + float method(int x, float y) { return y + (float) (x + value); } + int value = 0; + }; + py::class_ vtc(m, "VectorizeTestClass"); + vtc .def(py::init()) + .def_readwrite("value", &VectorizeTestClass::value); + + // Automatic vectorizing of methods + vtc.def("method", py::vectorize(&VectorizeTestClass::method)); + + // test_trivial_broadcasting + // Internal optimization test for 
whether the input is trivially broadcastable: + py::enum_(m, "trivial") + .value("f_trivial", py::detail::broadcast_trivial::f_trivial) + .value("c_trivial", py::detail::broadcast_trivial::c_trivial) + .value("non_trivial", py::detail::broadcast_trivial::non_trivial); + m.def("vectorized_is_trivial", []( + py::array_t arg1, + py::array_t arg2, + py::array_t arg3 + ) { + ssize_t ndim; + std::vector shape; + std::array buffers {{ arg1.request(), arg2.request(), arg3.request() }}; + return py::detail::broadcast(buffers, ndim, shape); + }); +} diff --git a/diffvg/pybind11/tests/test_numpy_vectorize.py b/diffvg/pybind11/tests/test_numpy_vectorize.py new file mode 100644 index 0000000000000000000000000000000000000000..54e44cd8d3f9630a4d76c419e449c8ce1e7cee59 --- /dev/null +++ b/diffvg/pybind11/tests/test_numpy_vectorize.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import numpy_vectorize as m + +np = pytest.importorskip("numpy") + + +def test_vectorize(capture): + assert np.isclose(m.vectorized_func3(np.array(3 + 7j)), [6 + 14j]) + + for f in [m.vectorized_func, m.vectorized_func2]: + with capture: + assert np.isclose(f(1, 2, 3), 6) + assert capture == "my_func(x:int=1, y:float=2, z:float=3)" + with capture: + assert np.isclose(f(np.array(1), np.array(2), 3), 6) + assert capture == "my_func(x:int=1, y:float=2, z:float=3)" + with capture: + assert np.allclose(f(np.array([1, 3]), np.array([2, 4]), 3), [6, 36]) + assert capture == """ + my_func(x:int=1, y:float=2, z:float=3) + my_func(x:int=3, y:float=4, z:float=3) + """ + with capture: + a = np.array([[1, 2], [3, 4]], order='F') + b = np.array([[10, 20], [30, 40]], order='F') + c = 3 + result = f(a, b, c) + assert np.allclose(result, a * b * c) + assert result.flags.f_contiguous + # All inputs are F order and full or singletons, so we the result is in col-major order: + assert capture == """ + my_func(x:int=1, y:float=10, z:float=3) + my_func(x:int=3, y:float=30, z:float=3) + 
my_func(x:int=2, y:float=20, z:float=3) + my_func(x:int=4, y:float=40, z:float=3) + """ + with capture: + a, b, c = np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3 + assert np.allclose(f(a, b, c), a * b * c) + assert capture == """ + my_func(x:int=1, y:float=2, z:float=3) + my_func(x:int=3, y:float=4, z:float=3) + my_func(x:int=5, y:float=6, z:float=3) + my_func(x:int=7, y:float=8, z:float=3) + my_func(x:int=9, y:float=10, z:float=3) + my_func(x:int=11, y:float=12, z:float=3) + """ + with capture: + a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2 + assert np.allclose(f(a, b, c), a * b * c) + assert capture == """ + my_func(x:int=1, y:float=2, z:float=2) + my_func(x:int=2, y:float=3, z:float=2) + my_func(x:int=3, y:float=4, z:float=2) + my_func(x:int=4, y:float=2, z:float=2) + my_func(x:int=5, y:float=3, z:float=2) + my_func(x:int=6, y:float=4, z:float=2) + """ + with capture: + a, b, c = np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2 + assert np.allclose(f(a, b, c), a * b * c) + assert capture == """ + my_func(x:int=1, y:float=2, z:float=2) + my_func(x:int=2, y:float=2, z:float=2) + my_func(x:int=3, y:float=2, z:float=2) + my_func(x:int=4, y:float=3, z:float=2) + my_func(x:int=5, y:float=3, z:float=2) + my_func(x:int=6, y:float=3, z:float=2) + """ + with capture: + a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F'), np.array([[2], [3]]), 2 + assert np.allclose(f(a, b, c), a * b * c) + assert capture == """ + my_func(x:int=1, y:float=2, z:float=2) + my_func(x:int=2, y:float=2, z:float=2) + my_func(x:int=3, y:float=2, z:float=2) + my_func(x:int=4, y:float=3, z:float=2) + my_func(x:int=5, y:float=3, z:float=2) + my_func(x:int=6, y:float=3, z:float=2) + """ + with capture: + a, b, c = np.array([[1, 2, 3], [4, 5, 6]])[::, ::2], np.array([[2], [3]]), 2 + assert np.allclose(f(a, b, c), a * b * c) + assert capture == """ + my_func(x:int=1, y:float=2, z:float=2) + my_func(x:int=3, y:float=2, z:float=2) + 
my_func(x:int=4, y:float=3, z:float=2) + my_func(x:int=6, y:float=3, z:float=2) + """ + with capture: + a, b, c = np.array([[1, 2, 3], [4, 5, 6]], order='F')[::, ::2], np.array([[2], [3]]), 2 + assert np.allclose(f(a, b, c), a * b * c) + assert capture == """ + my_func(x:int=1, y:float=2, z:float=2) + my_func(x:int=3, y:float=2, z:float=2) + my_func(x:int=4, y:float=3, z:float=2) + my_func(x:int=6, y:float=3, z:float=2) + """ + + +def test_type_selection(): + assert m.selective_func(np.array([1], dtype=np.int32)) == "Int branch taken." + assert m.selective_func(np.array([1.0], dtype=np.float32)) == "Float branch taken." + assert m.selective_func(np.array([1.0j], dtype=np.complex64)) == "Complex float branch taken." + + +def test_docs(doc): + assert doc(m.vectorized_func) == """ + vectorized_func(arg0: numpy.ndarray[numpy.int32], arg1: numpy.ndarray[numpy.float32], arg2: numpy.ndarray[numpy.float64]) -> object + """ # noqa: E501 line too long + + +def test_trivial_broadcasting(): + trivial, vectorized_is_trivial = m.trivial, m.vectorized_is_trivial + + assert vectorized_is_trivial(1, 2, 3) == trivial.c_trivial + assert vectorized_is_trivial(np.array(1), np.array(2), 3) == trivial.c_trivial + assert vectorized_is_trivial(np.array([1, 3]), np.array([2, 4]), 3) == trivial.c_trivial + assert trivial.c_trivial == vectorized_is_trivial( + np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3) + assert vectorized_is_trivial( + np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2) == trivial.non_trivial + assert vectorized_is_trivial( + np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2) == trivial.non_trivial + z1 = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype='int32') + z2 = np.array(z1, dtype='float32') + z3 = np.array(z1, dtype='float64') + assert vectorized_is_trivial(z1, z2, z3) == trivial.c_trivial + assert vectorized_is_trivial(1, z2, z3) == trivial.c_trivial + assert vectorized_is_trivial(z1, 1, z3) == trivial.c_trivial + assert 
vectorized_is_trivial(z1, z2, 1) == trivial.c_trivial + assert vectorized_is_trivial(z1[::2, ::2], 1, 1) == trivial.non_trivial + assert vectorized_is_trivial(1, 1, z1[::2, ::2]) == trivial.c_trivial + assert vectorized_is_trivial(1, 1, z3[::2, ::2]) == trivial.non_trivial + assert vectorized_is_trivial(z1, 1, z3[1::4, 1::4]) == trivial.c_trivial + + y1 = np.array(z1, order='F') + y2 = np.array(y1) + y3 = np.array(y1) + assert vectorized_is_trivial(y1, y2, y3) == trivial.f_trivial + assert vectorized_is_trivial(y1, 1, 1) == trivial.f_trivial + assert vectorized_is_trivial(1, y2, 1) == trivial.f_trivial + assert vectorized_is_trivial(1, 1, y3) == trivial.f_trivial + assert vectorized_is_trivial(y1, z2, 1) == trivial.non_trivial + assert vectorized_is_trivial(z1[1::4, 1::4], y2, 1) == trivial.f_trivial + assert vectorized_is_trivial(y1[1::4, 1::4], z2, 1) == trivial.c_trivial + + assert m.vectorized_func(z1, z2, z3).flags.c_contiguous + assert m.vectorized_func(y1, y2, y3).flags.f_contiguous + assert m.vectorized_func(z1, 1, 1).flags.c_contiguous + assert m.vectorized_func(1, y2, 1).flags.f_contiguous + assert m.vectorized_func(z1[1::4, 1::4], y2, 1).flags.f_contiguous + assert m.vectorized_func(y1[1::4, 1::4], z2, 1).flags.c_contiguous + + +def test_passthrough_arguments(doc): + assert doc(m.vec_passthrough) == ( + "vec_passthrough(" + ", ".join([ + "arg0: float", + "arg1: numpy.ndarray[numpy.float64]", + "arg2: numpy.ndarray[numpy.float64]", + "arg3: numpy.ndarray[numpy.int32]", + "arg4: int", + "arg5: m.numpy_vectorize.NonPODClass", + "arg6: numpy.ndarray[numpy.float64]"]) + ") -> object") + + b = np.array([[10, 20, 30]], dtype='float64') + c = np.array([100, 200]) # NOT a vectorized argument + d = np.array([[1000], [2000], [3000]], dtype='int') + g = np.array([[1000000, 2000000, 3000000]], dtype='int') # requires casting + assert np.all( + m.vec_passthrough(1, b, c, d, 10000, m.NonPODClass(100000), g) == + np.array([[1111111, 2111121, 3111131], + [1112111, 
2112121, 3112131], + [1113111, 2113121, 3113131]])) + + +def test_method_vectorization(): + o = m.VectorizeTestClass(3) + x = np.array([1, 2], dtype='int') + y = np.array([[10], [20]], dtype='float32') + assert np.all(o.method(x, y) == [[14, 15], [24, 25]]) + + +def test_array_collapse(): + assert not isinstance(m.vectorized_func(1, 2, 3), np.ndarray) + assert not isinstance(m.vectorized_func(np.array(1), 2, 3), np.ndarray) + z = m.vectorized_func([1], 2, 3) + assert isinstance(z, np.ndarray) + assert z.shape == (1, ) + z = m.vectorized_func(1, [[[2]]], 3) + assert isinstance(z, np.ndarray) + assert z.shape == (1, 1, 1) diff --git a/diffvg/pybind11/tests/test_opaque_types.cpp b/diffvg/pybind11/tests/test_opaque_types.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0d20d9a01c8592e844fb909b336fd5c8e969b9e0 --- /dev/null +++ b/diffvg/pybind11/tests/test_opaque_types.cpp @@ -0,0 +1,67 @@ +/* + tests/test_opaque_types.cpp -- opaque types, passing void pointers + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#include "pybind11_tests.h" +#include +#include + +// IMPORTANT: Disable internal pybind11 translation mechanisms for STL data structures +// +// This also deliberately doesn't use the below StringList type alias to test +// that MAKE_OPAQUE can handle a type containing a `,`. (The `std::allocator` +// bit is just the default `std::vector` allocator). +PYBIND11_MAKE_OPAQUE(std::vector>); + +using StringList = std::vector>; + +TEST_SUBMODULE(opaque_types, m) { + // test_string_list + py::class_(m, "StringList") + .def(py::init<>()) + .def("pop_back", &StringList::pop_back) + /* There are multiple versions of push_back(), etc. Select the right ones. 
*/ + .def("push_back", (void (StringList::*)(const std::string &)) &StringList::push_back) + .def("back", (std::string &(StringList::*)()) &StringList::back) + .def("__len__", [](const StringList &v) { return v.size(); }) + .def("__iter__", [](StringList &v) { + return py::make_iterator(v.begin(), v.end()); + }, py::keep_alive<0, 1>()); + + class ClassWithSTLVecProperty { + public: + StringList stringList; + }; + py::class_(m, "ClassWithSTLVecProperty") + .def(py::init<>()) + .def_readwrite("stringList", &ClassWithSTLVecProperty::stringList); + + m.def("print_opaque_list", [](const StringList &l) { + std::string ret = "Opaque list: ["; + bool first = true; + for (auto entry : l) { + if (!first) + ret += ", "; + ret += entry; + first = false; + } + return ret + "]"; + }); + + // test_pointers + m.def("return_void_ptr", []() { return (void *) 0x1234; }); + m.def("get_void_ptr_value", [](void *ptr) { return reinterpret_cast(ptr); }); + m.def("return_null_str", []() { return (char *) nullptr; }); + m.def("get_null_str_value", [](char *ptr) { return reinterpret_cast(ptr); }); + + m.def("return_unique_ptr", []() -> std::unique_ptr { + StringList *result = new StringList(); + result->push_back("some value"); + return std::unique_ptr(result); + }); +} diff --git a/diffvg/pybind11/tests/test_opaque_types.py b/diffvg/pybind11/tests/test_opaque_types.py new file mode 100644 index 0000000000000000000000000000000000000000..3f2392775d83a833457d95520648ee7e1f2aa6d5 --- /dev/null +++ b/diffvg/pybind11/tests/test_opaque_types.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import opaque_types as m +from pybind11_tests import ConstructorStats, UserType + + +def test_string_list(): + lst = m.StringList() + lst.push_back("Element 1") + lst.push_back("Element 2") + assert m.print_opaque_list(lst) == "Opaque list: [Element 1, Element 2]" + assert lst.back() == "Element 2" + + for i, k in enumerate(lst, start=1): + assert k == "Element {}".format(i) + 
lst.pop_back() + assert m.print_opaque_list(lst) == "Opaque list: [Element 1]" + + cvp = m.ClassWithSTLVecProperty() + assert m.print_opaque_list(cvp.stringList) == "Opaque list: []" + + cvp.stringList = lst + cvp.stringList.push_back("Element 3") + assert m.print_opaque_list(cvp.stringList) == "Opaque list: [Element 1, Element 3]" + + +def test_pointers(msg): + living_before = ConstructorStats.get(UserType).alive() + assert m.get_void_ptr_value(m.return_void_ptr()) == 0x1234 + assert m.get_void_ptr_value(UserType()) # Should also work for other C++ types + assert ConstructorStats.get(UserType).alive() == living_before + + with pytest.raises(TypeError) as excinfo: + m.get_void_ptr_value([1, 2, 3]) # This should not work + assert msg(excinfo.value) == """ + get_void_ptr_value(): incompatible function arguments. The following argument types are supported: + 1. (arg0: capsule) -> int + + Invoked with: [1, 2, 3] + """ # noqa: E501 line too long + + assert m.return_null_str() is None + assert m.get_null_str_value(m.return_null_str()) is not None + + ptr = m.return_unique_ptr() + assert "StringList" in repr(ptr) + assert m.print_opaque_list(ptr) == "Opaque list: [some value]" diff --git a/diffvg/pybind11/tests/test_operator_overloading.cpp b/diffvg/pybind11/tests/test_operator_overloading.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f3c2eaafa9918baf38483725cd52c48aa6ecb8af --- /dev/null +++ b/diffvg/pybind11/tests/test_operator_overloading.cpp @@ -0,0 +1,226 @@ +/* + tests/test_operator_overloading.cpp -- operator overloading + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" +#include +#include + +class Vector2 { +public: + Vector2(float x, float y) : x(x), y(y) { print_created(this, toString()); } + Vector2(const Vector2 &v) : x(v.x), y(v.y) { print_copy_created(this); } + Vector2(Vector2 &&v) : x(v.x), y(v.y) { print_move_created(this); v.x = v.y = 0; } + Vector2 &operator=(const Vector2 &v) { x = v.x; y = v.y; print_copy_assigned(this); return *this; } + Vector2 &operator=(Vector2 &&v) { x = v.x; y = v.y; v.x = v.y = 0; print_move_assigned(this); return *this; } + ~Vector2() { print_destroyed(this); } + + std::string toString() const { return "[" + std::to_string(x) + ", " + std::to_string(y) + "]"; } + + Vector2 operator-() const { return Vector2(-x, -y); } + Vector2 operator+(const Vector2 &v) const { return Vector2(x + v.x, y + v.y); } + Vector2 operator-(const Vector2 &v) const { return Vector2(x - v.x, y - v.y); } + Vector2 operator-(float value) const { return Vector2(x - value, y - value); } + Vector2 operator+(float value) const { return Vector2(x + value, y + value); } + Vector2 operator*(float value) const { return Vector2(x * value, y * value); } + Vector2 operator/(float value) const { return Vector2(x / value, y / value); } + Vector2 operator*(const Vector2 &v) const { return Vector2(x * v.x, y * v.y); } + Vector2 operator/(const Vector2 &v) const { return Vector2(x / v.x, y / v.y); } + Vector2& operator+=(const Vector2 &v) { x += v.x; y += v.y; return *this; } + Vector2& operator-=(const Vector2 &v) { x -= v.x; y -= v.y; return *this; } + Vector2& operator*=(float v) { x *= v; y *= v; return *this; } + Vector2& operator/=(float v) { x /= v; y /= v; return *this; } + Vector2& operator*=(const Vector2 &v) { x *= v.x; y *= v.y; return *this; } + Vector2& operator/=(const Vector2 &v) { x /= v.x; y /= v.y; return *this; } + + friend Vector2 operator+(float f, const Vector2 &v) { return Vector2(f + v.x, f + v.y); } + friend Vector2 operator-(float f, const 
Vector2 &v) { return Vector2(f - v.x, f - v.y); } + friend Vector2 operator*(float f, const Vector2 &v) { return Vector2(f * v.x, f * v.y); } + friend Vector2 operator/(float f, const Vector2 &v) { return Vector2(f / v.x, f / v.y); } + + bool operator==(const Vector2 &v) const { + return x == v.x && y == v.y; + } + bool operator!=(const Vector2 &v) const { + return x != v.x || y != v.y; + } +private: + float x, y; +}; + +class C1 { }; +class C2 { }; + +int operator+(const C1 &, const C1 &) { return 11; } +int operator+(const C2 &, const C2 &) { return 22; } +int operator+(const C2 &, const C1 &) { return 21; } +int operator+(const C1 &, const C2 &) { return 12; } + +// Note: Specializing explicit within `namespace std { ... }` is done due to a +// bug in GCC<7. If you are supporting compilers later than this, consider +// specializing `using template<> struct std::hash<...>` in the global +// namespace instead, per this recommendation: +// https://en.cppreference.com/w/cpp/language/extending_std#Adding_template_specializations +namespace std { + template<> + struct hash { + // Not a good hash function, but easy to test + size_t operator()(const Vector2 &) { return 4; } + }; +} + +// Not a good abs function, but easy to test. +std::string abs(const Vector2&) { + return "abs(Vector2)"; +} + +// MSVC warns about unknown pragmas, and warnings are errors. +#ifndef _MSC_VER + #pragma GCC diagnostic push + // clang 7.0.0 and Apple LLVM 10.0.1 introduce `-Wself-assign-overloaded` to + // `-Wall`, which is used here for overloading (e.g. `py::self += py::self `). + // Here, we suppress the warning using `#pragma diagnostic`. + // Taken from: https://github.com/RobotLocomotion/drake/commit/aaf84b46 + // TODO(eric): This could be resolved using a function / functor (e.g. `py::self()`). 
+ #if (__APPLE__) && (__clang__) + #if (__clang_major__ >= 10) && (__clang_minor__ >= 0) && (__clang_patchlevel__ >= 1) + #pragma GCC diagnostic ignored "-Wself-assign-overloaded" + #endif + #elif (__clang__) + #if (__clang_major__ >= 7) + #pragma GCC diagnostic ignored "-Wself-assign-overloaded" + #endif + #endif +#endif + +TEST_SUBMODULE(operators, m) { + + // test_operator_overloading + py::class_(m, "Vector2") + .def(py::init()) + .def(py::self + py::self) + .def(py::self + float()) + .def(py::self - py::self) + .def(py::self - float()) + .def(py::self * float()) + .def(py::self / float()) + .def(py::self * py::self) + .def(py::self / py::self) + .def(py::self += py::self) + .def(py::self -= py::self) + .def(py::self *= float()) + .def(py::self /= float()) + .def(py::self *= py::self) + .def(py::self /= py::self) + .def(float() + py::self) + .def(float() - py::self) + .def(float() * py::self) + .def(float() / py::self) + .def(-py::self) + .def("__str__", &Vector2::toString) + .def("__repr__", &Vector2::toString) + .def(py::self == py::self) + .def(py::self != py::self) + .def(py::hash(py::self)) + // N.B. See warning about usage of `py::detail::abs(py::self)` in + // `operators.h`. 
+ .def("__abs__", [](const Vector2& v) { return abs(v); }) + ; + + m.attr("Vector") = m.attr("Vector2"); + + // test_operators_notimplemented + // #393: need to return NotSupported to ensure correct arithmetic operator behavior + py::class_(m, "C1") + .def(py::init<>()) + .def(py::self + py::self); + + py::class_(m, "C2") + .def(py::init<>()) + .def(py::self + py::self) + .def("__add__", [](const C2& c2, const C1& c1) { return c2 + c1; }) + .def("__radd__", [](const C2& c2, const C1& c1) { return c1 + c2; }); + + // test_nested + // #328: first member in a class can't be used in operators + struct NestABase { int value = -2; }; + py::class_(m, "NestABase") + .def(py::init<>()) + .def_readwrite("value", &NestABase::value); + + struct NestA : NestABase { + int value = 3; + NestA& operator+=(int i) { value += i; return *this; } + }; + py::class_(m, "NestA") + .def(py::init<>()) + .def(py::self += int()) + .def("as_base", [](NestA &a) -> NestABase& { + return (NestABase&) a; + }, py::return_value_policy::reference_internal); + m.def("get_NestA", [](const NestA &a) { return a.value; }); + + struct NestB { + NestA a; + int value = 4; + NestB& operator-=(int i) { value -= i; return *this; } + }; + py::class_(m, "NestB") + .def(py::init<>()) + .def(py::self -= int()) + .def_readwrite("a", &NestB::a); + m.def("get_NestB", [](const NestB &b) { return b.value; }); + + struct NestC { + NestB b; + int value = 5; + NestC& operator*=(int i) { value *= i; return *this; } + }; + py::class_(m, "NestC") + .def(py::init<>()) + .def(py::self *= int()) + .def_readwrite("b", &NestC::b); + m.def("get_NestC", [](const NestC &c) { return c.value; }); + + + // test_overriding_eq_reset_hash + // #2191 Overriding __eq__ should set __hash__ to None + struct Comparable { + int value; + bool operator==(const Comparable& rhs) const {return value == rhs.value;} + }; + + struct Hashable : Comparable { + explicit Hashable(int value): Comparable{value}{}; + size_t hash() const { return 
static_cast(value); } + }; + + struct Hashable2 : Hashable { + using Hashable::Hashable; + }; + + py::class_(m, "Comparable") + .def(py::init()) + .def(py::self == py::self); + + py::class_(m, "Hashable") + .def(py::init()) + .def(py::self == py::self) + .def("__hash__", &Hashable::hash); + + // define __hash__ before __eq__ + py::class_(m, "Hashable2") + .def("__hash__", &Hashable::hash) + .def(py::init()) + .def(py::self == py::self); +} + +#ifndef _MSC_VER + #pragma GCC diagnostic pop +#endif diff --git a/diffvg/pybind11/tests/test_operator_overloading.py b/diffvg/pybind11/tests/test_operator_overloading.py new file mode 100644 index 0000000000000000000000000000000000000000..39e3aee271c6f94ab0d54207a02e1962fdc20a24 --- /dev/null +++ b/diffvg/pybind11/tests/test_operator_overloading.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import operators as m +from pybind11_tests import ConstructorStats + + +def test_operator_overloading(): + v1 = m.Vector2(1, 2) + v2 = m.Vector(3, -1) + v3 = m.Vector2(1, 2) # Same value as v1, but different instance. + assert v1 is not v3 + + assert str(v1) == "[1.000000, 2.000000]" + assert str(v2) == "[3.000000, -1.000000]" + + assert str(-v2) == "[-3.000000, 1.000000]" + + assert str(v1 + v2) == "[4.000000, 1.000000]" + assert str(v1 - v2) == "[-2.000000, 3.000000]" + assert str(v1 - 8) == "[-7.000000, -6.000000]" + assert str(v1 + 8) == "[9.000000, 10.000000]" + assert str(v1 * 8) == "[8.000000, 16.000000]" + assert str(v1 / 8) == "[0.125000, 0.250000]" + assert str(8 - v1) == "[7.000000, 6.000000]" + assert str(8 + v1) == "[9.000000, 10.000000]" + assert str(8 * v1) == "[8.000000, 16.000000]" + assert str(8 / v1) == "[8.000000, 4.000000]" + assert str(v1 * v2) == "[3.000000, -2.000000]" + assert str(v2 / v1) == "[3.000000, -0.500000]" + + assert v1 == v3 + assert v1 != v2 + assert hash(v1) == 4 + # TODO(eric.cousineau): Make this work. 
+ # assert abs(v1) == "abs(Vector2)" + + v1 += 2 * v2 + assert str(v1) == "[7.000000, 0.000000]" + v1 -= v2 + assert str(v1) == "[4.000000, 1.000000]" + v1 *= 2 + assert str(v1) == "[8.000000, 2.000000]" + v1 /= 16 + assert str(v1) == "[0.500000, 0.125000]" + v1 *= v2 + assert str(v1) == "[1.500000, -0.125000]" + v2 /= v1 + assert str(v2) == "[2.000000, 8.000000]" + + cstats = ConstructorStats.get(m.Vector2) + assert cstats.alive() == 3 + del v1 + assert cstats.alive() == 2 + del v2 + assert cstats.alive() == 1 + del v3 + assert cstats.alive() == 0 + assert cstats.values() == [ + '[1.000000, 2.000000]', + '[3.000000, -1.000000]', + '[1.000000, 2.000000]', + '[-3.000000, 1.000000]', + '[4.000000, 1.000000]', + '[-2.000000, 3.000000]', + '[-7.000000, -6.000000]', + '[9.000000, 10.000000]', + '[8.000000, 16.000000]', + '[0.125000, 0.250000]', + '[7.000000, 6.000000]', + '[9.000000, 10.000000]', + '[8.000000, 16.000000]', + '[8.000000, 4.000000]', + '[3.000000, -2.000000]', + '[3.000000, -0.500000]', + '[6.000000, -2.000000]', + ] + assert cstats.default_constructions == 0 + assert cstats.copy_constructions == 0 + assert cstats.move_constructions >= 10 + assert cstats.copy_assignments == 0 + assert cstats.move_assignments == 0 + + +def test_operators_notimplemented(): + """#393: need to return NotSupported to ensure correct arithmetic operator behavior""" + + c1, c2 = m.C1(), m.C2() + assert c1 + c1 == 11 + assert c2 + c2 == 22 + assert c2 + c1 == 21 + assert c1 + c2 == 12 + + +def test_nested(): + """#328: first member in a class can't be used in operators""" + + a = m.NestA() + b = m.NestB() + c = m.NestC() + + a += 10 + assert m.get_NestA(a) == 13 + b.a += 100 + assert m.get_NestA(b.a) == 103 + c.b.a += 1000 + assert m.get_NestA(c.b.a) == 1003 + b -= 1 + assert m.get_NestB(b) == 3 + c.b -= 3 + assert m.get_NestB(c.b) == 1 + c *= 7 + assert m.get_NestC(c) == 35 + + abase = a.as_base() + assert abase.value == -2 + a.as_base().value += 44 + assert abase.value == 42 + 
assert c.b.a.as_base().value == -2 + c.b.a.as_base().value += 44 + assert c.b.a.as_base().value == 42 + + del c + pytest.gc_collect() + del a # Shouldn't delete while abase is still alive + pytest.gc_collect() + + assert abase.value == 42 + del abase, b + pytest.gc_collect() + + +def test_overriding_eq_reset_hash(): + + assert m.Comparable(15) is not m.Comparable(15) + assert m.Comparable(15) == m.Comparable(15) + + with pytest.raises(TypeError): + hash(m.Comparable(15)) # TypeError: unhashable type: 'm.Comparable' + + for hashable in (m.Hashable, m.Hashable2): + assert hashable(15) is not hashable(15) + assert hashable(15) == hashable(15) + + assert hash(hashable(15)) == 15 + assert hash(hashable(15)) == hash(hashable(15)) diff --git a/diffvg/pybind11/tests/test_pickling.cpp b/diffvg/pybind11/tests/test_pickling.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9dc63bda3b5949032fbcd30e7aa4e7db2072dcff --- /dev/null +++ b/diffvg/pybind11/tests/test_pickling.cpp @@ -0,0 +1,130 @@ +/* + tests/test_pickling.cpp -- pickle support + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" + +TEST_SUBMODULE(pickling, m) { + // test_roundtrip + class Pickleable { + public: + Pickleable(const std::string &value) : m_value(value) { } + const std::string &value() const { return m_value; } + + void setExtra1(int extra1) { m_extra1 = extra1; } + void setExtra2(int extra2) { m_extra2 = extra2; } + int extra1() const { return m_extra1; } + int extra2() const { return m_extra2; } + private: + std::string m_value; + int m_extra1 = 0; + int m_extra2 = 0; + }; + + class PickleableNew : public Pickleable { + public: + using Pickleable::Pickleable; + }; + + py::class_(m, "Pickleable") + .def(py::init()) + .def("value", &Pickleable::value) + .def("extra1", &Pickleable::extra1) + .def("extra2", &Pickleable::extra2) + .def("setExtra1", &Pickleable::setExtra1) + .def("setExtra2", &Pickleable::setExtra2) + // For details on the methods below, refer to + // http://docs.python.org/3/library/pickle.html#pickling-class-instances + .def("__getstate__", [](const Pickleable &p) { + /* Return a tuple that fully encodes the state of the object */ + return py::make_tuple(p.value(), p.extra1(), p.extra2()); + }) + .def("__setstate__", [](Pickleable &p, py::tuple t) { + if (t.size() != 3) + throw std::runtime_error("Invalid state!"); + /* Invoke the constructor (need to use in-place version) */ + new (&p) Pickleable(t[0].cast()); + + /* Assign any additional state */ + p.setExtra1(t[1].cast()); + p.setExtra2(t[2].cast()); + }); + + py::class_(m, "PickleableNew") + .def(py::init()) + .def(py::pickle( + [](const PickleableNew &p) { + return py::make_tuple(p.value(), p.extra1(), p.extra2()); + }, + [](py::tuple t) { + if (t.size() != 3) + throw std::runtime_error("Invalid state!"); + auto p = PickleableNew(t[0].cast()); + + p.setExtra1(t[1].cast()); + p.setExtra2(t[2].cast()); + return p; + } + )); + +#if !defined(PYPY_VERSION) + // test_roundtrip_with_dict + class PickleableWithDict { + public: + PickleableWithDict(const std::string &value) : 
value(value) { } + + std::string value; + int extra; + }; + + class PickleableWithDictNew : public PickleableWithDict { + public: + using PickleableWithDict::PickleableWithDict; + }; + + py::class_(m, "PickleableWithDict", py::dynamic_attr()) + .def(py::init()) + .def_readwrite("value", &PickleableWithDict::value) + .def_readwrite("extra", &PickleableWithDict::extra) + .def("__getstate__", [](py::object self) { + /* Also include __dict__ in state */ + return py::make_tuple(self.attr("value"), self.attr("extra"), self.attr("__dict__")); + }) + .def("__setstate__", [](py::object self, py::tuple t) { + if (t.size() != 3) + throw std::runtime_error("Invalid state!"); + /* Cast and construct */ + auto& p = self.cast(); + new (&p) PickleableWithDict(t[0].cast()); + + /* Assign C++ state */ + p.extra = t[1].cast(); + + /* Assign Python state */ + self.attr("__dict__") = t[2]; + }); + + py::class_(m, "PickleableWithDictNew") + .def(py::init()) + .def(py::pickle( + [](py::object self) { + return py::make_tuple(self.attr("value"), self.attr("extra"), self.attr("__dict__")); + }, + [](const py::tuple &t) { + if (t.size() != 3) + throw std::runtime_error("Invalid state!"); + + auto cpp_state = PickleableWithDictNew(t[0].cast()); + cpp_state.extra = t[1].cast(); + + auto py_state = t[2].cast(); + return std::make_pair(cpp_state, py_state); + } + )); +#endif +} diff --git a/diffvg/pybind11/tests/test_pickling.py b/diffvg/pybind11/tests/test_pickling.py new file mode 100644 index 0000000000000000000000000000000000000000..9aee70505de7acc21ee09623417d35812ae11463 --- /dev/null +++ b/diffvg/pybind11/tests/test_pickling.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +import pytest + +import env # noqa: F401 + +from pybind11_tests import pickling as m + +try: + import cPickle as pickle # Use cPickle on Python 2.7 +except ImportError: + import pickle + + +@pytest.mark.parametrize("cls_name", ["Pickleable", "PickleableNew"]) +def test_roundtrip(cls_name): + cls = getattr(m, cls_name) + p 
= cls("test_value") + p.setExtra1(15) + p.setExtra2(48) + + data = pickle.dumps(p, 2) # Must use pickle protocol >= 2 + p2 = pickle.loads(data) + assert p2.value() == p.value() + assert p2.extra1() == p.extra1() + assert p2.extra2() == p.extra2() + + +@pytest.mark.xfail("env.PYPY") +@pytest.mark.parametrize("cls_name", ["PickleableWithDict", "PickleableWithDictNew"]) +def test_roundtrip_with_dict(cls_name): + cls = getattr(m, cls_name) + p = cls("test_value") + p.extra = 15 + p.dynamic = "Attribute" + + data = pickle.dumps(p, pickle.HIGHEST_PROTOCOL) + p2 = pickle.loads(data) + assert p2.value == p.value + assert p2.extra == p.extra + assert p2.dynamic == p.dynamic + + +def test_enum_pickle(): + from pybind11_tests import enums as e + data = pickle.dumps(e.EOne, 2) + assert e.EOne == pickle.loads(data) diff --git a/diffvg/pybind11/tests/test_pytypes.cpp b/diffvg/pybind11/tests/test_pytypes.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0f8d56410f0add1f2e341e06ea9560ab9e88d643 --- /dev/null +++ b/diffvg/pybind11/tests/test_pytypes.cpp @@ -0,0 +1,375 @@ +/* + tests/test_pytypes.cpp -- Python type casters + + Copyright (c) 2017 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" + + +TEST_SUBMODULE(pytypes, m) { + // test_int + m.def("get_int", []{return py::int_(0);}); + // test_iterator + m.def("get_iterator", []{return py::iterator();}); + // test_iterable + m.def("get_iterable", []{return py::iterable();}); + // test_list + m.def("get_list", []() { + py::list list; + list.append("value"); + py::print("Entry at position 0:", list[0]); + list[0] = py::str("overwritten"); + list.insert(0, "inserted-0"); + list.insert(2, "inserted-2"); + return list; + }); + m.def("print_list", [](py::list list) { + int index = 0; + for (auto item : list) + py::print("list item {}: {}"_s.format(index++, item)); + }); + // test_none + m.def("get_none", []{return py::none();}); + m.def("print_none", [](py::none none) { + py::print("none: {}"_s.format(none)); + }); + + // test_set + m.def("get_set", []() { + py::set set; + set.add(py::str("key1")); + set.add("key2"); + set.add(std::string("key3")); + return set; + }); + m.def("print_set", [](py::set set) { + for (auto item : set) + py::print("key:", item); + }); + m.def("set_contains", [](py::set set, py::object key) { + return set.contains(key); + }); + m.def("set_contains", [](py::set set, const char* key) { + return set.contains(key); + }); + + // test_dict + m.def("get_dict", []() { return py::dict("key"_a="value"); }); + m.def("print_dict", [](py::dict dict) { + for (auto item : dict) + py::print("key: {}, value={}"_s.format(item.first, item.second)); + }); + m.def("dict_keyword_constructor", []() { + auto d1 = py::dict("x"_a=1, "y"_a=2); + auto d2 = py::dict("z"_a=3, **d1); + return d2; + }); + m.def("dict_contains", [](py::dict dict, py::object val) { + return dict.contains(val); + }); + m.def("dict_contains", [](py::dict dict, const char* val) { + return dict.contains(val); + }); + + // test_str + m.def("str_from_string", []() { return py::str(std::string("baz")); }); + m.def("str_from_bytes", []() { return py::str(py::bytes("boo", 3)); }); + m.def("str_from_object", 
[](const py::object& obj) { return py::str(obj); }); + m.def("repr_from_object", [](const py::object& obj) { return py::repr(obj); }); + + m.def("str_format", []() { + auto s1 = "{} + {} = {}"_s.format(1, 2, 3); + auto s2 = "{a} + {b} = {c}"_s.format("a"_a=1, "b"_a=2, "c"_a=3); + return py::make_tuple(s1, s2); + }); + + // test_bytes + m.def("bytes_from_string", []() { return py::bytes(std::string("foo")); }); + m.def("bytes_from_str", []() { return py::bytes(py::str("bar", 3)); }); + + // test_capsule + m.def("return_capsule_with_destructor", []() { + py::print("creating capsule"); + return py::capsule([]() { + py::print("destructing capsule"); + }); + }); + + m.def("return_capsule_with_destructor_2", []() { + py::print("creating capsule"); + return py::capsule((void *) 1234, [](void *ptr) { + py::print("destructing capsule: {}"_s.format((size_t) ptr)); + }); + }); + + m.def("return_capsule_with_name_and_destructor", []() { + auto capsule = py::capsule((void *) 1234, "pointer type description", [](PyObject *ptr) { + if (ptr) { + auto name = PyCapsule_GetName(ptr); + py::print("destructing capsule ({}, '{}')"_s.format( + (size_t) PyCapsule_GetPointer(ptr, name), name + )); + } + }); + void *contents = capsule; + py::print("created capsule ({}, '{}')"_s.format((size_t) contents, capsule.name())); + return capsule; + }); + + // test_accessors + m.def("accessor_api", [](py::object o) { + auto d = py::dict(); + + d["basic_attr"] = o.attr("basic_attr"); + + auto l = py::list(); + for (const auto &item : o.attr("begin_end")) { + l.append(item); + } + d["begin_end"] = l; + + d["operator[object]"] = o.attr("d")["operator[object]"_s]; + d["operator[char *]"] = o.attr("d")["operator[char *]"]; + + d["attr(object)"] = o.attr("sub").attr("attr_obj"); + d["attr(char *)"] = o.attr("sub").attr("attr_char"); + try { + o.attr("sub").attr("missing").ptr(); + } catch (const py::error_already_set &) { + d["missing_attr_ptr"] = "raised"_s; + } + try { + o.attr("missing").attr("doesn't 
matter"); + } catch (const py::error_already_set &) { + d["missing_attr_chain"] = "raised"_s; + } + + d["is_none"] = o.attr("basic_attr").is_none(); + + d["operator()"] = o.attr("func")(1); + d["operator*"] = o.attr("func")(*o.attr("begin_end")); + + // Test implicit conversion + py::list implicit_list = o.attr("begin_end"); + d["implicit_list"] = implicit_list; + py::dict implicit_dict = o.attr("__dict__"); + d["implicit_dict"] = implicit_dict; + + return d; + }); + + m.def("tuple_accessor", [](py::tuple existing_t) { + try { + existing_t[0] = 1; + } catch (const py::error_already_set &) { + // --> Python system error + // Only new tuples (refcount == 1) are mutable + auto new_t = py::tuple(3); + for (size_t i = 0; i < new_t.size(); ++i) { + new_t[i] = i; + } + return new_t; + } + return py::tuple(); + }); + + m.def("accessor_assignment", []() { + auto l = py::list(1); + l[0] = 0; + + auto d = py::dict(); + d["get"] = l[0]; + auto var = l[0]; + d["deferred_get"] = var; + l[0] = 1; + d["set"] = l[0]; + var = 99; // this assignment should not overwrite l[0] + d["deferred_set"] = l[0]; + d["var"] = var; + + return d; + }); + + // test_constructors + m.def("default_constructors", []() { + return py::dict( + "bytes"_a=py::bytes(), + "str"_a=py::str(), + "bool"_a=py::bool_(), + "int"_a=py::int_(), + "float"_a=py::float_(), + "tuple"_a=py::tuple(), + "list"_a=py::list(), + "dict"_a=py::dict(), + "set"_a=py::set() + ); + }); + + m.def("converting_constructors", [](py::dict d) { + return py::dict( + "bytes"_a=py::bytes(d["bytes"]), + "str"_a=py::str(d["str"]), + "bool"_a=py::bool_(d["bool"]), + "int"_a=py::int_(d["int"]), + "float"_a=py::float_(d["float"]), + "tuple"_a=py::tuple(d["tuple"]), + "list"_a=py::list(d["list"]), + "dict"_a=py::dict(d["dict"]), + "set"_a=py::set(d["set"]), + "memoryview"_a=py::memoryview(d["memoryview"]) + ); + }); + + m.def("cast_functions", [](py::dict d) { + // When converting between Python types, obj.cast() should be the same as T(obj) + 
return py::dict( + "bytes"_a=d["bytes"].cast(), + "str"_a=d["str"].cast(), + "bool"_a=d["bool"].cast(), + "int"_a=d["int"].cast(), + "float"_a=d["float"].cast(), + "tuple"_a=d["tuple"].cast(), + "list"_a=d["list"].cast(), + "dict"_a=d["dict"].cast(), + "set"_a=d["set"].cast(), + "memoryview"_a=d["memoryview"].cast() + ); + }); + + m.def("convert_to_pybind11_str", [](py::object o) { return py::str(o); }); + + m.def("get_implicit_casting", []() { + py::dict d; + d["char*_i1"] = "abc"; + const char *c2 = "abc"; + d["char*_i2"] = c2; + d["char*_e"] = py::cast(c2); + d["char*_p"] = py::str(c2); + + d["int_i1"] = 42; + int i = 42; + d["int_i2"] = i; + i++; + d["int_e"] = py::cast(i); + i++; + d["int_p"] = py::int_(i); + + d["str_i1"] = std::string("str"); + std::string s2("str1"); + d["str_i2"] = s2; + s2[3] = '2'; + d["str_e"] = py::cast(s2); + s2[3] = '3'; + d["str_p"] = py::str(s2); + + py::list l(2); + l[0] = 3; + l[1] = py::cast(6); + l.append(9); + l.append(py::cast(12)); + l.append(py::int_(15)); + + return py::dict( + "d"_a=d, + "l"_a=l + ); + }); + + // test_print + m.def("print_function", []() { + py::print("Hello, World!"); + py::print(1, 2.0, "three", true, std::string("-- multiple args")); + auto args = py::make_tuple("and", "a", "custom", "separator"); + py::print("*args", *args, "sep"_a="-"); + py::print("no new line here", "end"_a=" -- "); + py::print("next print"); + + auto py_stderr = py::module::import("sys").attr("stderr"); + py::print("this goes to stderr", "file"_a=py_stderr); + + py::print("flush", "flush"_a=true); + + py::print("{a} + {b} = {c}"_s.format("a"_a="py::print", "b"_a="str.format", "c"_a="this")); + }); + + m.def("print_failure", []() { py::print(42, UnregisteredType()); }); + + m.def("hash_function", [](py::object obj) { return py::hash(obj); }); + + m.def("test_number_protocol", [](py::object a, py::object b) { + py::list l; + l.append(a.equal(b)); + l.append(a.not_equal(b)); + l.append(a < b); + l.append(a <= b); + l.append(a > b); + 
l.append(a >= b); + l.append(a + b); + l.append(a - b); + l.append(a * b); + l.append(a / b); + l.append(a | b); + l.append(a & b); + l.append(a ^ b); + l.append(a >> b); + l.append(a << b); + return l; + }); + + m.def("test_list_slicing", [](py::list a) { + return a[py::slice(0, -1, 2)]; + }); + + m.def("test_memoryview_object", [](py::buffer b) { + return py::memoryview(b); + }); + + m.def("test_memoryview_buffer_info", [](py::buffer b) { + return py::memoryview(b.request()); + }); + + m.def("test_memoryview_from_buffer", [](bool is_unsigned) { + static const int16_t si16[] = { 3, 1, 4, 1, 5 }; + static const uint16_t ui16[] = { 2, 7, 1, 8 }; + if (is_unsigned) + return py::memoryview::from_buffer( + ui16, { 4 }, { sizeof(uint16_t) }); + else + return py::memoryview::from_buffer( + si16, { 5 }, { sizeof(int16_t) }); + }); + + m.def("test_memoryview_from_buffer_nativeformat", []() { + static const char* format = "@i"; + static const int32_t arr[] = { 4, 7, 5 }; + return py::memoryview::from_buffer( + arr, sizeof(int32_t), format, { 3 }, { sizeof(int32_t) }); + }); + + m.def("test_memoryview_from_buffer_empty_shape", []() { + static const char* buf = ""; + return py::memoryview::from_buffer(buf, 1, "B", { }, { }); + }); + + m.def("test_memoryview_from_buffer_invalid_strides", []() { + static const char* buf = "\x02\x03\x04"; + return py::memoryview::from_buffer(buf, 1, "B", { 3 }, { }); + }); + + m.def("test_memoryview_from_buffer_nullptr", []() { + return py::memoryview::from_buffer( + static_cast(nullptr), 1, "B", { }, { }); + }); + +#if PY_MAJOR_VERSION >= 3 + m.def("test_memoryview_from_memory", []() { + const char* buf = "\xff\xe1\xab\x37"; + return py::memoryview::from_memory( + buf, static_cast(strlen(buf))); + }); +#endif +} diff --git a/diffvg/pybind11/tests/test_pytypes.py b/diffvg/pybind11/tests/test_pytypes.py new file mode 100644 index 0000000000000000000000000000000000000000..95cc94af8c89517bf4a993af43041414c46d4dd5 --- /dev/null +++ 
b/diffvg/pybind11/tests/test_pytypes.py @@ -0,0 +1,392 @@ +# -*- coding: utf-8 -*- +from __future__ import division +import pytest +import sys + +import env # noqa: F401 + +from pybind11_tests import pytypes as m +from pybind11_tests import debug_enabled + + +def test_int(doc): + assert doc(m.get_int) == "get_int() -> int" + + +def test_iterator(doc): + assert doc(m.get_iterator) == "get_iterator() -> Iterator" + + +def test_iterable(doc): + assert doc(m.get_iterable) == "get_iterable() -> Iterable" + + +def test_list(capture, doc): + with capture: + lst = m.get_list() + assert lst == ["inserted-0", "overwritten", "inserted-2"] + + lst.append("value2") + m.print_list(lst) + assert capture.unordered == """ + Entry at position 0: value + list item 0: inserted-0 + list item 1: overwritten + list item 2: inserted-2 + list item 3: value2 + """ + + assert doc(m.get_list) == "get_list() -> list" + assert doc(m.print_list) == "print_list(arg0: list) -> None" + + +def test_none(capture, doc): + assert doc(m.get_none) == "get_none() -> None" + assert doc(m.print_none) == "print_none(arg0: None) -> None" + + +def test_set(capture, doc): + s = m.get_set() + assert s == {"key1", "key2", "key3"} + + with capture: + s.add("key4") + m.print_set(s) + assert capture.unordered == """ + key: key1 + key: key2 + key: key3 + key: key4 + """ + + assert not m.set_contains(set([]), 42) + assert m.set_contains({42}, 42) + assert m.set_contains({"foo"}, "foo") + + assert doc(m.get_list) == "get_list() -> list" + assert doc(m.print_list) == "print_list(arg0: list) -> None" + + +def test_dict(capture, doc): + d = m.get_dict() + assert d == {"key": "value"} + + with capture: + d["key2"] = "value2" + m.print_dict(d) + assert capture.unordered == """ + key: key, value=value + key: key2, value=value2 + """ + + assert not m.dict_contains({}, 42) + assert m.dict_contains({42: None}, 42) + assert m.dict_contains({"foo": None}, "foo") + + assert doc(m.get_dict) == "get_dict() -> dict" + assert 
doc(m.print_dict) == "print_dict(arg0: dict) -> None" + + assert m.dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3} + + +def test_str(doc): + assert m.str_from_string().encode().decode() == "baz" + assert m.str_from_bytes().encode().decode() == "boo" + + assert doc(m.str_from_bytes) == "str_from_bytes() -> str" + + class A(object): + def __str__(self): + return "this is a str" + + def __repr__(self): + return "this is a repr" + + assert m.str_from_object(A()) == "this is a str" + assert m.repr_from_object(A()) == "this is a repr" + + s1, s2 = m.str_format() + assert s1 == "1 + 2 = 3" + assert s1 == s2 + + +def test_bytes(doc): + assert m.bytes_from_string().decode() == "foo" + assert m.bytes_from_str().decode() == "bar" + + assert doc(m.bytes_from_str) == "bytes_from_str() -> {}".format( + "str" if env.PY2 else "bytes" + ) + + +def test_capsule(capture): + pytest.gc_collect() + with capture: + a = m.return_capsule_with_destructor() + del a + pytest.gc_collect() + assert capture.unordered == """ + creating capsule + destructing capsule + """ + + with capture: + a = m.return_capsule_with_destructor_2() + del a + pytest.gc_collect() + assert capture.unordered == """ + creating capsule + destructing capsule: 1234 + """ + + with capture: + a = m.return_capsule_with_name_and_destructor() + del a + pytest.gc_collect() + assert capture.unordered == """ + created capsule (1234, 'pointer type description') + destructing capsule (1234, 'pointer type description') + """ + + +def test_accessors(): + class SubTestObject: + attr_obj = 1 + attr_char = 2 + + class TestObject: + basic_attr = 1 + begin_end = [1, 2, 3] + d = {"operator[object]": 1, "operator[char *]": 2} + sub = SubTestObject() + + def func(self, x, *args): + return self.basic_attr + x + sum(args) + + d = m.accessor_api(TestObject()) + assert d["basic_attr"] == 1 + assert d["begin_end"] == [1, 2, 3] + assert d["operator[object]"] == 1 + assert d["operator[char *]"] == 2 + assert d["attr(object)"] == 1 + assert 
d["attr(char *)"] == 2 + assert d["missing_attr_ptr"] == "raised" + assert d["missing_attr_chain"] == "raised" + assert d["is_none"] is False + assert d["operator()"] == 2 + assert d["operator*"] == 7 + assert d["implicit_list"] == [1, 2, 3] + assert all(x in TestObject.__dict__ for x in d["implicit_dict"]) + + assert m.tuple_accessor(tuple()) == (0, 1, 2) + + d = m.accessor_assignment() + assert d["get"] == 0 + assert d["deferred_get"] == 0 + assert d["set"] == 1 + assert d["deferred_set"] == 1 + assert d["var"] == 99 + + +def test_constructors(): + """C++ default and converting constructors are equivalent to type calls in Python""" + types = [bytes, str, bool, int, float, tuple, list, dict, set] + expected = {t.__name__: t() for t in types} + if env.PY2: + # Note that bytes.__name__ == 'str' in Python 2. + # pybind11::str is unicode even under Python 2. + expected["bytes"] = bytes() + expected["str"] = unicode() # noqa: F821 + assert m.default_constructors() == expected + + data = { + bytes: b'41', # Currently no supported or working conversions. + str: 42, + bool: "Not empty", + int: "42", + float: "+1e3", + tuple: range(3), + list: range(3), + dict: [("two", 2), ("one", 1), ("three", 3)], + set: [4, 4, 5, 6, 6, 6], + memoryview: b'abc' + } + inputs = {k.__name__: v for k, v in data.items()} + expected = {k.__name__: k(v) for k, v in data.items()} + if env.PY2: # Similar to the above. See comments above. 
+ inputs["bytes"] = b'41' + inputs["str"] = 42 + expected["bytes"] = b'41' + expected["str"] = u"42" + + assert m.converting_constructors(inputs) == expected + assert m.cast_functions(inputs) == expected + + # Converting constructors and cast functions should just reference rather + # than copy when no conversion is needed: + noconv1 = m.converting_constructors(expected) + for k in noconv1: + assert noconv1[k] is expected[k] + + noconv2 = m.cast_functions(expected) + for k in noconv2: + assert noconv2[k] is expected[k] + + +def test_pybind11_str_raw_str(): + # specifically to exercise pybind11::str::raw_str + cvt = m.convert_to_pybind11_str + assert cvt(u"Str") == u"Str" + assert cvt(b'Bytes') == u"Bytes" if env.PY2 else "b'Bytes'" + assert cvt(None) == u"None" + assert cvt(False) == u"False" + assert cvt(True) == u"True" + assert cvt(42) == u"42" + assert cvt(2**65) == u"36893488147419103232" + assert cvt(-1.50) == u"-1.5" + assert cvt(()) == u"()" + assert cvt((18,)) == u"(18,)" + assert cvt([]) == u"[]" + assert cvt([28]) == u"[28]" + assert cvt({}) == u"{}" + assert cvt({3: 4}) == u"{3: 4}" + assert cvt(set()) == u"set([])" if env.PY2 else "set()" + assert cvt({3, 3}) == u"set([3])" if env.PY2 else "{3}" + + valid_orig = u"Η±" + valid_utf8 = valid_orig.encode("utf-8") + valid_cvt = cvt(valid_utf8) + assert type(valid_cvt) == bytes # Probably surprising. + assert valid_cvt == b'\xc7\xb1' + + malformed_utf8 = b'\x80' + malformed_cvt = cvt(malformed_utf8) + assert type(malformed_cvt) == bytes # Probably surprising. 
+ assert malformed_cvt == b'\x80' + + +def test_implicit_casting(): + """Tests implicit casting when assigning or appending to dicts and lists.""" + z = m.get_implicit_casting() + assert z['d'] == { + 'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc', + 'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3', + 'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44 + } + assert z['l'] == [3, 6, 9, 12, 15] + + +def test_print(capture): + with capture: + m.print_function() + assert capture == """ + Hello, World! + 1 2.0 three True -- multiple args + *args-and-a-custom-separator + no new line here -- next print + flush + py::print + str.format = this + """ + assert capture.stderr == "this goes to stderr" + + with pytest.raises(RuntimeError) as excinfo: + m.print_failure() + assert str(excinfo.value) == "make_tuple(): unable to convert " + ( + "argument of type 'UnregisteredType' to Python object" + if debug_enabled else + "arguments to Python object (compile in debug mode for details)" + ) + + +def test_hash(): + class Hashable(object): + def __init__(self, value): + self.value = value + + def __hash__(self): + return self.value + + class Unhashable(object): + __hash__ = None + + assert m.hash_function(Hashable(42)) == 42 + with pytest.raises(TypeError): + m.hash_function(Unhashable()) + + +def test_number_protocol(): + for a, b in [(1, 1), (3, 5)]: + li = [a == b, a != b, a < b, a <= b, a > b, a >= b, a + b, + a - b, a * b, a / b, a | b, a & b, a ^ b, a >> b, a << b] + assert m.test_number_protocol(a, b) == li + + +def test_list_slicing(): + li = list(range(100)) + assert li[::2] == m.test_list_slicing(li) + + +@pytest.mark.parametrize('method, args, fmt, expected_view', [ + (m.test_memoryview_object, (b'red',), 'B', b'red'), + (m.test_memoryview_buffer_info, (b'green',), 'B', b'green'), + (m.test_memoryview_from_buffer, (False,), 'h', [3, 1, 4, 1, 5]), + (m.test_memoryview_from_buffer, (True,), 'H', [2, 7, 1, 8]), + 
(m.test_memoryview_from_buffer_nativeformat, (), '@i', [4, 7, 5]), +]) +def test_memoryview(method, args, fmt, expected_view): + view = method(*args) + assert isinstance(view, memoryview) + assert view.format == fmt + if isinstance(expected_view, bytes) or not env.PY2: + view_as_list = list(view) + else: + # Using max to pick non-zero byte (big-endian vs little-endian). + view_as_list = [max([ord(c) for c in s]) for s in view] + assert view_as_list == list(expected_view) + + +@pytest.mark.xfail("env.PYPY", reason="getrefcount is not available") +@pytest.mark.parametrize('method', [ + m.test_memoryview_object, + m.test_memoryview_buffer_info, +]) +def test_memoryview_refcount(method): + buf = b'\x0a\x0b\x0c\x0d' + ref_before = sys.getrefcount(buf) + view = method(buf) + ref_after = sys.getrefcount(buf) + assert ref_before < ref_after + assert list(view) == list(buf) + + +def test_memoryview_from_buffer_empty_shape(): + view = m.test_memoryview_from_buffer_empty_shape() + assert isinstance(view, memoryview) + assert view.format == 'B' + if env.PY2: + # Python 2 behavior is weird, but Python 3 (the future) is fine. + # PyPy3 has + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" +#include +#include + +#include + +template +class NonZeroIterator { + const T* ptr_; +public: + NonZeroIterator(const T* ptr) : ptr_(ptr) {} + const T& operator*() const { return *ptr_; } + NonZeroIterator& operator++() { ++ptr_; return *this; } +}; + +class NonZeroSentinel {}; + +template +bool operator==(const NonZeroIterator>& it, const NonZeroSentinel&) { + return !(*it).first || !(*it).second; +} + +template +py::list test_random_access_iterator(PythonType x) { + if (x.size() < 5) + throw py::value_error("Please provide at least 5 elements for testing."); + + auto checks = py::list(); + auto assert_equal = [&checks](py::handle a, py::handle b) { + auto result = PyObject_RichCompareBool(a.ptr(), b.ptr(), Py_EQ); + if (result == -1) { throw py::error_already_set(); } + checks.append(result != 0); + }; + + auto it = x.begin(); + assert_equal(x[0], *it); + assert_equal(x[0], it[0]); + assert_equal(x[1], it[1]); + + assert_equal(x[1], *(++it)); + assert_equal(x[1], *(it++)); + assert_equal(x[2], *it); + assert_equal(x[3], *(it += 1)); + assert_equal(x[2], *(--it)); + assert_equal(x[2], *(it--)); + assert_equal(x[1], *it); + assert_equal(x[0], *(it -= 1)); + + assert_equal(it->attr("real"), x[0].attr("real")); + assert_equal((it + 1)->attr("real"), x[1].attr("real")); + + assert_equal(x[1], *(it + 1)); + assert_equal(x[1], *(1 + it)); + it += 3; + assert_equal(x[1], *(it - 2)); + + checks.append(static_cast(x.end() - x.begin()) == x.size()); + checks.append((x.begin() + static_cast(x.size())) == x.end()); + checks.append(x.begin() < x.end()); + + return checks; +} + +TEST_SUBMODULE(sequences_and_iterators, m) { + // test_sliceable + class Sliceable{ + public: + Sliceable(int n): size(n) {} + int start,stop,step; + int size; + }; + py::class_(m,"Sliceable") + .def(py::init()) + .def("__getitem__",[](const Sliceable &s, py::slice slice) { + ssize_t start, stop, step, slicelength; + if 
(!slice.compute(s.size, &start, &stop, &step, &slicelength)) + throw py::error_already_set(); + int istart = static_cast(start); + int istop = static_cast(stop); + int istep = static_cast(step); + return std::make_tuple(istart,istop,istep); + }) + ; + + // test_sequence + class Sequence { + public: + Sequence(size_t size) : m_size(size) { + print_created(this, "of size", m_size); + m_data = new float[size]; + memset(m_data, 0, sizeof(float) * size); + } + Sequence(const std::vector &value) : m_size(value.size()) { + print_created(this, "of size", m_size, "from std::vector"); + m_data = new float[m_size]; + memcpy(m_data, &value[0], sizeof(float) * m_size); + } + Sequence(const Sequence &s) : m_size(s.m_size) { + print_copy_created(this); + m_data = new float[m_size]; + memcpy(m_data, s.m_data, sizeof(float)*m_size); + } + Sequence(Sequence &&s) : m_size(s.m_size), m_data(s.m_data) { + print_move_created(this); + s.m_size = 0; + s.m_data = nullptr; + } + + ~Sequence() { print_destroyed(this); delete[] m_data; } + + Sequence &operator=(const Sequence &s) { + if (&s != this) { + delete[] m_data; + m_size = s.m_size; + m_data = new float[m_size]; + memcpy(m_data, s.m_data, sizeof(float)*m_size); + } + print_copy_assigned(this); + return *this; + } + + Sequence &operator=(Sequence &&s) { + if (&s != this) { + delete[] m_data; + m_size = s.m_size; + m_data = s.m_data; + s.m_size = 0; + s.m_data = nullptr; + } + print_move_assigned(this); + return *this; + } + + bool operator==(const Sequence &s) const { + if (m_size != s.size()) return false; + for (size_t i = 0; i < m_size; ++i) + if (m_data[i] != s[i]) + return false; + return true; + } + bool operator!=(const Sequence &s) const { return !operator==(s); } + + float operator[](size_t index) const { return m_data[index]; } + float &operator[](size_t index) { return m_data[index]; } + + bool contains(float v) const { + for (size_t i = 0; i < m_size; ++i) + if (v == m_data[i]) + return true; + return false; + } + + 
Sequence reversed() const { + Sequence result(m_size); + for (size_t i = 0; i < m_size; ++i) + result[m_size - i - 1] = m_data[i]; + return result; + } + + size_t size() const { return m_size; } + + const float *begin() const { return m_data; } + const float *end() const { return m_data+m_size; } + + private: + size_t m_size; + float *m_data; + }; + py::class_(m, "Sequence") + .def(py::init()) + .def(py::init&>()) + /// Bare bones interface + .def("__getitem__", [](const Sequence &s, size_t i) { + if (i >= s.size()) throw py::index_error(); + return s[i]; + }) + .def("__setitem__", [](Sequence &s, size_t i, float v) { + if (i >= s.size()) throw py::index_error(); + s[i] = v; + }) + .def("__len__", &Sequence::size) + /// Optional sequence protocol operations + .def("__iter__", [](const Sequence &s) { return py::make_iterator(s.begin(), s.end()); }, + py::keep_alive<0, 1>() /* Essential: keep object alive while iterator exists */) + .def("__contains__", [](const Sequence &s, float v) { return s.contains(v); }) + .def("__reversed__", [](const Sequence &s) -> Sequence { return s.reversed(); }) + /// Slicing protocol (optional) + .def("__getitem__", [](const Sequence &s, py::slice slice) -> Sequence* { + size_t start, stop, step, slicelength; + if (!slice.compute(s.size(), &start, &stop, &step, &slicelength)) + throw py::error_already_set(); + Sequence *seq = new Sequence(slicelength); + for (size_t i = 0; i < slicelength; ++i) { + (*seq)[i] = s[start]; start += step; + } + return seq; + }) + .def("__setitem__", [](Sequence &s, py::slice slice, const Sequence &value) { + size_t start, stop, step, slicelength; + if (!slice.compute(s.size(), &start, &stop, &step, &slicelength)) + throw py::error_already_set(); + if (slicelength != value.size()) + throw std::runtime_error("Left and right hand size of slice assignment have different sizes!"); + for (size_t i = 0; i < slicelength; ++i) { + s[start] = value[i]; start += step; + } + }) + /// Comparisons + .def(py::self == 
py::self) + .def(py::self != py::self) + // Could also define py::self + py::self for concatenation, etc. + ; + + // test_map_iterator + // Interface of a map-like object that isn't (directly) an unordered_map, but provides some basic + // map-like functionality. + class StringMap { + public: + StringMap() = default; + StringMap(std::unordered_map init) + : map(std::move(init)) {} + + void set(std::string key, std::string val) { map[key] = val; } + std::string get(std::string key) const { return map.at(key); } + size_t size() const { return map.size(); } + private: + std::unordered_map map; + public: + decltype(map.cbegin()) begin() const { return map.cbegin(); } + decltype(map.cend()) end() const { return map.cend(); } + }; + py::class_(m, "StringMap") + .def(py::init<>()) + .def(py::init>()) + .def("__getitem__", [](const StringMap &map, std::string key) { + try { return map.get(key); } + catch (const std::out_of_range&) { + throw py::key_error("key '" + key + "' does not exist"); + } + }) + .def("__setitem__", &StringMap::set) + .def("__len__", &StringMap::size) + .def("__iter__", [](const StringMap &map) { return py::make_key_iterator(map.begin(), map.end()); }, + py::keep_alive<0, 1>()) + .def("items", [](const StringMap &map) { return py::make_iterator(map.begin(), map.end()); }, + py::keep_alive<0, 1>()) + ; + + // test_generalized_iterators + class IntPairs { + public: + IntPairs(std::vector> data) : data_(std::move(data)) {} + const std::pair* begin() const { return data_.data(); } + private: + std::vector> data_; + }; + py::class_(m, "IntPairs") + .def(py::init>>()) + .def("nonzero", [](const IntPairs& s) { + return py::make_iterator(NonZeroIterator>(s.begin()), NonZeroSentinel()); + }, py::keep_alive<0, 1>()) + .def("nonzero_keys", [](const IntPairs& s) { + return py::make_key_iterator(NonZeroIterator>(s.begin()), NonZeroSentinel()); + }, py::keep_alive<0, 1>()) + ; + + +#if 0 + // Obsolete: special data structure for exposing custom iterator types to 
python + // kept here for illustrative purposes because there might be some use cases which + // are not covered by the much simpler py::make_iterator + + struct PySequenceIterator { + PySequenceIterator(const Sequence &seq, py::object ref) : seq(seq), ref(ref) { } + + float next() { + if (index == seq.size()) + throw py::stop_iteration(); + return seq[index++]; + } + + const Sequence &seq; + py::object ref; // keep a reference + size_t index = 0; + }; + + py::class_(seq, "Iterator") + .def("__iter__", [](PySequenceIterator &it) -> PySequenceIterator& { return it; }) + .def("__next__", &PySequenceIterator::next); + + On the actual Sequence object, the iterator would be constructed as follows: + .def("__iter__", [](py::object s) { return PySequenceIterator(s.cast(), s); }) +#endif + + // test_python_iterator_in_cpp + m.def("object_to_list", [](py::object o) { + auto l = py::list(); + for (auto item : o) { + l.append(item); + } + return l; + }); + + m.def("iterator_to_list", [](py::iterator it) { + auto l = py::list(); + while (it != py::iterator::sentinel()) { + l.append(*it); + ++it; + } + return l; + }); + + // test_sequence_length: check that Python sequences can be converted to py::sequence. 
+ m.def("sequence_length", [](py::sequence seq) { return seq.size(); }); + + // Make sure that py::iterator works with std algorithms + m.def("count_none", [](py::object o) { + return std::count_if(o.begin(), o.end(), [](py::handle h) { return h.is_none(); }); + }); + + m.def("find_none", [](py::object o) { + auto it = std::find_if(o.begin(), o.end(), [](py::handle h) { return h.is_none(); }); + return it->is_none(); + }); + + m.def("count_nonzeros", [](py::dict d) { + return std::count_if(d.begin(), d.end(), [](std::pair p) { + return p.second.cast() != 0; + }); + }); + + m.def("tuple_iterator", &test_random_access_iterator); + m.def("list_iterator", &test_random_access_iterator); + m.def("sequence_iterator", &test_random_access_iterator); + + // test_iterator_passthrough + // #181: iterator passthrough did not compile + m.def("iterator_passthrough", [](py::iterator s) -> py::iterator { + return py::make_iterator(std::begin(s), std::end(s)); + }); + + // test_iterator_rvp + // #388: Can't make iterators via make_iterator() with different r/v policies + static std::vector list = { 1, 2, 3 }; + m.def("make_iterator_1", []() { return py::make_iterator(list); }); + m.def("make_iterator_2", []() { return py::make_iterator(list); }); +} diff --git a/diffvg/pybind11/tests/test_sequences_and_iterators.py b/diffvg/pybind11/tests/test_sequences_and_iterators.py new file mode 100644 index 0000000000000000000000000000000000000000..8f6c0c4bbdf71bb45759d83a630f910a4f117ecd --- /dev/null +++ b/diffvg/pybind11/tests/test_sequences_and_iterators.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import sequences_and_iterators as m +from pybind11_tests import ConstructorStats + + +def isclose(a, b, rel_tol=1e-05, abs_tol=0.0): + """Like math.isclose() from Python 3.5""" + return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) + + +def allclose(a_list, b_list, rel_tol=1e-05, abs_tol=0.0): + return all(isclose(a, b, rel_tol=rel_tol, 
abs_tol=abs_tol) for a, b in zip(a_list, b_list)) + + +def test_generalized_iterators(): + assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero()) == [(1, 2), (3, 4)] + assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero()) == [(1, 2)] + assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero()) == [] + + assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero_keys()) == [1, 3] + assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero_keys()) == [1] + assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero_keys()) == [] + + # __next__ must continue to raise StopIteration + it = m.IntPairs([(0, 0)]).nonzero() + for _ in range(3): + with pytest.raises(StopIteration): + next(it) + + it = m.IntPairs([(0, 0)]).nonzero_keys() + for _ in range(3): + with pytest.raises(StopIteration): + next(it) + + +def test_sliceable(): + sliceable = m.Sliceable(100) + assert sliceable[::] == (0, 100, 1) + assert sliceable[10::] == (10, 100, 1) + assert sliceable[:10:] == (0, 10, 1) + assert sliceable[::10] == (0, 100, 10) + assert sliceable[-10::] == (90, 100, 1) + assert sliceable[:-10:] == (0, 90, 1) + assert sliceable[::-10] == (99, -1, -10) + assert sliceable[50:60:1] == (50, 60, 1) + assert sliceable[50:60:-1] == (50, 60, -1) + + +def test_sequence(): + cstats = ConstructorStats.get(m.Sequence) + + s = m.Sequence(5) + assert cstats.values() == ['of size', '5'] + + assert "Sequence" in repr(s) + assert len(s) == 5 + assert s[0] == 0 and s[3] == 0 + assert 12.34 not in s + s[0], s[3] = 12.34, 56.78 + assert 12.34 in s + assert isclose(s[0], 12.34) and isclose(s[3], 56.78) + + rev = reversed(s) + assert cstats.values() == ['of size', '5'] + + rev2 = s[::-1] + assert cstats.values() == ['of size', '5'] + + it = iter(m.Sequence(0)) + for _ in range(3): # __next__ must continue to raise StopIteration + with pytest.raises(StopIteration): + next(it) + assert cstats.values() == ['of size', '0'] + + expected = [0, 56.78, 0, 0, 12.34] + assert allclose(rev, 
expected) + assert allclose(rev2, expected) + assert rev == rev2 + + rev[0::2] = m.Sequence([2.0, 2.0, 2.0]) + assert cstats.values() == ['of size', '3', 'from std::vector'] + + assert allclose(rev, [2, 56.78, 2, 0, 2]) + + assert cstats.alive() == 4 + del it + assert cstats.alive() == 3 + del s + assert cstats.alive() == 2 + del rev + assert cstats.alive() == 1 + del rev2 + assert cstats.alive() == 0 + + assert cstats.values() == [] + assert cstats.default_constructions == 0 + assert cstats.copy_constructions == 0 + assert cstats.move_constructions >= 1 + assert cstats.copy_assignments == 0 + assert cstats.move_assignments == 0 + + +def test_sequence_length(): + """#2076: Exception raised by len(arg) should be propagated """ + class BadLen(RuntimeError): + pass + + class SequenceLike(): + def __getitem__(self, i): + return None + + def __len__(self): + raise BadLen() + + with pytest.raises(BadLen): + m.sequence_length(SequenceLike()) + + assert m.sequence_length([1, 2, 3]) == 3 + assert m.sequence_length("hello") == 5 + + +def test_map_iterator(): + sm = m.StringMap({'hi': 'bye', 'black': 'white'}) + assert sm['hi'] == 'bye' + assert len(sm) == 2 + assert sm['black'] == 'white' + + with pytest.raises(KeyError): + assert sm['orange'] + sm['orange'] = 'banana' + assert sm['orange'] == 'banana' + + expected = {'hi': 'bye', 'black': 'white', 'orange': 'banana'} + for k in sm: + assert sm[k] == expected[k] + for k, v in sm.items(): + assert v == expected[k] + + it = iter(m.StringMap({})) + for _ in range(3): # __next__ must continue to raise StopIteration + with pytest.raises(StopIteration): + next(it) + + +def test_python_iterator_in_cpp(): + t = (1, 2, 3) + assert m.object_to_list(t) == [1, 2, 3] + assert m.object_to_list(iter(t)) == [1, 2, 3] + assert m.iterator_to_list(iter(t)) == [1, 2, 3] + + with pytest.raises(TypeError) as excinfo: + m.object_to_list(1) + assert "object is not iterable" in str(excinfo.value) + + with pytest.raises(TypeError) as excinfo: + 
m.iterator_to_list(1) + assert "incompatible function arguments" in str(excinfo.value) + + def bad_next_call(): + raise RuntimeError("py::iterator::advance() should propagate errors") + + with pytest.raises(RuntimeError) as excinfo: + m.iterator_to_list(iter(bad_next_call, None)) + assert str(excinfo.value) == "py::iterator::advance() should propagate errors" + + lst = [1, None, 0, None] + assert m.count_none(lst) == 2 + assert m.find_none(lst) is True + assert m.count_nonzeros({"a": 0, "b": 1, "c": 2}) == 2 + + r = range(5) + assert all(m.tuple_iterator(tuple(r))) + assert all(m.list_iterator(list(r))) + assert all(m.sequence_iterator(r)) + + +def test_iterator_passthrough(): + """#181: iterator passthrough did not compile""" + from pybind11_tests.sequences_and_iterators import iterator_passthrough + + assert list(iterator_passthrough(iter([3, 5, 7, 9, 11, 13, 15]))) == [3, 5, 7, 9, 11, 13, 15] + + +def test_iterator_rvp(): + """#388: Can't make iterators via make_iterator() with different r/v policies """ + import pybind11_tests.sequences_and_iterators as m + + assert list(m.make_iterator_1()) == [1, 2, 3] + assert list(m.make_iterator_2()) == [1, 2, 3] + assert not isinstance(m.make_iterator_1(), type(m.make_iterator_2())) diff --git a/diffvg/pybind11/tests/test_smart_ptr.cpp b/diffvg/pybind11/tests/test_smart_ptr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bea90691d4a2703ee7a92ed3d5b975835a7f6013 --- /dev/null +++ b/diffvg/pybind11/tests/test_smart_ptr.cpp @@ -0,0 +1,369 @@ +/* + tests/test_smart_ptr.cpp -- binding classes with custom reference counting, + implicit conversions between types + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#if defined(_MSC_VER) && _MSC_VER < 1910 +# pragma warning(disable: 4702) // unreachable code in system header +#endif + +#include "pybind11_tests.h" +#include "object.h" + +// Make pybind aware of the ref-counted wrapper type (s): + +// ref is a wrapper for 'Object' which uses intrusive reference counting +// It is always possible to construct a ref from an Object* pointer without +// possible inconsistencies, hence the 'true' argument at the end. +PYBIND11_DECLARE_HOLDER_TYPE(T, ref, true); +// Make pybind11 aware of the non-standard getter member function +namespace pybind11 { namespace detail { + template + struct holder_helper> { + static const T *get(const ref &p) { return p.get_ptr(); } + }; +}} + +// The following is not required anymore for std::shared_ptr, but it should compile without error: +PYBIND11_DECLARE_HOLDER_TYPE(T, std::shared_ptr); + +// This is just a wrapper around unique_ptr, but with extra fields to deliberately bloat up the +// holder size to trigger the non-simple-layout internal instance layout for single inheritance with +// large holder type: +template class huge_unique_ptr { + std::unique_ptr ptr; + uint64_t padding[10]; +public: + huge_unique_ptr(T *p) : ptr(p) {}; + T *get() { return ptr.get(); } +}; +PYBIND11_DECLARE_HOLDER_TYPE(T, huge_unique_ptr); + +// Simple custom holder that works like unique_ptr +template +class custom_unique_ptr { + std::unique_ptr impl; +public: + custom_unique_ptr(T* p) : impl(p) { } + T* get() const { return impl.get(); } + T* release_ptr() { return impl.release(); } +}; +PYBIND11_DECLARE_HOLDER_TYPE(T, custom_unique_ptr); + +// Simple custom holder that works like shared_ptr and has operator& overload +// To obtain address of an instance of this holder pybind should use std::addressof +// Attempt to get address via operator& may leads to segmentation fault +template +class shared_ptr_with_addressof_operator { + std::shared_ptr impl; +public: + shared_ptr_with_addressof_operator( ) = default; + 
shared_ptr_with_addressof_operator(T* p) : impl(p) { } + T* get() const { return impl.get(); } + T** operator&() { throw std::logic_error("Call of overloaded operator& is not expected"); } +}; +PYBIND11_DECLARE_HOLDER_TYPE(T, shared_ptr_with_addressof_operator); + +// Simple custom holder that works like unique_ptr and has operator& overload +// To obtain address of an instance of this holder pybind should use std::addressof +// Attempt to get address via operator& may leads to segmentation fault +template +class unique_ptr_with_addressof_operator { + std::unique_ptr impl; +public: + unique_ptr_with_addressof_operator() = default; + unique_ptr_with_addressof_operator(T* p) : impl(p) { } + T* get() const { return impl.get(); } + T* release_ptr() { return impl.release(); } + T** operator&() { throw std::logic_error("Call of overloaded operator& is not expected"); } +}; +PYBIND11_DECLARE_HOLDER_TYPE(T, unique_ptr_with_addressof_operator); + + +TEST_SUBMODULE(smart_ptr, m) { + + // test_smart_ptr + + // Object implementation in `object.h` + py::class_> obj(m, "Object"); + obj.def("getRefCount", &Object::getRefCount); + + // Custom object with builtin reference counting (see 'object.h' for the implementation) + class MyObject1 : public Object { + public: + MyObject1(int value) : value(value) { print_created(this, toString()); } + std::string toString() const { return "MyObject1[" + std::to_string(value) + "]"; } + protected: + virtual ~MyObject1() { print_destroyed(this); } + private: + int value; + }; + py::class_>(m, "MyObject1", obj) + .def(py::init()); + py::implicitly_convertible(); + + m.def("make_object_1", []() -> Object * { return new MyObject1(1); }); + m.def("make_object_2", []() -> ref { return new MyObject1(2); }); + m.def("make_myobject1_1", []() -> MyObject1 * { return new MyObject1(4); }); + m.def("make_myobject1_2", []() -> ref { return new MyObject1(5); }); + m.def("print_object_1", [](const Object *obj) { py::print(obj->toString()); }); + 
m.def("print_object_2", [](ref obj) { py::print(obj->toString()); }); + m.def("print_object_3", [](const ref &obj) { py::print(obj->toString()); }); + m.def("print_object_4", [](const ref *obj) { py::print((*obj)->toString()); }); + m.def("print_myobject1_1", [](const MyObject1 *obj) { py::print(obj->toString()); }); + m.def("print_myobject1_2", [](ref obj) { py::print(obj->toString()); }); + m.def("print_myobject1_3", [](const ref &obj) { py::print(obj->toString()); }); + m.def("print_myobject1_4", [](const ref *obj) { py::print((*obj)->toString()); }); + + // Expose constructor stats for the ref type + m.def("cstats_ref", &ConstructorStats::get); + + + // Object managed by a std::shared_ptr<> + class MyObject2 { + public: + MyObject2(const MyObject2 &) = default; + MyObject2(int value) : value(value) { print_created(this, toString()); } + std::string toString() const { return "MyObject2[" + std::to_string(value) + "]"; } + virtual ~MyObject2() { print_destroyed(this); } + private: + int value; + }; + py::class_>(m, "MyObject2") + .def(py::init()); + m.def("make_myobject2_1", []() { return new MyObject2(6); }); + m.def("make_myobject2_2", []() { return std::make_shared(7); }); + m.def("print_myobject2_1", [](const MyObject2 *obj) { py::print(obj->toString()); }); + m.def("print_myobject2_2", [](std::shared_ptr obj) { py::print(obj->toString()); }); + m.def("print_myobject2_3", [](const std::shared_ptr &obj) { py::print(obj->toString()); }); + m.def("print_myobject2_4", [](const std::shared_ptr *obj) { py::print((*obj)->toString()); }); + + // Object managed by a std::shared_ptr<>, additionally derives from std::enable_shared_from_this<> + class MyObject3 : public std::enable_shared_from_this { + public: + MyObject3(const MyObject3 &) = default; + MyObject3(int value) : value(value) { print_created(this, toString()); } + std::string toString() const { return "MyObject3[" + std::to_string(value) + "]"; } + virtual ~MyObject3() { print_destroyed(this); } + private: + 
int value; + }; + py::class_>(m, "MyObject3") + .def(py::init()); + m.def("make_myobject3_1", []() { return new MyObject3(8); }); + m.def("make_myobject3_2", []() { return std::make_shared(9); }); + m.def("print_myobject3_1", [](const MyObject3 *obj) { py::print(obj->toString()); }); + m.def("print_myobject3_2", [](std::shared_ptr obj) { py::print(obj->toString()); }); + m.def("print_myobject3_3", [](const std::shared_ptr &obj) { py::print(obj->toString()); }); + m.def("print_myobject3_4", [](const std::shared_ptr *obj) { py::print((*obj)->toString()); }); + + // test_smart_ptr_refcounting + m.def("test_object1_refcounting", []() { + ref o = new MyObject1(0); + bool good = o->getRefCount() == 1; + py::object o2 = py::cast(o, py::return_value_policy::reference); + // always request (partial) ownership for objects with intrusive + // reference counting even when using the 'reference' RVP + good &= o->getRefCount() == 2; + return good; + }); + + // test_unique_nodelete + // Object with a private destructor + class MyObject4 { + public: + MyObject4(int value) : value{value} { print_created(this); } + int value; + private: + ~MyObject4() { print_destroyed(this); } + }; + py::class_>(m, "MyObject4") + .def(py::init()) + .def_readwrite("value", &MyObject4::value); + + // test_unique_deleter + // Object with std::unique_ptr where D is not matching the base class + // Object with a protected destructor + class MyObject4a { + public: + MyObject4a(int i) { + value = i; + print_created(this); + }; + int value; + protected: + virtual ~MyObject4a() { print_destroyed(this); } + }; + py::class_>(m, "MyObject4a") + .def(py::init()) + .def_readwrite("value", &MyObject4a::value); + + // Object derived but with public destructor and no Deleter in default holder + class MyObject4b : public MyObject4a { + public: + MyObject4b(int i) : MyObject4a(i) { print_created(this); } + ~MyObject4b() { print_destroyed(this); } + }; + py::class_(m, "MyObject4b") + .def(py::init()); + + // 
test_large_holder + class MyObject5 { // managed by huge_unique_ptr + public: + MyObject5(int value) : value{value} { print_created(this); } + ~MyObject5() { print_destroyed(this); } + int value; + }; + py::class_>(m, "MyObject5") + .def(py::init()) + .def_readwrite("value", &MyObject5::value); + + // test_shared_ptr_and_references + struct SharedPtrRef { + struct A { + A() { print_created(this); } + A(const A &) { print_copy_created(this); } + A(A &&) { print_move_created(this); } + ~A() { print_destroyed(this); } + }; + + A value = {}; + std::shared_ptr shared = std::make_shared(); + }; + using A = SharedPtrRef::A; + py::class_>(m, "A"); + py::class_(m, "SharedPtrRef") + .def(py::init<>()) + .def_readonly("ref", &SharedPtrRef::value) + .def_property_readonly("copy", [](const SharedPtrRef &s) { return s.value; }, + py::return_value_policy::copy) + .def_readonly("holder_ref", &SharedPtrRef::shared) + .def_property_readonly("holder_copy", [](const SharedPtrRef &s) { return s.shared; }, + py::return_value_policy::copy) + .def("set_ref", [](SharedPtrRef &, const A &) { return true; }) + .def("set_holder", [](SharedPtrRef &, std::shared_ptr) { return true; }); + + // test_shared_ptr_from_this_and_references + struct SharedFromThisRef { + struct B : std::enable_shared_from_this { + B() { print_created(this); } + B(const B &) : std::enable_shared_from_this() { print_copy_created(this); } + B(B &&) : std::enable_shared_from_this() { print_move_created(this); } + ~B() { print_destroyed(this); } + }; + + B value = {}; + std::shared_ptr shared = std::make_shared(); + }; + using B = SharedFromThisRef::B; + py::class_>(m, "B"); + py::class_(m, "SharedFromThisRef") + .def(py::init<>()) + .def_readonly("bad_wp", &SharedFromThisRef::value) + .def_property_readonly("ref", [](const SharedFromThisRef &s) -> const B & { return *s.shared; }) + .def_property_readonly("copy", [](const SharedFromThisRef &s) { return s.value; }, + py::return_value_policy::copy) + 
.def_readonly("holder_ref", &SharedFromThisRef::shared) + .def_property_readonly("holder_copy", [](const SharedFromThisRef &s) { return s.shared; }, + py::return_value_policy::copy) + .def("set_ref", [](SharedFromThisRef &, const B &) { return true; }) + .def("set_holder", [](SharedFromThisRef &, std::shared_ptr) { return true; }); + + // Issue #865: shared_from_this doesn't work with virtual inheritance + struct SharedFromThisVBase : std::enable_shared_from_this { + SharedFromThisVBase() = default; + SharedFromThisVBase(const SharedFromThisVBase &) = default; + virtual ~SharedFromThisVBase() = default; + }; + struct SharedFromThisVirt : virtual SharedFromThisVBase {}; + static std::shared_ptr sft(new SharedFromThisVirt()); + py::class_>(m, "SharedFromThisVirt") + .def_static("get", []() { return sft.get(); }); + + // test_move_only_holder + struct C { + C() { print_created(this); } + ~C() { print_destroyed(this); } + }; + py::class_>(m, "TypeWithMoveOnlyHolder") + .def_static("make", []() { return custom_unique_ptr(new C); }) + .def_static("make_as_object", []() { return py::cast(custom_unique_ptr(new C)); }); + + // test_holder_with_addressof_operator + struct TypeForHolderWithAddressOf { + TypeForHolderWithAddressOf() { print_created(this); } + TypeForHolderWithAddressOf(const TypeForHolderWithAddressOf &) { print_copy_created(this); } + TypeForHolderWithAddressOf(TypeForHolderWithAddressOf &&) { print_move_created(this); } + ~TypeForHolderWithAddressOf() { print_destroyed(this); } + std::string toString() const { + return "TypeForHolderWithAddressOf[" + std::to_string(value) + "]"; + } + int value = 42; + }; + using HolderWithAddressOf = shared_ptr_with_addressof_operator; + py::class_(m, "TypeForHolderWithAddressOf") + .def_static("make", []() { return HolderWithAddressOf(new TypeForHolderWithAddressOf); }) + .def("get", [](const HolderWithAddressOf &self) { return self.get(); }) + .def("print_object_1", [](const TypeForHolderWithAddressOf *obj) { 
py::print(obj->toString()); }) + .def("print_object_2", [](HolderWithAddressOf obj) { py::print(obj.get()->toString()); }) + .def("print_object_3", [](const HolderWithAddressOf &obj) { py::print(obj.get()->toString()); }) + .def("print_object_4", [](const HolderWithAddressOf *obj) { py::print((*obj).get()->toString()); }); + + // test_move_only_holder_with_addressof_operator + struct TypeForMoveOnlyHolderWithAddressOf { + TypeForMoveOnlyHolderWithAddressOf(int value) : value{value} { print_created(this); } + ~TypeForMoveOnlyHolderWithAddressOf() { print_destroyed(this); } + std::string toString() const { + return "MoveOnlyHolderWithAddressOf[" + std::to_string(value) + "]"; + } + int value; + }; + using MoveOnlyHolderWithAddressOf = unique_ptr_with_addressof_operator; + py::class_(m, "TypeForMoveOnlyHolderWithAddressOf") + .def_static("make", []() { return MoveOnlyHolderWithAddressOf(new TypeForMoveOnlyHolderWithAddressOf(0)); }) + .def_readwrite("value", &TypeForMoveOnlyHolderWithAddressOf::value) + .def("print_object", [](const TypeForMoveOnlyHolderWithAddressOf *obj) { py::print(obj->toString()); }); + + // test_smart_ptr_from_default + struct HeldByDefaultHolder { }; + py::class_(m, "HeldByDefaultHolder") + .def(py::init<>()) + .def_static("load_shared_ptr", [](std::shared_ptr) {}); + + // test_shared_ptr_gc + // #187: issue involving std::shared_ptr<> return value policy & garbage collection + struct ElementBase { + virtual ~ElementBase() { } /* Force creation of virtual table */ + ElementBase() = default; + ElementBase(const ElementBase&) = delete; + }; + py::class_>(m, "ElementBase"); + + struct ElementA : ElementBase { + ElementA(int v) : v(v) { } + int value() { return v; } + int v; + }; + py::class_>(m, "ElementA") + .def(py::init()) + .def("value", &ElementA::value); + + struct ElementList { + void add(std::shared_ptr e) { l.push_back(e); } + std::vector> l; + }; + py::class_>(m, "ElementList") + .def(py::init<>()) + .def("add", &ElementList::add) + 
.def("get", [](ElementList &el) { + py::list list; + for (auto &e : el.l) + list.append(py::cast(e)); + return list; + }); +} diff --git a/diffvg/pybind11/tests/test_smart_ptr.py b/diffvg/pybind11/tests/test_smart_ptr.py new file mode 100644 index 0000000000000000000000000000000000000000..c9267f6878f1c0d912017bd2a6b0d21dd673c32b --- /dev/null +++ b/diffvg/pybind11/tests/test_smart_ptr.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- +import pytest +from pybind11_tests import smart_ptr as m +from pybind11_tests import ConstructorStats + + +def test_smart_ptr(capture): + # Object1 + for i, o in enumerate([m.make_object_1(), m.make_object_2(), m.MyObject1(3)], start=1): + assert o.getRefCount() == 1 + with capture: + m.print_object_1(o) + m.print_object_2(o) + m.print_object_3(o) + m.print_object_4(o) + assert capture == "MyObject1[{i}]\n".format(i=i) * 4 + + for i, o in enumerate([m.make_myobject1_1(), m.make_myobject1_2(), m.MyObject1(6), 7], + start=4): + print(o) + with capture: + if not isinstance(o, int): + m.print_object_1(o) + m.print_object_2(o) + m.print_object_3(o) + m.print_object_4(o) + m.print_myobject1_1(o) + m.print_myobject1_2(o) + m.print_myobject1_3(o) + m.print_myobject1_4(o) + assert capture == "MyObject1[{i}]\n".format(i=i) * (4 if isinstance(o, int) else 8) + + cstats = ConstructorStats.get(m.MyObject1) + assert cstats.alive() == 0 + expected_values = ['MyObject1[{}]'.format(i) for i in range(1, 7)] + ['MyObject1[7]'] * 4 + assert cstats.values() == expected_values + assert cstats.default_constructions == 0 + assert cstats.copy_constructions == 0 + # assert cstats.move_constructions >= 0 # Doesn't invoke any + assert cstats.copy_assignments == 0 + assert cstats.move_assignments == 0 + + # Object2 + for i, o in zip([8, 6, 7], [m.MyObject2(8), m.make_myobject2_1(), m.make_myobject2_2()]): + print(o) + with capture: + m.print_myobject2_1(o) + m.print_myobject2_2(o) + m.print_myobject2_3(o) + m.print_myobject2_4(o) + assert capture == 
"MyObject2[{i}]\n".format(i=i) * 4 + + cstats = ConstructorStats.get(m.MyObject2) + assert cstats.alive() == 1 + o = None + assert cstats.alive() == 0 + assert cstats.values() == ['MyObject2[8]', 'MyObject2[6]', 'MyObject2[7]'] + assert cstats.default_constructions == 0 + assert cstats.copy_constructions == 0 + # assert cstats.move_constructions >= 0 # Doesn't invoke any + assert cstats.copy_assignments == 0 + assert cstats.move_assignments == 0 + + # Object3 + for i, o in zip([9, 8, 9], [m.MyObject3(9), m.make_myobject3_1(), m.make_myobject3_2()]): + print(o) + with capture: + m.print_myobject3_1(o) + m.print_myobject3_2(o) + m.print_myobject3_3(o) + m.print_myobject3_4(o) + assert capture == "MyObject3[{i}]\n".format(i=i) * 4 + + cstats = ConstructorStats.get(m.MyObject3) + assert cstats.alive() == 1 + o = None + assert cstats.alive() == 0 + assert cstats.values() == ['MyObject3[9]', 'MyObject3[8]', 'MyObject3[9]'] + assert cstats.default_constructions == 0 + assert cstats.copy_constructions == 0 + # assert cstats.move_constructions >= 0 # Doesn't invoke any + assert cstats.copy_assignments == 0 + assert cstats.move_assignments == 0 + + # Object + cstats = ConstructorStats.get(m.Object) + assert cstats.alive() == 0 + assert cstats.values() == [] + assert cstats.default_constructions == 10 + assert cstats.copy_constructions == 0 + # assert cstats.move_constructions >= 0 # Doesn't invoke any + assert cstats.copy_assignments == 0 + assert cstats.move_assignments == 0 + + # ref<> + cstats = m.cstats_ref() + assert cstats.alive() == 0 + assert cstats.values() == ['from pointer'] * 10 + assert cstats.default_constructions == 30 + assert cstats.copy_constructions == 12 + # assert cstats.move_constructions >= 0 # Doesn't invoke any + assert cstats.copy_assignments == 30 + assert cstats.move_assignments == 0 + + +def test_smart_ptr_refcounting(): + assert m.test_object1_refcounting() + + +def test_unique_nodelete(): + o = m.MyObject4(23) + assert o.value == 23 + cstats = 
ConstructorStats.get(m.MyObject4) + assert cstats.alive() == 1 + del o + assert cstats.alive() == 1 # Leak, but that's intentional + + +def test_unique_nodelete4a(): + o = m.MyObject4a(23) + assert o.value == 23 + cstats = ConstructorStats.get(m.MyObject4a) + assert cstats.alive() == 1 + del o + assert cstats.alive() == 1 # Leak, but that's intentional + + +def test_unique_deleter(): + o = m.MyObject4b(23) + assert o.value == 23 + cstats4a = ConstructorStats.get(m.MyObject4a) + assert cstats4a.alive() == 2 # Two because of previous test + cstats4b = ConstructorStats.get(m.MyObject4b) + assert cstats4b.alive() == 1 + del o + assert cstats4a.alive() == 1 # Should now only be one leftover from previous test + assert cstats4b.alive() == 0 # Should be deleted + + +def test_large_holder(): + o = m.MyObject5(5) + assert o.value == 5 + cstats = ConstructorStats.get(m.MyObject5) + assert cstats.alive() == 1 + del o + assert cstats.alive() == 0 + + +def test_shared_ptr_and_references(): + s = m.SharedPtrRef() + stats = ConstructorStats.get(m.A) + assert stats.alive() == 2 + + ref = s.ref # init_holder_helper(holder_ptr=false, owned=false) + assert stats.alive() == 2 + assert s.set_ref(ref) + with pytest.raises(RuntimeError) as excinfo: + assert s.set_holder(ref) + assert "Unable to cast from non-held to held instance" in str(excinfo.value) + + copy = s.copy # init_holder_helper(holder_ptr=false, owned=true) + assert stats.alive() == 3 + assert s.set_ref(copy) + assert s.set_holder(copy) + + holder_ref = s.holder_ref # init_holder_helper(holder_ptr=true, owned=false) + assert stats.alive() == 3 + assert s.set_ref(holder_ref) + assert s.set_holder(holder_ref) + + holder_copy = s.holder_copy # init_holder_helper(holder_ptr=true, owned=true) + assert stats.alive() == 3 + assert s.set_ref(holder_copy) + assert s.set_holder(holder_copy) + + del ref, copy, holder_ref, holder_copy, s + assert stats.alive() == 0 + + +def test_shared_ptr_from_this_and_references(): + s = 
m.SharedFromThisRef() + stats = ConstructorStats.get(m.B) + assert stats.alive() == 2 + + ref = s.ref # init_holder_helper(holder_ptr=false, owned=false, bad_wp=false) + assert stats.alive() == 2 + assert s.set_ref(ref) + assert s.set_holder(ref) # std::enable_shared_from_this can create a holder from a reference + + bad_wp = s.bad_wp # init_holder_helper(holder_ptr=false, owned=false, bad_wp=true) + assert stats.alive() == 2 + assert s.set_ref(bad_wp) + with pytest.raises(RuntimeError) as excinfo: + assert s.set_holder(bad_wp) + assert "Unable to cast from non-held to held instance" in str(excinfo.value) + + copy = s.copy # init_holder_helper(holder_ptr=false, owned=true, bad_wp=false) + assert stats.alive() == 3 + assert s.set_ref(copy) + assert s.set_holder(copy) + + holder_ref = s.holder_ref # init_holder_helper(holder_ptr=true, owned=false, bad_wp=false) + assert stats.alive() == 3 + assert s.set_ref(holder_ref) + assert s.set_holder(holder_ref) + + holder_copy = s.holder_copy # init_holder_helper(holder_ptr=true, owned=true, bad_wp=false) + assert stats.alive() == 3 + assert s.set_ref(holder_copy) + assert s.set_holder(holder_copy) + + del ref, bad_wp, copy, holder_ref, holder_copy, s + assert stats.alive() == 0 + + z = m.SharedFromThisVirt.get() + y = m.SharedFromThisVirt.get() + assert y is z + + +def test_move_only_holder(): + a = m.TypeWithMoveOnlyHolder.make() + b = m.TypeWithMoveOnlyHolder.make_as_object() + stats = ConstructorStats.get(m.TypeWithMoveOnlyHolder) + assert stats.alive() == 2 + del b + assert stats.alive() == 1 + del a + assert stats.alive() == 0 + + +def test_holder_with_addressof_operator(): + # this test must not throw exception from c++ + a = m.TypeForHolderWithAddressOf.make() + a.print_object_1() + a.print_object_2() + a.print_object_3() + a.print_object_4() + + stats = ConstructorStats.get(m.TypeForHolderWithAddressOf) + assert stats.alive() == 1 + + np = m.TypeForHolderWithAddressOf.make() + assert stats.alive() == 2 + del a + 
assert stats.alive() == 1 + del np + assert stats.alive() == 0 + + b = m.TypeForHolderWithAddressOf.make() + c = b + assert b.get() is c.get() + assert stats.alive() == 1 + + del b + assert stats.alive() == 1 + + del c + assert stats.alive() == 0 + + +def test_move_only_holder_with_addressof_operator(): + a = m.TypeForMoveOnlyHolderWithAddressOf.make() + a.print_object() + + stats = ConstructorStats.get(m.TypeForMoveOnlyHolderWithAddressOf) + assert stats.alive() == 1 + + a.value = 42 + assert a.value == 42 + + del a + assert stats.alive() == 0 + + +def test_smart_ptr_from_default(): + instance = m.HeldByDefaultHolder() + with pytest.raises(RuntimeError) as excinfo: + m.HeldByDefaultHolder.load_shared_ptr(instance) + assert "Unable to load a custom holder type from a " \ + "default-holder instance" in str(excinfo.value) + + +def test_shared_ptr_gc(): + """#187: issue involving std::shared_ptr<> return value policy & garbage collection""" + el = m.ElementList() + for i in range(10): + el.add(m.ElementA(i)) + pytest.gc_collect() + for i, v in enumerate(el.get()): + assert i == v.value() diff --git a/diffvg/pybind11/tests/test_stl.cpp b/diffvg/pybind11/tests/test_stl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..928635788e484d98f3cc8cf701d9221bef0a8bac --- /dev/null +++ b/diffvg/pybind11/tests/test_stl.cpp @@ -0,0 +1,324 @@ +/* + tests/test_stl.cpp -- STL type casters + + Copyright (c) 2017 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#include "pybind11_tests.h" +#include "constructor_stats.h" +#include + +#include +#include + +// Test with `std::variant` in C++17 mode, or with `boost::variant` in C++11/14 +#if PYBIND11_HAS_VARIANT +using std::variant; +#elif defined(PYBIND11_TEST_BOOST) && (!defined(_MSC_VER) || _MSC_VER >= 1910) +# include +# define PYBIND11_HAS_VARIANT 1 +using boost::variant; + +namespace pybind11 { namespace detail { +template +struct type_caster> : variant_caster> {}; + +template <> +struct visit_helper { + template + static auto call(Args &&...args) -> decltype(boost::apply_visitor(args...)) { + return boost::apply_visitor(args...); + } +}; +}} // namespace pybind11::detail +#endif + +PYBIND11_MAKE_OPAQUE(std::vector>); + +/// Issue #528: templated constructor +struct TplCtorClass { + template TplCtorClass(const T &) { } + bool operator==(const TplCtorClass &) const { return true; } +}; + +namespace std { + template <> + struct hash { size_t operator()(const TplCtorClass &) const { return 0; } }; +} + + +template