diff --git a/.gitattributes b/.gitattributes index 2cef747209554e21db58b2502f4d197f2a81c5f2..a1a49010f13bbe57099eccba71adb6bf148cfea5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -6,3 +6,5 @@ model_output/incremental_1_logs/tokenizer.model filter=lfs diff=lfs merge=lfs -text merged_tinyllama_logger/tokenizer.model filter=lfs diff=lfs merge=lfs -text model_output/incremental_1_logs/checkpoint-575/scheduler.pt filter=lfs diff=lfs merge=lfs -text +model_output/incremental_1_logs/checkpoint-575/tokenizer.model filter=lfs diff=lfs merge=lfs -text +model_output/incremental_1_logs/checkpoint-575/optimizer.pt filter=lfs diff=lfs merge=lfs -text diff --git a/model_output/incremental_1_logs/checkpoint-575/optimizer.pt b/model_output/incremental_1_logs/checkpoint-575/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..2130a7cccf9b0609ee76ded71edbca8904457791 --- /dev/null +++ b/model_output/incremental_1_logs/checkpoint-575/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68639301c10fe5b5553087ab7a7ab06e846c3635bea99c6d43fbdd0a72028253 +size 9133306 diff --git a/model_output/incremental_1_logs/checkpoint-575/tokenizer.model b/model_output/incremental_1_logs/checkpoint-575/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899 --- /dev/null +++ b/model_output/incremental_1_logs/checkpoint-575/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 +size 499723 diff --git a/model_output/incremental_1_logs/checkpoint-575/training_args.bin b/model_output/incremental_1_logs/checkpoint-575/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..18f5b155265f235de6e730f68a99508b4627e0f5 --- /dev/null +++ b/model_output/incremental_1_logs/checkpoint-575/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:290f55cf786a3f16cb2d84b8109bbdadf1005b363093d17cbd0bdbdb9cb95fdd +size 5176 diff --git a/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/INSTALLER b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/LICENSE b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9ecdc7586d08805bc984539f6672476e86e538b6 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2005-2021 Fredrik Johansson and mpmath contributors + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. diff --git a/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/METADATA b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..994b48acdba5cd0fdfb28cd1fbb0a84ebf81cba5 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/METADATA @@ -0,0 +1,233 @@ +Metadata-Version: 2.1 +Name: mpmath +Version: 1.3.0 +Summary: Python library for arbitrary-precision floating-point arithmetic +Home-page: http://mpmath.org/ +Author: Fredrik Johansson +Author-email: fredrik.johansson@gmail.com +License: BSD +Project-URL: Source, https://github.com/fredrik-johansson/mpmath +Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues +Project-URL: Documentation, http://mpmath.org/doc/current/ +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +License-File: LICENSE +Provides-Extra: develop +Requires-Dist: pytest (>=4.6) ; extra == 'develop' +Requires-Dist: pycodestyle ; extra == 'develop' +Requires-Dist: pytest-cov ; extra == 'develop' +Requires-Dist: codecov ; extra == 'develop' +Requires-Dist: wheel ; extra == 'develop' +Provides-Extra: docs +Requires-Dist: sphinx ; extra == 'docs' +Provides-Extra: gmpy +Requires-Dist: gmpy2 (>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy' +Provides-Extra: tests +Requires-Dist: pytest (>=4.6) ; extra == 'tests' + +mpmath +====== + +|pypi version| |Build status| |Code coverage status| |Zenodo Badge| + +.. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg + :target: https://pypi.python.org/pypi/mpmath +.. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg + :target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test +.. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg + :target: https://codecov.io/gh/fredrik-johansson/mpmath +.. 
|Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg + :target: https://zenodo.org/badge/latestdoi/2934512 + +A Python library for arbitrary-precision floating-point arithmetic. + +Website: http://mpmath.org/ +Main author: Fredrik Johansson + +Mpmath is free software released under the New BSD License (see the +LICENSE file for details) + +0. History and credits +---------------------- + +The following people (among others) have contributed major patches +or new features to mpmath: + +* Pearu Peterson +* Mario Pernici +* Ondrej Certik +* Vinzent Steinberg +* Nimish Telang +* Mike Taschuk +* Case Van Horsen +* Jorn Baayen +* Chris Smith +* Juan Arias de Reyna +* Ioannis Tziakos +* Aaron Meurer +* Stefan Krastanov +* Ken Allen +* Timo Hartmann +* Sergey B Kirpichev +* Kris Kuhlman +* Paul Masson +* Michael Kagalenko +* Jonathan Warner +* Max Gaukler +* Guillermo Navas-Palencia +* Nike Dattani + +Numerous other people have contributed by reporting bugs, +requesting new features, or suggesting improvements to the +documentation. + +For a detailed changelog, including individual contributions, +see the CHANGES file. + +Fredrik's work on mpmath during summer 2008 was sponsored by Google +as part of the Google Summer of Code program. + +Fredrik's work on mpmath during summer 2009 was sponsored by the +American Institute of Mathematics under the support of the National Science +Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms). + +Any opinions, findings, and conclusions or recommendations expressed in this +material are those of the author(s) and do not necessarily reflect the +views of the sponsors. + +Credit also goes to: + +* The authors of the GMP library and the Python wrapper + gmpy, enabling mpmath to become much faster at + high precision +* The authors of MPFR, pari/gp, MPFUN, and other arbitrary- + precision libraries, whose documentation has been helpful + for implementing many of the algorithms in mpmath +* Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik; + Wolfram Research for MathWorld and the Wolfram Functions site. + These are the main references used for special functions + implementations. +* George Brandl for developing the Sphinx documentation tool + used to build mpmath's documentation + +Release history: + +* Version 1.3.0 released on March 7, 2023 +* Version 1.2.0 released on February 1, 2021 +* Version 1.1.0 released on December 11, 2018 +* Version 1.0.0 released on September 27, 2017 +* Version 0.19 released on June 10, 2014 +* Version 0.18 released on December 31, 2013 +* Version 0.17 released on February 1, 2011 +* Version 0.16 released on September 24, 2010 +* Version 0.15 released on June 6, 2010 +* Version 0.14 released on February 5, 2010 +* Version 0.13 released on August 13, 2009 +* Version 0.12 released on June 9, 2009 +* Version 0.11 released on January 26, 2009 +* Version 0.10 released on October 15, 2008 +* Version 0.9 released on August 23, 2008 +* Version 0.8 released on April 20, 2008 +* Version 0.7 released on March 12, 2008 +* Version 0.6 released on January 13, 2008 +* Version 0.5 released on November 24, 2007 +* Version 0.4 released on November 3, 2007 +* Version 0.3 released on October 5, 2007 +* Version 0.2 released on October 2, 2007 +* Version 0.1 released on September 27, 2007 + +1. Download & installation +-------------------------- + +Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested +with CPython 2.7, 3.5 through 3.7 and for PyPy. 
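(For orientation, since this diff vendors the full library: a minimal sketch of the core mpmath API. ``mp.dps`` sets the working precision in decimal digits; ``mpf`` and ``mpc`` are the arbitrary-precision real and complex types.)

    from mpmath import mp, mpf, mpc, pi, sqrt

    mp.dps = 50                 # work with 50 significant decimal digits
    print(+pi)                  # pi evaluated to the current precision
    print(sqrt(mpf(2)))         # arbitrary-precision real arithmetic
    print(sqrt(mpc(-1)))        # complex arithmetic: (0.0 + 1.0j)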
+ +The latest release of mpmath can be downloaded from the mpmath +website and from https://github.com/fredrik-johansson/mpmath/releases + +It should also be available in the Python Package Index at +https://pypi.python.org/pypi/mpmath + +To install the latest release of Mpmath with pip, simply run + +``pip install mpmath`` + +Or unpack the mpmath archive and run + +``python setup.py install`` + +Mpmath can also be installed using + +``python -m easy_install mpmath`` + +The latest development code is available from +https://github.com/fredrik-johansson/mpmath + +See the main documentation for more detailed instructions. + +2. Running tests +---------------- + +The unit tests in mpmath/tests/ can be run via the script +runtests.py, but it is recommended to run them with py.test +(https://pytest.org/), especially +to generate more useful reports in case there are failures. + +You may also want to check out the demo scripts in the demo +directory. + +The master branch is automatically tested by Travis CI. + +3. Documentation +---------------- + +Documentation in reStructuredText format is available in the +doc directory included with the source package. These files +are human-readable, but can be compiled to prettier HTML using +the build.py script (requires Sphinx, http://sphinx.pocoo.org/). + +See setup.txt in the documentation for more information. + +The most recent documentation is also available in HTML format: + +http://mpmath.org/doc/current/ + +4. Known problems +----------------- + +Mpmath is a work in progress. Major issues include: + +* Some functions may return incorrect values when given extremely + large arguments or arguments very close to singularities. + +* Directed rounding works for arithmetic operations. It is implemented + heuristically for other operations, and their results may be off by one + or two units in the last place (even if otherwise accurate). + +* Some IEEE 754 features are not available. Infinities and NaN are + partially supported; denormal rounding is currently not available + at all. + +* The interface for switching precision and rounding is not finalized. + The current method is not thread-safe. + +5.
Help and bug reports +----------------------- + +General questions and comments can be sent to the mpmath mailinglist, +mpmath@googlegroups.com + +You can also report bugs and send patches to the mpmath issue tracker, +https://github.com/fredrik-johansson/mpmath/issues diff --git a/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/RECORD b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f839ac4e3356b6688f0afbf53f73853ccd3737fe --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/RECORD @@ -0,0 +1,180 @@ +mpmath-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537 +mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630 +mpmath-1.3.0.dist-info/RECORD,, +mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7 +mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765 +mpmath/__pycache__/__init__.cpython-39.pyc,, +mpmath/__pycache__/ctx_base.cpython-39.pyc,, +mpmath/__pycache__/ctx_fp.cpython-39.pyc,, +mpmath/__pycache__/ctx_iv.cpython-39.pyc,, +mpmath/__pycache__/ctx_mp.cpython-39.pyc,, +mpmath/__pycache__/ctx_mp_python.cpython-39.pyc,, +mpmath/__pycache__/function_docs.cpython-39.pyc,, +mpmath/__pycache__/identification.cpython-39.pyc,, +mpmath/__pycache__/math2.cpython-39.pyc,, +mpmath/__pycache__/rational.cpython-39.pyc,, +mpmath/__pycache__/usertools.cpython-39.pyc,, +mpmath/__pycache__/visualization.cpython-39.pyc,, +mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162 +mpmath/calculus/__pycache__/__init__.cpython-39.pyc,, +mpmath/calculus/__pycache__/approximation.cpython-39.pyc,, +mpmath/calculus/__pycache__/calculus.cpython-39.pyc,, +mpmath/calculus/__pycache__/differentiation.cpython-39.pyc,, +mpmath/calculus/__pycache__/extrapolation.cpython-39.pyc,, +mpmath/calculus/__pycache__/inverselaplace.cpython-39.pyc,, +mpmath/calculus/__pycache__/odes.cpython-39.pyc,, +mpmath/calculus/__pycache__/optimization.cpython-39.pyc,, +mpmath/calculus/__pycache__/polynomials.cpython-39.pyc,, +mpmath/calculus/__pycache__/quadrature.cpython-39.pyc,, +mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817 +mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112 +mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226 +mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306 +mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056 +mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908 +mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856 +mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877 +mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432 +mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985 +mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572 +mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211 +mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452 
+mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815 +mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512 +mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330 +mpmath/functions/__pycache__/__init__.cpython-39.pyc,, +mpmath/functions/__pycache__/bessel.cpython-39.pyc,, +mpmath/functions/__pycache__/elliptic.cpython-39.pyc,, +mpmath/functions/__pycache__/expintegrals.cpython-39.pyc,, +mpmath/functions/__pycache__/factorials.cpython-39.pyc,, +mpmath/functions/__pycache__/functions.cpython-39.pyc,, +mpmath/functions/__pycache__/hypergeometric.cpython-39.pyc,, +mpmath/functions/__pycache__/orthogonal.cpython-39.pyc,, +mpmath/functions/__pycache__/qfunctions.cpython-39.pyc,, +mpmath/functions/__pycache__/rszeta.cpython-39.pyc,, +mpmath/functions/__pycache__/signals.cpython-39.pyc,, +mpmath/functions/__pycache__/theta.cpython-39.pyc,, +mpmath/functions/__pycache__/zeta.cpython-39.pyc,, +mpmath/functions/__pycache__/zetazeros.cpython-39.pyc,, +mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938 +mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237 +mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644 +mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273 +mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100 +mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570 +mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097 +mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633 +mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184 +mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703 +mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320 +mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410 +mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858 +mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253 +mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790 +mpmath/libmp/__pycache__/__init__.cpython-39.pyc,, +mpmath/libmp/__pycache__/backend.cpython-39.pyc,, +mpmath/libmp/__pycache__/gammazeta.cpython-39.pyc,, +mpmath/libmp/__pycache__/libelefun.cpython-39.pyc,, +mpmath/libmp/__pycache__/libhyper.cpython-39.pyc,, +mpmath/libmp/__pycache__/libintmath.cpython-39.pyc,, +mpmath/libmp/__pycache__/libmpc.cpython-39.pyc,, +mpmath/libmp/__pycache__/libmpf.cpython-39.pyc,, +mpmath/libmp/__pycache__/libmpi.cpython-39.pyc,, +mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360 +mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469 +mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861 +mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624 +mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688 +mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875 +mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021 +mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622 +mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561 
+mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94 +mpmath/matrices/__pycache__/__init__.cpython-39.pyc,, +mpmath/matrices/__pycache__/calculus.cpython-39.pyc,, +mpmath/matrices/__pycache__/eigen.cpython-39.pyc,, +mpmath/matrices/__pycache__/eigen_symmetric.cpython-39.pyc,, +mpmath/matrices/__pycache__/linalg.cpython-39.pyc,, +mpmath/matrices/__pycache__/matrices.cpython-39.pyc,, +mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609 +mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394 +mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534 +mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958 +mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331 +mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976 +mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +mpmath/tests/__pycache__/__init__.cpython-39.pyc,, +mpmath/tests/__pycache__/extratest_gamma.cpython-39.pyc,, +mpmath/tests/__pycache__/extratest_zeta.cpython-39.pyc,, +mpmath/tests/__pycache__/runtests.cpython-39.pyc,, +mpmath/tests/__pycache__/test_basic_ops.cpython-39.pyc,, +mpmath/tests/__pycache__/test_bitwise.cpython-39.pyc,, +mpmath/tests/__pycache__/test_calculus.cpython-39.pyc,, +mpmath/tests/__pycache__/test_compatibility.cpython-39.pyc,, +mpmath/tests/__pycache__/test_convert.cpython-39.pyc,, +mpmath/tests/__pycache__/test_diff.cpython-39.pyc,, +mpmath/tests/__pycache__/test_division.cpython-39.pyc,, +mpmath/tests/__pycache__/test_eigen.cpython-39.pyc,, +mpmath/tests/__pycache__/test_eigen_symmetric.cpython-39.pyc,, +mpmath/tests/__pycache__/test_elliptic.cpython-39.pyc,, +mpmath/tests/__pycache__/test_fp.cpython-39.pyc,, +mpmath/tests/__pycache__/test_functions.cpython-39.pyc,, +mpmath/tests/__pycache__/test_functions2.cpython-39.pyc,, +mpmath/tests/__pycache__/test_gammazeta.cpython-39.pyc,, +mpmath/tests/__pycache__/test_hp.cpython-39.pyc,, +mpmath/tests/__pycache__/test_identify.cpython-39.pyc,, +mpmath/tests/__pycache__/test_interval.cpython-39.pyc,, +mpmath/tests/__pycache__/test_levin.cpython-39.pyc,, +mpmath/tests/__pycache__/test_linalg.cpython-39.pyc,, +mpmath/tests/__pycache__/test_matrices.cpython-39.pyc,, +mpmath/tests/__pycache__/test_mpmath.cpython-39.pyc,, +mpmath/tests/__pycache__/test_ode.cpython-39.pyc,, +mpmath/tests/__pycache__/test_pickle.cpython-39.pyc,, +mpmath/tests/__pycache__/test_power.cpython-39.pyc,, +mpmath/tests/__pycache__/test_quad.cpython-39.pyc,, +mpmath/tests/__pycache__/test_rootfinding.cpython-39.pyc,, +mpmath/tests/__pycache__/test_special.cpython-39.pyc,, +mpmath/tests/__pycache__/test_str.cpython-39.pyc,, +mpmath/tests/__pycache__/test_summation.cpython-39.pyc,, +mpmath/tests/__pycache__/test_trig.cpython-39.pyc,, +mpmath/tests/__pycache__/test_visualization.cpython-39.pyc,, +mpmath/tests/__pycache__/torture.cpython-39.pyc,, +mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228 +mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003 +mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189 +mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348 +mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686 +mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187 
+mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306 +mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834 +mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466 +mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340 +mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905 +mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778 +mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225 +mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997 +mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955 +mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990 +mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917 +mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461 +mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692 +mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527 +mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090 +mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440 +mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944 +mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196 +mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822 +mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401 +mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227 +mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893 +mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132 +mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848 +mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544 +mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035 +mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799 +mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944 +mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868 +mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029 +mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627 diff --git a/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/WHEEL b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..57e3d840d59a650ac5bccbad5baeec47d155f0ad --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/top_level.txt b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..dda7c273a8dd1c6adffa9d2d9901e0ce6876f4ac --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath-1.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +mpmath diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/__init__.cpython-39.pyc 
b/phivenv/Lib/site-packages/mpmath/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b259ccf05583355b092af800a490b6b56e69a57 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_base.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43d57c0267442140e81b34de6580d3d331b8bfe3 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_base.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_fp.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_fp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58dc74280b122acc53582d20e9a96f3ed28f4f7c Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_fp.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_iv.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_iv.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e05f9d40e8c367f5e03901d64cf3acbbe2d878e Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_iv.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_mp.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_mp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08c112f0f83ad76dc1dcb7702bffeb821d7b2b75 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_mp.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a691b68017a3a28b71a54343755af81c4290f52 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/identification.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/identification.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23d6e1cda045be3bb943f175d8398b3dc808ff97 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/identification.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/math2.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/math2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e22cfa07dcd3adb4ce345cfd141257297e5a8334 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/math2.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/rational.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/rational.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cf108a8a34607cf2a77c01bbd94475c579ae257 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/rational.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/usertools.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/usertools.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fba49eca5ce7a32584b6418477123ff401eb8c3e Binary files /dev/null and 
b/phivenv/Lib/site-packages/mpmath/__pycache__/usertools.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/__pycache__/visualization.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/__pycache__/visualization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f666fadc3cebe37e06e628df4443628e8493c929 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/__pycache__/visualization.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/matrices/__init__.py b/phivenv/Lib/site-packages/mpmath/matrices/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..293697b9fcf8bd82d58ac4ff45acd73fadac82f9 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/matrices/__init__.py @@ -0,0 +1,2 @@ +from . import eigen # to set methods +from . import eigen_symmetric # to set methods diff --git a/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..173d0e5524240e8ac532129f0aa2fd2e86b03b45 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/calculus.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/calculus.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..137b7e831b7e95f1cedbf0bcef12df2d2346df89 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/calculus.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/eigen.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/eigen.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96a87eca443eefa5641f436fab27e217e74e0c9f Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/eigen.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6706964696615f6e89d2c4ee10a2588d1b387be2 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/linalg.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/linalg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..341e2c29a1f5664b1fbe4720e3de4e94d6ceb6cc Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/linalg.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/matrices.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/matrices.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bee752f04a73521d05d06cd1f6155c4db189b9e Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/matrices/__pycache__/matrices.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__init__.py b/phivenv/Lib/site-packages/mpmath/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9b137bd106e9ff3cb806c1de8a1311a4a7c1615 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/extratest_gamma.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/extratest_gamma.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4b475dffe63ddbdd035081dd48159b9f671e108 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/extratest_gamma.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a247c65986bfbe1b74dad9e770c06a49adbd7ca2 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/runtests.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/runtests.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ba3ef968b185450c5aeb38b9b3f5c45406338b5 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/runtests.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..367e1f3b6d942f192ebce575f67553a3738f0b6f Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d038c8eef0491b55e09cc5886c6889152bf8cfbf Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_calculus.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_calculus.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ca5ee091f574364e4d4a78b90930372b18be9ad Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_calculus.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7928f9d36b82e3bdd929c8dd8ebe0c76329b6163 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_convert.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_convert.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b58332de8ee86baa921a85074f3e702518eeedce Binary files /dev/null and 
b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_convert.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_diff.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_diff.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b81cfed22349e08ed659adb59e9c6836996dcdc9 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_diff.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_division.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_division.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..187215dd5f2f208c6aa688b03eb9040d2612ca13 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_division.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_eigen.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_eigen.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98ec1cab154c0bdd68c0c6f5e08369fce6364cd0 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_eigen.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..719e7218e3ee899d31b05124a95521b995b9894b Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b41c8937f6fd39af593aa8cb363f903b540659c4 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_fp.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_fp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..108658ad2aabc133607b18e38c5ddcf1cd711b32 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_fp.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_functions.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_functions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae3fed261ec69a8ce129576790c1efd0d9187121 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_functions.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_functions2.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_functions2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7314728ced2a5c05bc1397d72a8b2c893ea0989d Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_functions2.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..47d87aab6f6aa7b3401f47e2c0ce91321889b92d Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_hp.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_hp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9f8286d96397adb405808d833632d64a9d83de4 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_hp.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_identify.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_identify.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9db633f04eb1b96cfbdee8de961cabe64f62bda4 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_identify.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_interval.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_interval.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b55fb850b7cc2daffd5374495e0f0c82c10c66a Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_interval.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_levin.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_levin.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94d1580b651c50c62f16e9166bd7cf421f723fe5 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_levin.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a99be45b0fa475285c1462bc99967c9f72e8ed0 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebf10834b9ee985bbfbb5a3226ebf8f6f8b39ac7 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f05c5407039c9ca47112df78003d571e90065ded Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_ode.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_ode.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1326063a605883b9a66c2bfb67a9aab0290197a Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_ode.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d1b3c3b1b940334ecc7fd67c275af136b6382b49 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_power.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_power.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb726cadf9795534768999fd750af9a5b0a3f84c Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_power.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_quad.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_quad.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9acd77bcaae6822bb247e3fb9fc20c982bb91ea1 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_quad.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..434185b1743bbbd07ed1bbae69b6c37acb50bf41 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_special.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_special.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe2234d32424b2a3d28312c708d969044c6621c4 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_special.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_str.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_str.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae5966513539cbbfee70a8abad2a890b69ca6bfe Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_str.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_summation.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_summation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efb96740135099ab91aa314ea8230daaf4aa711d Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_summation.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_trig.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_trig.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5f8a9af58e3897968d0b5c24aaa84eb8b34b4b6 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_trig.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfca5ac87540f2cb09b8513d03773b1bce1dafb6 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/__pycache__/torture.cpython-39.pyc b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/torture.cpython-39.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..8dfe61dc52ee1b4a11446d5465f6356488b8e641 Binary files /dev/null and b/phivenv/Lib/site-packages/mpmath/tests/__pycache__/torture.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/mpmath/tests/extratest_gamma.py b/phivenv/Lib/site-packages/mpmath/tests/extratest_gamma.py new file mode 100644 index 0000000000000000000000000000000000000000..5a27b61b19aba0abf6bdb8adc16fc1ec7689b67a --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/extratest_gamma.py @@ -0,0 +1,215 @@ +from mpmath import * +from mpmath.libmp import ifac + +import sys +if "-dps" in sys.argv: + maxdps = int(sys.argv[sys.argv.index("-dps")+1]) +else: + maxdps = 1000 + +raise_ = "-raise" in sys.argv + +errcount = 0 + +def check(name, func, z, y): + global errcount + try: + x = func(z) + except: + errcount += 1 + if raise_: + raise + print() + print(name) + print("EXCEPTION") + import traceback + traceback.print_tb(sys.exc_info()[2]) + print() + return + xre = x.real + xim = x.imag + yre = y.real + yim = y.imag + tol = eps*8 + err = 0 + if abs(xre-yre) > abs(yre)*tol: + err = 1 + print() + print("Error! %s (re = %s, wanted %s, err=%s)" % (name, nstr(xre,10), nstr(yre,10), nstr(abs(xre-yre)))) + errcount += 1 + if raise_: + raise SystemExit + if abs(xim-yim) > abs(yim)*tol: + err = 1 + print() + print("Error! %s (im = %s, wanted %s, err=%s)" % (name, nstr(xim,10), nstr(yim,10), nstr(abs(xim-yim)))) + errcount += 1 + if raise_: + raise SystemExit + if not err: + sys.stdout.write("%s ok; " % name) + +def testcase(case): + z, result = case + print("Testing z =", z) + mp.dps = 1010 + z = eval(z) + mp.dps = maxdps + 50 + if result is None: + gamma_val = gamma(z) + loggamma_val = loggamma(z) + factorial_val = factorial(z) + rgamma_val = rgamma(z) + else: + loggamma_val = eval(result) + gamma_val = exp(loggamma_val) + factorial_val = z * gamma_val + rgamma_val = 1/gamma_val + for dps in [5, 10, 15, 25, 40, 60, 90, 120, 250, 600, 1000, 1800, 3600]: + if dps > maxdps: + break + mp.dps = dps + print("dps = %s" % dps) + check("gamma", gamma, z, gamma_val) + check("rgamma", rgamma, z, rgamma_val) + check("loggamma", loggamma, z, loggamma_val) + check("factorial", factorial, z, factorial_val) + print() + mp.dps = 15 + +testcases = [] + +# Basic values +for n in list(range(1,200)) + list(range(201,2000,17)): + testcases.append(["%s" % n, None]) +for n in range(-200,200): + testcases.append(["%s+0.5" % n, None]) + testcases.append(["%s+0.37" % n, None]) + +testcases += [\ +["(0.1+1j)", None], +["(-0.1+1j)", None], +["(0.1-1j)", None], +["(-0.1-1j)", None], +["10j", None], +["-10j", None], +["100j", None], +["10000j", None], +["-10000000j", None], +["(10**100)*j", None], +["125+(10**100)*j", None], +["-125+(10**100)*j", None], +["(10**10)*(1+j)", None], +["(10**10)*(-1+j)", None], +["(10**100)*(1+j)", None], +["(10**100)*(-1+j)", None], +["(1.5-1j)", None], +["(6+4j)", None], +["(4+1j)", None], +["(3.5+2j)", None], +["(1.5-1j)", None], +["(-6-4j)", None], +["(-2-3j)", None], +["(-2.5-2j)", None], +["(4+1j)", None], +["(3+3j)", None], +["(2-2j)", None], +["1", "0"], +["2", "0"], +["3", "log(2)"], +["4", "log(6)"], +["5", "log(24)"], +["0.5", "log(pi)/2"], +["1.5", "log(sqrt(pi)/2)"], +["2.5", "log(3*sqrt(pi)/4)"], +["mpf('0.37')", None], +["0.25", "log(sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2))))"], +["-0.4", None], +["mpf('-1.9')", None], +["mpf('12.8')", None], +["mpf('33.7')", None], +["mpf('95.2')", None], +["mpf('160.3')", None], +["mpf('2057.8')", None], +["25", 
"log(ifac(24))"], +["80", "log(ifac(79))"], +["500", "log(ifac(500-1))"], +["8000", "log(ifac(8000-1))"], +["8000.5", None], +["mpf('8000.1')", None], +["mpf('1.37e10')", None], +["mpf('1.37e10')*(1+j)", None], +["mpf('1.37e10')*(-1+j)", None], +["mpf('1.37e10')*(-1-j)", None], +["mpf('1.37e10')*(-1+j)", None], +["mpf('1.37e100')", None], +["mpf('1.37e100')*(1+j)", None], +["mpf('1.37e100')*(-1+j)", None], +["mpf('1.37e100')*(-1-j)", None], +["mpf('1.37e100')*(-1+j)", None], +["3+4j", +"mpc('" +"-1.7566267846037841105306041816232757851567066070613445016197619371316057169" +"4723618263960834804618463052988607348289672535780644470689771115236512106002" +"5970873471563240537307638968509556191696167970488390423963867031934333890838" +"8009531786948197210025029725361069435208930363494971027388382086721660805397" +"9163230643216054580167976201709951509519218635460317367338612500626714783631" +"7498317478048447525674016344322545858832610325861086336204591943822302971823" +"5161814175530618223688296232894588415495615809337292518431903058265147109853" +"1710568942184987827643886816200452860853873815413367529829631430146227470517" +"6579967222200868632179482214312673161276976117132204633283806161971389519137" +"1243359764435612951384238091232760634271570950240717650166551484551654327989" +"9360285030081716934130446150245110557038117075172576825490035434069388648124" +"6678152254554001586736120762641422590778766100376515737713938521275749049949" +"1284143906816424244705094759339932733567910991920631339597278805393743140853" +"391550313363278558195609260225928','" +"4.74266443803465792819488940755002274088830335171164611359052405215840070271" +"5906813009373171139767051863542508136875688550817670379002790304870822775498" +"2809996675877564504192565392367259119610438951593128982646945990372179860613" +"4294436498090428077839141927485901735557543641049637962003652638924845391650" +"9546290137755550107224907606529385248390667634297183361902055842228798984200" +"9591180450211798341715874477629099687609819466457990642030707080894518168924" +"6805549314043258530272479246115112769957368212585759640878745385160943755234" +"9398036774908108204370323896757543121853650025529763655312360354244898913463" +"7115955702828838923393113618205074162812089732064414530813087483533203244056" +"0546577484241423134079056537777170351934430586103623577814746004431994179990" +"5318522939077992613855205801498201930221975721246498720895122345420698451980" +"0051215797310305885845964334761831751370672996984756815410977750799748813563" +"8784405288158432214886648743541773208808731479748217023665577802702269468013" +"673719173759245720489020315779001')"], +] + +for z in [4, 14, 34, 64]: + testcases.append(["(2+j)*%s/3" % z, None]) + testcases.append(["(-2+j)*%s/3" % z, None]) + testcases.append(["(1+2*j)*%s/3" % z, None]) + testcases.append(["(2-j)*%s/3" % z, None]) + testcases.append(["(20+j)*%s/3" % z, None]) + testcases.append(["(-20+j)*%s/3" % z, None]) + testcases.append(["(1+20*j)*%s/3" % z, None]) + testcases.append(["(20-j)*%s/3" % z, None]) + testcases.append(["(200+j)*%s/3" % z, None]) + testcases.append(["(-200+j)*%s/3" % z, None]) + testcases.append(["(1+200*j)*%s/3" % z, None]) + testcases.append(["(200-j)*%s/3" % z, None]) + +# Poles +for n in [0,1,2,3,4,25,-1,-2,-3,-4,-20,-21,-50,-51,-200,-201,-20000,-20001]: + for t in ['1e-5', '1e-20', '1e-100', '1e-10000']: + testcases.append(["fadd(%s,'%s',exact=True)" % (n, t), None]) + testcases.append(["fsub(%s,'%s',exact=True)" % (n, t), None]) + 
testcases.append(["fadd(%s,'%sj',exact=True)" % (n, t), None]) + testcases.append(["fsub(%s,'%sj',exact=True)" % (n, t), None]) + +if __name__ == "__main__": + from timeit import default_timer as clock + tot_time = 0.0 + for case in testcases: + t1 = clock() + testcase(case) + t2 = clock() + print("Test time:", t2-t1) + print() + tot_time += (t2-t1) + print("Total time:", tot_time) + print("Errors:", errcount) diff --git a/phivenv/Lib/site-packages/mpmath/tests/extratest_zeta.py b/phivenv/Lib/site-packages/mpmath/tests/extratest_zeta.py new file mode 100644 index 0000000000000000000000000000000000000000..582b3d9cbd956b9cdf94309e0e718371fe716101 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/extratest_zeta.py @@ -0,0 +1,30 @@ +from mpmath import zetazero +from timeit import default_timer as clock + +def test_zetazero(): + cases = [\ + (399999999, 156762524.6750591511), + (241389216, 97490234.2276711795), + (526196239, 202950727.691229534), + (542964976, 209039046.578535272), + (1048449112, 388858885.231056486), + (1048449113, 388858885.384337406), + (1048449114, 388858886.002285122), + (1048449115, 388858886.00239369), + (1048449116, 388858886.690745053) + ] + for n, v in cases: + print(n, v) + t1 = clock() + ok = zetazero(n).ae(complex(0.5,v)) + t2 = clock() + print("ok =", ok, ("(time = %s)" % round(t2-t1,3))) + print("Now computing two huge zeros (this may take hours)") + print("Computing zetazero(8637740722917)") + ok = zetazero(8637740722917).ae(complex(0.5,2124447368584.39296466152)) + print("ok =", ok) + ok = zetazero(8637740722918).ae(complex(0.5,2124447368584.39298170604)) + print("ok =", ok) + +if __name__ == "__main__": + test_zetazero() diff --git a/phivenv/Lib/site-packages/mpmath/tests/runtests.py b/phivenv/Lib/site-packages/mpmath/tests/runtests.py new file mode 100644 index 0000000000000000000000000000000000000000..70fde272fdc0e05e3d8951edddca380bd36139ab --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/runtests.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python + +""" +python runtests.py -py + Use py.test to run tests (more useful for debugging) + +python runtests.py -coverage + Generate test coverage report. Statistics are written to /tmp + +python runtests.py -profile + Generate profile stats (this is much slower) + +python runtests.py -nogmpy + Run tests without using GMPY even if it exists + +python runtests.py -strict + Enforce extra tests in normalize() + +python runtests.py -local + Insert '../..' at the beginning of sys.path to use local mpmath + +python runtests.py -skip ... + Skip tests from the listed modules + +Additional arguments are used to filter the tests to run. Only files that have +one of the arguments in their name are executed. 
+ +""" + +import sys, os, traceback + +profile = False +if "-profile" in sys.argv: + sys.argv.remove('-profile') + profile = True + +coverage = False +if "-coverage" in sys.argv: + sys.argv.remove('-coverage') + coverage = True + +if "-nogmpy" in sys.argv: + sys.argv.remove('-nogmpy') + os.environ['MPMATH_NOGMPY'] = 'Y' + +if "-strict" in sys.argv: + sys.argv.remove('-strict') + os.environ['MPMATH_STRICT'] = 'Y' + +if "-local" in sys.argv: + sys.argv.remove('-local') + importdir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), + '../..')) +else: + importdir = '' + +# TODO: add a flag for this +testdir = '' + +def testit(importdir='', testdir='', exit_on_fail=False): + """Run all tests in testdir while importing from importdir.""" + if importdir: + sys.path.insert(1, importdir) + if testdir: + sys.path.insert(1, testdir) + import os.path + import mpmath + print("mpmath imported from %s" % os.path.dirname(mpmath.__file__)) + print("mpmath backend: %s" % mpmath.libmp.backend.BACKEND) + print("mpmath mp class: %s" % repr(mpmath.mp)) + print("mpmath version: %s" % mpmath.__version__) + print("Python version: %s" % sys.version) + print("") + if "-py" in sys.argv: + sys.argv.remove('-py') + import py + py.test.cmdline.main() + else: + import glob + from timeit import default_timer as clock + modules = [] + args = sys.argv[1:] + excluded = [] + if '-skip' in args: + excluded = args[args.index('-skip')+1:] + args = args[:args.index('-skip')] + # search for tests in directory of this file if not otherwise specified + if not testdir: + pattern = os.path.dirname(sys.argv[0]) + else: + pattern = testdir + if pattern: + pattern += '/' + pattern += 'test*.py' + # look for tests (respecting specified filter) + for f in glob.glob(pattern): + name = os.path.splitext(os.path.basename(f))[0] + # If run as a script, only run tests given as args, if any are given + if args and __name__ == "__main__": + ok = False + for arg in args: + if arg in name: + ok = True + break + if not ok: + continue + elif name in excluded: + continue + module = __import__(name) + priority = module.__dict__.get('priority', 100) + if priority == 666: + modules = [[priority, name, module]] + break + modules.append([priority, name, module]) + # execute tests + modules.sort() + tstart = clock() + for priority, name, module in modules: + print(name) + for f in sorted(module.__dict__.keys()): + if f.startswith('test_'): + if coverage and ('numpy' in f): + continue + sys.stdout.write(" " + f[5:].ljust(25) + " ") + t1 = clock() + try: + module.__dict__[f]() + except: + etype, evalue, trb = sys.exc_info() + if etype in (KeyboardInterrupt, SystemExit): + raise + print("") + print("TEST FAILED!") + print("") + traceback.print_exc() + if exit_on_fail: + return + t2 = clock() + print("ok " + " " + ("%.7f" % (t2-t1)) + " s") + tend = clock() + print("") + print("finished tests in " + ("%.2f" % (tend-tstart)) + " seconds") + # clean sys.path + if importdir: + sys.path.remove(importdir) + if testdir: + sys.path.remove(testdir) + +if __name__ == '__main__': + if profile: + import cProfile + cProfile.run("testit('%s', '%s')" % (importdir, testdir), sort=1) + elif coverage: + import trace + tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix], + trace=0, count=1) + tracer.run('testit(importdir, testdir)') + r = tracer.results() + r.write_results(show_missing=True, summary=True, coverdir="/tmp") + else: + testit(importdir, testdir) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_basic_ops.py 
b/phivenv/Lib/site-packages/mpmath/tests/test_basic_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f577c7fa9f9734876b6767f6cc21144df305d82f --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_basic_ops.py @@ -0,0 +1,451 @@ +import mpmath +from mpmath import * +from mpmath.libmp import * +import random +import sys + +try: + long = long +except NameError: + long = int + +def test_type_compare(): + assert mpf(2) == mpc(2,0) + assert mpf(0) == mpc(0) + assert mpf(2) != mpc(2, 0.00001) + assert mpf(2) == 2.0 + assert mpf(2) != 3.0 + assert mpf(2) == 2 + assert mpf(2) != '2.0' + assert mpc(2) != '2.0' + +def test_add(): + assert mpf(2.5) + mpf(3) == 5.5 + assert mpf(2.5) + 3 == 5.5 + assert mpf(2.5) + 3.0 == 5.5 + assert 3 + mpf(2.5) == 5.5 + assert 3.0 + mpf(2.5) == 5.5 + assert (3+0j) + mpf(2.5) == 5.5 + assert mpc(2.5) + mpf(3) == 5.5 + assert mpc(2.5) + 3 == 5.5 + assert mpc(2.5) + 3.0 == 5.5 + assert mpc(2.5) + (3+0j) == 5.5 + assert 3 + mpc(2.5) == 5.5 + assert 3.0 + mpc(2.5) == 5.5 + assert (3+0j) + mpc(2.5) == 5.5 + +def test_sub(): + assert mpf(2.5) - mpf(3) == -0.5 + assert mpf(2.5) - 3 == -0.5 + assert mpf(2.5) - 3.0 == -0.5 + assert 3 - mpf(2.5) == 0.5 + assert 3.0 - mpf(2.5) == 0.5 + assert (3+0j) - mpf(2.5) == 0.5 + assert mpc(2.5) - mpf(3) == -0.5 + assert mpc(2.5) - 3 == -0.5 + assert mpc(2.5) - 3.0 == -0.5 + assert mpc(2.5) - (3+0j) == -0.5 + assert 3 - mpc(2.5) == 0.5 + assert 3.0 - mpc(2.5) == 0.5 + assert (3+0j) - mpc(2.5) == 0.5 + +def test_mul(): + assert mpf(2.5) * mpf(3) == 7.5 + assert mpf(2.5) * 3 == 7.5 + assert mpf(2.5) * 3.0 == 7.5 + assert 3 * mpf(2.5) == 7.5 + assert 3.0 * mpf(2.5) == 7.5 + assert (3+0j) * mpf(2.5) == 7.5 + assert mpc(2.5) * mpf(3) == 7.5 + assert mpc(2.5) * 3 == 7.5 + assert mpc(2.5) * 3.0 == 7.5 + assert mpc(2.5) * (3+0j) == 7.5 + assert 3 * mpc(2.5) == 7.5 + assert 3.0 * mpc(2.5) == 7.5 + assert (3+0j) * mpc(2.5) == 7.5 + +def test_div(): + assert mpf(6) / mpf(3) == 2.0 + assert mpf(6) / 3 == 2.0 + assert mpf(6) / 3.0 == 2.0 + assert 6 / mpf(3) == 2.0 + assert 6.0 / mpf(3) == 2.0 + assert (6+0j) / mpf(3.0) == 2.0 + assert mpc(6) / mpf(3) == 2.0 + assert mpc(6) / 3 == 2.0 + assert mpc(6) / 3.0 == 2.0 + assert mpc(6) / (3+0j) == 2.0 + assert 6 / mpc(3) == 2.0 + assert 6.0 / mpc(3) == 2.0 + assert (6+0j) / mpc(3) == 2.0 + +def test_pow(): + assert mpf(6) ** mpf(3) == 216.0 + assert mpf(6) ** 3 == 216.0 + assert mpf(6) ** 3.0 == 216.0 + assert 6 ** mpf(3) == 216.0 + assert 6.0 ** mpf(3) == 216.0 + assert (6+0j) ** mpf(3.0) == 216.0 + assert mpc(6) ** mpf(3) == 216.0 + assert mpc(6) ** 3 == 216.0 + assert mpc(6) ** 3.0 == 216.0 + assert mpc(6) ** (3+0j) == 216.0 + assert 6 ** mpc(3) == 216.0 + assert 6.0 ** mpc(3) == 216.0 + assert (6+0j) ** mpc(3) == 216.0 + +def test_mixed_misc(): + assert 1 + mpf(3) == mpf(3) + 1 == 4 + assert 1 - mpf(3) == -(mpf(3) - 1) == -2 + assert 3 * mpf(2) == mpf(2) * 3 == 6 + assert 6 / mpf(2) == mpf(6) / 2 == 3 + assert 1.0 + mpf(3) == mpf(3) + 1.0 == 4 + assert 1.0 - mpf(3) == -(mpf(3) - 1.0) == -2 + assert 3.0 * mpf(2) == mpf(2) * 3.0 == 6 + assert 6.0 / mpf(2) == mpf(6) / 2.0 == 3 + +def test_add_misc(): + mp.dps = 15 + assert mpf(4) + mpf(-70) == -66 + assert mpf(1) + mpf(1.1)/80 == 1 + 1.1/80 + assert mpf((1, 10000000000)) + mpf(3) == mpf((1, 10000000000)) + assert mpf(3) + mpf((1, 10000000000)) == mpf((1, 10000000000)) + assert mpf((1, -10000000000)) + mpf(3) == mpf(3) + assert mpf(3) + mpf((1, -10000000000)) == mpf(3) + assert mpf(1) + 1e-15 != 1 + assert mpf(1) + 1e-20 == 1 
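A side note on two behaviours the asserts above rely on: a 2-tuple argument to mpf builds the exact value man * 2**exp (that is what mpf((1, 10000000000)) denotes), and addition silently absorbs any term smaller than one ulp at the working precision. A minimal illustrative sketch, with arbitrary small values:

from mpmath import mp, mpf

# mpf((man, exp)) constructs man * 2**exp exactly.
assert mpf((1, 100)) == mpf(2)**100

# A term below one ulp of the larger operand vanishes in the rounded sum;
# raising the precision makes the same sum distinguishable again.
mp.dps = 15
assert mpf(1) + mpf('1e-20') == 1
mp.dps = 30
assert mpf(1) + mpf('1e-20') != 1
mp.dps = 15   # restore the default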
+ assert mpf(1.07e-22) + 0 == mpf(1.07e-22) + assert mpf(0) + mpf(1.07e-22) == mpf(1.07e-22) + +def test_complex_misc(): + # many more tests needed + assert 1 + mpc(2) == 3 + assert not mpc(2).ae(2 + 1e-13) + assert mpc(2+1e-15j).ae(2) + +def test_complex_zeros(): + for a in [0,2]: + for b in [0,3]: + for c in [0,4]: + for d in [0,5]: + assert mpc(a,b)*mpc(c,d) == complex(a,b)*complex(c,d) + +def test_hash(): + for i in range(-256, 256): + assert hash(mpf(i)) == hash(i) + assert hash(mpf(0.5)) == hash(0.5) + assert hash(mpc(2,3)) == hash(2+3j) + # Check that this doesn't fail + assert hash(inf) + # Check that overflow doesn't assign equal hashes to large numbers + assert hash(mpf('1e1000')) != hash('1e10000') + assert hash(mpc(100,'1e1000')) != hash(mpc(200,'1e1000')) + from mpmath.rational import mpq + assert hash(mp.mpq(1,3)) + assert hash(mp.mpq(0,1)) == 0 + assert hash(mp.mpq(-1,1)) == hash(-1) + assert hash(mp.mpq(1,1)) == hash(1) + assert hash(mp.mpq(5,1)) == hash(5) + assert hash(mp.mpq(1,2)) == hash(0.5) + if sys.version_info >= (3, 2): + assert hash(mpf(1)*2**2000) == hash(2**2000) + assert hash(mpf(1)/2**2000) == hash(mpq(1,2**2000)) + +# Advanced rounding test +def test_add_rounding(): + mp.dps = 15 + a = from_float(1e-50) + assert mpf_sub(mpf_add(fone, a, 53, round_up), fone, 53, round_up) == from_float(2.2204460492503131e-16) + assert mpf_sub(fone, a, 53, round_up) == fone + assert mpf_sub(fone, mpf_sub(fone, a, 53, round_down), 53, round_down) == from_float(1.1102230246251565e-16) + assert mpf_add(fone, a, 53, round_down) == fone + +def test_almost_equal(): + assert mpf(1.2).ae(mpf(1.20000001), 1e-7) + assert not mpf(1.2).ae(mpf(1.20000001), 1e-9) + assert not mpf(-0.7818314824680298).ae(mpf(-0.774695868667929)) + +def test_arithmetic_functions(): + import operator + ops = [(operator.add, fadd), (operator.sub, fsub), (operator.mul, fmul), + (operator.truediv, fdiv)] + a = mpf(0.27) + b = mpf(1.13) + c = mpc(0.51+2.16j) + d = mpc(1.08-0.99j) + for x in [a,b,c,d]: + for y in [a,b,c,d]: + for op, fop in ops: + if fop is not fdiv: + mp.prec = 200 + z0 = op(x,y) + mp.prec = 60 + z1 = op(x,y) + mp.prec = 53 + z2 = op(x,y) + assert fop(x, y, prec=60) == z1 + assert fop(x, y) == z2 + if fop is not fdiv: + assert fop(x, y, prec=inf) == z0 + assert fop(x, y, dps=inf) == z0 + assert fop(x, y, exact=True) == z0 + assert fneg(fneg(z1, exact=True), prec=inf) == z1 + assert fneg(z1) == -(+z1) + mp.dps = 15 + +def test_exact_integer_arithmetic(): + # XXX: re-fix this so that all operations are tested with all rounding modes + random.seed(0) + for prec in [6, 10, 25, 40, 100, 250, 725]: + for rounding in ['d', 'u', 'f', 'c', 'n']: + mp.dps = prec + M = 10**(prec-2) + M2 = 10**(prec//2-2) + for i in range(10): + a = random.randint(-M, M) + b = random.randint(-M, M) + assert mpf(a, rounding=rounding) == a + assert int(mpf(a, rounding=rounding)) == a + assert int(mpf(str(a), rounding=rounding)) == a + assert mpf(a) + mpf(b) == a + b + assert mpf(a) - mpf(b) == a - b + assert -mpf(a) == -a + a = random.randint(-M2, M2) + b = random.randint(-M2, M2) + assert mpf(a) * mpf(b) == a*b + assert mpf_mul(from_int(a), from_int(b), mp.prec, rounding) == from_int(a*b) + mp.dps = 15 + +def test_odd_int_bug(): + assert to_int(from_int(3), round_nearest) == 3 + +def test_str_1000_digits(): + mp.dps = 1001 + # last digit may be wrong + assert str(mpf(2)**0.5)[-10:-1] == '9518488472'[:9] + assert str(pi)[-10:-1] == '2164201989'[:9] + mp.dps = 15 + +def test_str_10000_digits(): + mp.dps = 10001 + # last digit may 
be wrong + assert str(mpf(2)**0.5)[-10:-1] == '5873258351'[:9] + assert str(pi)[-10:-1] == '5256375678'[:9] + mp.dps = 15 + +def test_monitor(): + f = lambda x: x**2 + a = [] + b = [] + g = monitor(f, a.append, b.append) + assert g(3) == 9 + assert g(4) == 16 + assert a[0] == ((3,), {}) + assert b[0] == 9 + +def test_nint_distance(): + assert nint_distance(mpf(-3)) == (-3, -inf) + assert nint_distance(mpc(-3)) == (-3, -inf) + assert nint_distance(mpf(-3.1)) == (-3, -3) + assert nint_distance(mpf(-3.01)) == (-3, -6) + assert nint_distance(mpf(-3.001)) == (-3, -9) + assert nint_distance(mpf(-3.0001)) == (-3, -13) + assert nint_distance(mpf(-2.9)) == (-3, -3) + assert nint_distance(mpf(-2.99)) == (-3, -6) + assert nint_distance(mpf(-2.999)) == (-3, -9) + assert nint_distance(mpf(-2.9999)) == (-3, -13) + assert nint_distance(mpc(-3+0.1j)) == (-3, -3) + assert nint_distance(mpc(-3+0.01j)) == (-3, -6) + assert nint_distance(mpc(-3.1+0.1j)) == (-3, -3) + assert nint_distance(mpc(-3.01+0.01j)) == (-3, -6) + assert nint_distance(mpc(-3.001+0.001j)) == (-3, -9) + assert nint_distance(mpf(0)) == (0, -inf) + assert nint_distance(mpf(0.01)) == (0, -6) + assert nint_distance(mpf('1e-100')) == (0, -332) + +def test_floor_ceil_nint_frac(): + mp.dps = 15 + for n in range(-10,10): + assert floor(n) == n + assert floor(n+0.5) == n + assert ceil(n) == n + assert ceil(n+0.5) == n+1 + assert nint(n) == n + # nint rounds to even + if n % 2 == 1: + assert nint(n+0.5) == n+1 + else: + assert nint(n+0.5) == n + assert floor(inf) == inf + assert floor(ninf) == ninf + assert isnan(floor(nan)) + assert ceil(inf) == inf + assert ceil(ninf) == ninf + assert isnan(ceil(nan)) + assert nint(inf) == inf + assert nint(ninf) == ninf + assert isnan(nint(nan)) + assert floor(0.1) == 0 + assert floor(0.9) == 0 + assert floor(-0.1) == -1 + assert floor(-0.9) == -1 + assert floor(10000000000.1) == 10000000000 + assert floor(10000000000.9) == 10000000000 + assert floor(-10000000000.1) == -10000000000-1 + assert floor(-10000000000.9) == -10000000000-1 + assert floor(1e-100) == 0 + assert floor(-1e-100) == -1 + assert floor(1e100) == 1e100 + assert floor(-1e100) == -1e100 + assert ceil(0.1) == 1 + assert ceil(0.9) == 1 + assert ceil(-0.1) == 0 + assert ceil(-0.9) == 0 + assert ceil(10000000000.1) == 10000000000+1 + assert ceil(10000000000.9) == 10000000000+1 + assert ceil(-10000000000.1) == -10000000000 + assert ceil(-10000000000.9) == -10000000000 + assert ceil(1e-100) == 1 + assert ceil(-1e-100) == 0 + assert ceil(1e100) == 1e100 + assert ceil(-1e100) == -1e100 + assert nint(0.1) == 0 + assert nint(0.9) == 1 + assert nint(-0.1) == 0 + assert nint(-0.9) == -1 + assert nint(10000000000.1) == 10000000000 + assert nint(10000000000.9) == 10000000000+1 + assert nint(-10000000000.1) == -10000000000 + assert nint(-10000000000.9) == -10000000000-1 + assert nint(1e-100) == 0 + assert nint(-1e-100) == 0 + assert nint(1e100) == 1e100 + assert nint(-1e100) == -1e100 + assert floor(3.2+4.6j) == 3+4j + assert ceil(3.2+4.6j) == 4+5j + assert nint(3.2+4.6j) == 3+5j + for n in range(-10,10): + assert frac(n) == 0 + assert frac(0.25) == 0.25 + assert frac(1.25) == 0.25 + assert frac(2.25) == 0.25 + assert frac(-0.25) == 0.75 + assert frac(-1.25) == 0.75 + assert frac(-2.25) == 0.75 + assert frac('1e100000000000000') == 0 + u = mpf('1e-100000000000000') + assert frac(u) == u + assert frac(-u) == 1 # rounding! 
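The "# rounding!" comment above marks a subtlety worth spelling out: frac(-u) is mathematically 1 - u, but the subtraction happens at the working precision and rounds to exactly 1; fsub with exact=True exposes the true value, which is what the prec=0 assert just below verifies. A small sketch of the distinction:

from mpmath import mp, mpf, frac, fsub

mp.dps = 15
u = mpf('1e-400')
assert frac(-u) == 1                 # 1 - u rounds to exactly 1 at 15 digits
assert fsub(1, u, exact=True) != 1   # the exact difference keeps the tiny deficit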
+ u = mpf('1e-400') + assert frac(-u, prec=0) == fsub(1, u, exact=True) + assert frac(3.25+4.75j) == 0.25+0.75j + +def test_isnan_etc(): + from mpmath.rational import mpq + assert isnan(nan) == True + assert isnan(3) == False + assert isnan(mpf(3)) == False + assert isnan(inf) == False + assert isnan(mpc(2,nan)) == True + assert isnan(mpc(2,nan)) == True + assert isnan(mpc(nan,nan)) == True + assert isnan(mpc(2,2)) == False + assert isnan(mpc(nan,inf)) == True + assert isnan(mpc(inf,inf)) == False + assert isnan(mpq((3,2))) == False + assert isnan(mpq((0,1))) == False + assert isinf(inf) == True + assert isinf(-inf) == True + assert isinf(3) == False + assert isinf(nan) == False + assert isinf(3+4j) == False + assert isinf(mpc(inf)) == True + assert isinf(mpc(3,inf)) == True + assert isinf(mpc(inf,3)) == True + assert isinf(mpc(inf,inf)) == True + assert isinf(mpc(nan,inf)) == True + assert isinf(mpc(inf,nan)) == True + assert isinf(mpc(nan,nan)) == False + assert isinf(mpq((3,2))) == False + assert isinf(mpq((0,1))) == False + assert isnormal(3) == True + assert isnormal(3.5) == True + assert isnormal(mpf(3.5)) == True + assert isnormal(0) == False + assert isnormal(mpf(0)) == False + assert isnormal(0.0) == False + assert isnormal(inf) == False + assert isnormal(-inf) == False + assert isnormal(nan) == False + assert isnormal(float(inf)) == False + assert isnormal(mpc(0,0)) == False + assert isnormal(mpc(3,0)) == True + assert isnormal(mpc(0,3)) == True + assert isnormal(mpc(3,3)) == True + assert isnormal(mpc(0,nan)) == False + assert isnormal(mpc(0,inf)) == False + assert isnormal(mpc(3,nan)) == False + assert isnormal(mpc(3,inf)) == False + assert isnormal(mpc(3,-inf)) == False + assert isnormal(mpc(nan,0)) == False + assert isnormal(mpc(inf,0)) == False + assert isnormal(mpc(nan,3)) == False + assert isnormal(mpc(inf,3)) == False + assert isnormal(mpc(inf,nan)) == False + assert isnormal(mpc(nan,inf)) == False + assert isnormal(mpc(nan,nan)) == False + assert isnormal(mpc(inf,inf)) == False + assert isnormal(mpq((3,2))) == True + assert isnormal(mpq((0,1))) == False + assert isint(3) == True + assert isint(0) == True + assert isint(long(3)) == True + assert isint(long(0)) == True + assert isint(mpf(3)) == True + assert isint(mpf(0)) == True + assert isint(mpf(-3)) == True + assert isint(mpf(3.2)) == False + assert isint(3.2) == False + assert isint(nan) == False + assert isint(inf) == False + assert isint(-inf) == False + assert isint(mpc(0)) == True + assert isint(mpc(3)) == True + assert isint(mpc(3.2)) == False + assert isint(mpc(3,inf)) == False + assert isint(mpc(inf)) == False + assert isint(mpc(3,2)) == False + assert isint(mpc(0,2)) == False + assert isint(mpc(3,2),gaussian=True) == True + assert isint(mpc(3,0),gaussian=True) == True + assert isint(mpc(0,3),gaussian=True) == True + assert isint(3+4j) == False + assert isint(3+4j, gaussian=True) == True + assert isint(3+0j) == True + assert isint(mpq((3,2))) == False + assert isint(mpq((3,9))) == False + assert isint(mpq((9,3))) == True + assert isint(mpq((0,4))) == True + assert isint(mpq((1,1))) == True + assert isint(mpq((-1,1))) == True + assert mp.isnpint(0) == True + assert mp.isnpint(1) == False + assert mp.isnpint(-1) == True + assert mp.isnpint(-1.1) == False + assert mp.isnpint(-1.0) == True + assert mp.isnpint(mp.mpq(1,2)) == False + assert mp.isnpint(mp.mpq(-1,2)) == False + assert mp.isnpint(mp.mpq(-3,1)) == True + assert mp.isnpint(mp.mpq(0,1)) == True + assert mp.isnpint(mp.mpq(1,1)) == False + assert 
mp.isnpint(0+0j) == True + assert mp.isnpint(-1+0j) == True + assert mp.isnpint(-1.1+0j) == False + assert mp.isnpint(-1+0.1j) == False + assert mp.isnpint(0+0.1j) == False + + +def test_issue_438(): + assert mpf(finf) == mpf('inf') + assert mpf(fninf) == mpf('-inf') + assert mpf(fnan)._mpf_ == mpf('nan')._mpf_ diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_bitwise.py b/phivenv/Lib/site-packages/mpmath/tests/test_bitwise.py new file mode 100644 index 0000000000000000000000000000000000000000..4f61b69fc8819cf275abaedd98847c58c3b5924a --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_bitwise.py @@ -0,0 +1,188 @@ +""" +Test bit-level integer and mpf operations +""" + +from mpmath import * +from mpmath.libmp import * + +def test_bitcount(): + assert bitcount(0) == 0 + assert bitcount(1) == 1 + assert bitcount(7) == 3 + assert bitcount(8) == 4 + assert bitcount(2**100) == 101 + assert bitcount(2**100-1) == 100 + +def test_trailing(): + assert trailing(0) == 0 + assert trailing(1) == 0 + assert trailing(2) == 1 + assert trailing(7) == 0 + assert trailing(8) == 3 + assert trailing(2**100) == 100 + assert trailing(2**100-1) == 0 + +def test_round_down(): + assert from_man_exp(0, -4, 4, round_down)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_down)[:3] == (0, 15, 0) + assert from_man_exp(0xf1, -4, 4, round_down)[:3] == (0, 15, 0) + assert from_man_exp(0xff, -4, 4, round_down)[:3] == (0, 15, 0) + assert from_man_exp(-0xf0, -4, 4, round_down)[:3] == (1, 15, 0) + assert from_man_exp(-0xf1, -4, 4, round_down)[:3] == (1, 15, 0) + assert from_man_exp(-0xff, -4, 4, round_down)[:3] == (1, 15, 0) + +def test_round_up(): + assert from_man_exp(0, -4, 4, round_up)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_up)[:3] == (0, 15, 0) + assert from_man_exp(0xf1, -4, 4, round_up)[:3] == (0, 1, 4) + assert from_man_exp(0xff, -4, 4, round_up)[:3] == (0, 1, 4) + assert from_man_exp(-0xf0, -4, 4, round_up)[:3] == (1, 15, 0) + assert from_man_exp(-0xf1, -4, 4, round_up)[:3] == (1, 1, 4) + assert from_man_exp(-0xff, -4, 4, round_up)[:3] == (1, 1, 4) + +def test_round_floor(): + assert from_man_exp(0, -4, 4, round_floor)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_floor)[:3] == (0, 15, 0) + assert from_man_exp(0xf1, -4, 4, round_floor)[:3] == (0, 15, 0) + assert from_man_exp(0xff, -4, 4, round_floor)[:3] == (0, 15, 0) + assert from_man_exp(-0xf0, -4, 4, round_floor)[:3] == (1, 15, 0) + assert from_man_exp(-0xf1, -4, 4, round_floor)[:3] == (1, 1, 4) + assert from_man_exp(-0xff, -4, 4, round_floor)[:3] == (1, 1, 4) + +def test_round_ceiling(): + assert from_man_exp(0, -4, 4, round_ceiling)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_ceiling)[:3] == (0, 15, 0) + assert from_man_exp(0xf1, -4, 4, round_ceiling)[:3] == (0, 1, 4) + assert from_man_exp(0xff, -4, 4, round_ceiling)[:3] == (0, 1, 4) + assert from_man_exp(-0xf0, -4, 4, round_ceiling)[:3] == (1, 15, 0) + assert from_man_exp(-0xf1, -4, 4, round_ceiling)[:3] == (1, 15, 0) + assert from_man_exp(-0xff, -4, 4, round_ceiling)[:3] == (1, 15, 0) + +def test_round_nearest(): + assert from_man_exp(0, -4, 4, round_nearest)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_nearest)[:3] == (0, 15, 0) + assert from_man_exp(0xf7, -4, 4, round_nearest)[:3] == (0, 15, 0) + assert from_man_exp(0xf8, -4, 4, round_nearest)[:3] == (0, 1, 4) # 1111.1000 -> 10000.0 + assert from_man_exp(0xf9, -4, 4, round_nearest)[:3] == (0, 1, 4) # 1111.1001 -> 10000.0 + assert from_man_exp(0xe8, -4, 4, round_nearest)[:3] 
== (0, 7, 1) # 1110.1000 -> 1110.0 + assert from_man_exp(0xe9, -4, 4, round_nearest)[:3] == (0, 15, 0) # 1110.1001 -> 1111.0 + assert from_man_exp(-0xf0, -4, 4, round_nearest)[:3] == (1, 15, 0) + assert from_man_exp(-0xf7, -4, 4, round_nearest)[:3] == (1, 15, 0) + assert from_man_exp(-0xf8, -4, 4, round_nearest)[:3] == (1, 1, 4) + assert from_man_exp(-0xf9, -4, 4, round_nearest)[:3] == (1, 1, 4) + assert from_man_exp(-0xe8, -4, 4, round_nearest)[:3] == (1, 7, 1) + assert from_man_exp(-0xe9, -4, 4, round_nearest)[:3] == (1, 15, 0) + +def test_rounding_bugs(): + # 1 less than power-of-two cases + assert from_man_exp(72057594037927935, -56, 53, round_up) == (0, 1, 0, 1) + assert from_man_exp(73786976294838205979, -65, 53, round_nearest) == (0, 1, 1, 1) + assert from_man_exp(31, 0, 4, round_up) == (0, 1, 5, 1) + assert from_man_exp(-31, 0, 4, round_floor) == (1, 1, 5, 1) + assert from_man_exp(255, 0, 7, round_up) == (0, 1, 8, 1) + assert from_man_exp(-255, 0, 7, round_floor) == (1, 1, 8, 1) + +def test_rounding_issue_200(): + a = from_man_exp(9867,-100) + b = from_man_exp(9867,-200) + c = from_man_exp(-1,0) + z = (1, 1023, -10, 10) + assert mpf_add(a, c, 10, 'd') == z + assert mpf_add(b, c, 10, 'd') == z + assert mpf_add(c, a, 10, 'd') == z + assert mpf_add(c, b, 10, 'd') == z + +def test_perturb(): + a = fone + b = from_float(0.99999999999999989) + c = from_float(1.0000000000000002) + assert mpf_perturb(a, 0, 53, round_nearest) == a + assert mpf_perturb(a, 1, 53, round_nearest) == a + assert mpf_perturb(a, 0, 53, round_up) == c + assert mpf_perturb(a, 0, 53, round_ceiling) == c + assert mpf_perturb(a, 0, 53, round_down) == a + assert mpf_perturb(a, 0, 53, round_floor) == a + assert mpf_perturb(a, 1, 53, round_up) == a + assert mpf_perturb(a, 1, 53, round_ceiling) == a + assert mpf_perturb(a, 1, 53, round_down) == b + assert mpf_perturb(a, 1, 53, round_floor) == b + a = mpf_neg(a) + b = mpf_neg(b) + c = mpf_neg(c) + assert mpf_perturb(a, 0, 53, round_nearest) == a + assert mpf_perturb(a, 1, 53, round_nearest) == a + assert mpf_perturb(a, 0, 53, round_up) == a + assert mpf_perturb(a, 0, 53, round_floor) == a + assert mpf_perturb(a, 0, 53, round_down) == b + assert mpf_perturb(a, 0, 53, round_ceiling) == b + assert mpf_perturb(a, 1, 53, round_up) == c + assert mpf_perturb(a, 1, 53, round_floor) == c + assert mpf_perturb(a, 1, 53, round_down) == a + assert mpf_perturb(a, 1, 53, round_ceiling) == a + +def test_add_exact(): + ff = from_float + assert mpf_add(ff(3.0), ff(2.5)) == ff(5.5) + assert mpf_add(ff(3.0), ff(-2.5)) == ff(0.5) + assert mpf_add(ff(-3.0), ff(2.5)) == ff(-0.5) + assert mpf_add(ff(-3.0), ff(-2.5)) == ff(-5.5) + assert mpf_sub(mpf_add(fone, ff(1e-100)), fone) == ff(1e-100) + assert mpf_sub(mpf_add(ff(1e-100), fone), fone) == ff(1e-100) + assert mpf_sub(mpf_add(fone, ff(-1e-100)), fone) == ff(-1e-100) + assert mpf_sub(mpf_add(ff(-1e-100), fone), fone) == ff(-1e-100) + assert mpf_add(fone, fzero) == fone + assert mpf_add(fzero, fone) == fone + assert mpf_add(fzero, fzero) == fzero + +def test_long_exponent_shifts(): + mp.dps = 15 + # Check for possible bugs due to exponent arithmetic overflow + # in a C implementation + x = mpf(1) + for p in [32, 64]: + a = ldexp(1,2**(p-1)) + b = ldexp(1,2**p) + c = ldexp(1,2**(p+1)) + d = ldexp(1,-2**(p-1)) + e = ldexp(1,-2**p) + f = ldexp(1,-2**(p+1)) + assert (x+a) == a + assert (x+b) == b + assert (x+c) == c + assert (x+d) == x + assert (x+e) == x + assert (x+f) == x + assert (a+x) == a + assert (b+x) == b + assert (c+x) == c + assert (d+x) == 
x + assert (e+x) == x + assert (f+x) == x + assert (x-a) == -a + assert (x-b) == -b + assert (x-c) == -c + assert (x-d) == x + assert (x-e) == x + assert (x-f) == x + assert (a-x) == a + assert (b-x) == b + assert (c-x) == c + assert (d-x) == -x + assert (e-x) == -x + assert (f-x) == -x + +def test_float_rounding(): + mp.prec = 64 + for x in [mpf(1), mpf(1)+eps, mpf(1)-eps, -mpf(1)+eps, -mpf(1)-eps]: + fa = float(x) + fb = float(fadd(x,0,prec=53,rounding='n')) + assert fa == fb + z = mpc(x,x) + ca = complex(z) + cb = complex(fadd(z,0,prec=53,rounding='n')) + assert ca == cb + for rnd in ['n', 'd', 'u', 'f', 'c']: + fa = to_float(x._mpf_, rnd=rnd) + fb = to_float(fadd(x,0,prec=53,rounding=rnd)._mpf_, rnd=rnd) + assert fa == fb + mp.prec = 53 diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_calculus.py b/phivenv/Lib/site-packages/mpmath/tests/test_calculus.py new file mode 100644 index 0000000000000000000000000000000000000000..f0a59773d672f0db20bb5072773472a5a3cc1d1f --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_calculus.py @@ -0,0 +1,216 @@ +import pytest +from mpmath import * + +def test_approximation(): + mp.dps = 15 + f = lambda x: cos(2-2*x)/x + p, err = chebyfit(f, [2, 4], 8, error=True) + assert err < 1e-5 + for i in range(10): + x = 2 + i/5. + assert abs(polyval(p, x) - f(x)) < err + +def test_limits(): + mp.dps = 15 + assert limit(lambda x: (x-sin(x))/x**3, 0).ae(mpf(1)/6) + assert limit(lambda n: (1+1/n)**n, inf).ae(e) + +def test_polyval(): + assert polyval([], 3) == 0 + assert polyval([0], 3) == 0 + assert polyval([5], 3) == 5 + # 4x^3 - 2x + 5 + p = [4, 0, -2, 5] + assert polyval(p,4) == 253 + assert polyval(p,4,derivative=True) == (253, 190) + +def test_polyroots(): + p = polyroots([1,-4]) + assert p[0].ae(4) + p, q = polyroots([1,2,3]) + assert p.ae(-1 - sqrt(2)*j) + assert q.ae(-1 + sqrt(2)*j) + #this is not a real test, it only tests a specific case + assert polyroots([1]) == [] + pytest.raises(ValueError, lambda: polyroots([0])) + +def test_polyroots_legendre(): + n = 64 + coeffs = [11975573020964041433067793888190275875, 0, + -190100434726484311252477736051902332000, 0, + 1437919688271127330313741595496589239248, 0, + -6897338342113537600691931230430793911840, 0, + 23556405536185284408974715545252277554280, 0, + -60969520211303089058522793175947071316960, 0, + 124284021969194758465450309166353645376880, 0, + -204721258548015217049921875719981284186016, 0, + 277415422258095841688223780704620656114900, 0, + -313237834141273382807123548182995095192800, 0, + 297432255354328395601259515935229287637200, 0, + -239057700565161140389797367947941296605600, 0, + 163356095386193445933028201431093219347160, 0, + -95158890516229191805647495979277603503200, 0, + 47310254620162038075933656063247634556400, 0, + -20071017111583894941305187420771723751200, 0, + 7255051932731034189479516844750603752850, 0, + -2228176940331017311443863996901733412640, 0, + 579006552594977616773047095969088431600, 0, + -126584428502545713788439446082310831200, 0, + 23112325428835593809686977515028663000, 0, + -3491517141958743235617737161547844000, 0, + 431305058712550634988073414073557200, 0, + -42927166660756742088912492757452000, 0, + 3378527005707706553294038781836500, 0, + -205277590220215081719131470288800, 0, + 9330799555464321896324157740400, 0, + -304114948474392713657972548576, 0, + 6695289961520387531608984680, 0, + -91048139350447232095702560, 0, + 659769125727878493447120, 0, + -1905929106580294155360, 0, + 916312070471295267] + + with mp.workdps(3): + with 
pytest.raises(mp.NoConvergence): + polyroots(coeffs, maxsteps=5, cleanup=True, error=False, + extraprec=n*10) + + roots = polyroots(coeffs, maxsteps=50, cleanup=True, error=False, + extraprec=n*10) + roots = [str(r) for r in roots] + assert roots == \ + ['-0.999', '-0.996', '-0.991', '-0.983', '-0.973', '-0.961', + '-0.946', '-0.93', '-0.911', '-0.889', '-0.866', '-0.841', + '-0.813', '-0.784', '-0.753', '-0.72', '-0.685', '-0.649', + '-0.611', '-0.572', '-0.531', '-0.489', '-0.446', '-0.402', + '-0.357', '-0.311', '-0.265', '-0.217', '-0.17', '-0.121', + '-0.073', '-0.0243', '0.0243', '0.073', '0.121', '0.17', '0.217', + '0.265', '0.311', '0.357', '0.402', '0.446', '0.489', '0.531', + '0.572', '0.611', '0.649', '0.685', '0.72', '0.753', '0.784', + '0.813', '0.841', '0.866', '0.889', '0.911', '0.93', '0.946', + '0.961', '0.973', '0.983', '0.991', '0.996', '0.999'] + +def test_polyroots_legendre_init(): + extra_prec = 100 + coeffs = [11975573020964041433067793888190275875, 0, + -190100434726484311252477736051902332000, 0, + 1437919688271127330313741595496589239248, 0, + -6897338342113537600691931230430793911840, 0, + 23556405536185284408974715545252277554280, 0, + -60969520211303089058522793175947071316960, 0, + 124284021969194758465450309166353645376880, 0, + -204721258548015217049921875719981284186016, 0, + 277415422258095841688223780704620656114900, 0, + -313237834141273382807123548182995095192800, 0, + 297432255354328395601259515935229287637200, 0, + -239057700565161140389797367947941296605600, 0, + 163356095386193445933028201431093219347160, 0, + -95158890516229191805647495979277603503200, 0, + 47310254620162038075933656063247634556400, 0, + -20071017111583894941305187420771723751200, 0, + 7255051932731034189479516844750603752850, 0, + -2228176940331017311443863996901733412640, 0, + 579006552594977616773047095969088431600, 0, + -126584428502545713788439446082310831200, 0, + 23112325428835593809686977515028663000, 0, + -3491517141958743235617737161547844000, 0, + 431305058712550634988073414073557200, 0, + -42927166660756742088912492757452000, 0, + 3378527005707706553294038781836500, 0, + -205277590220215081719131470288800, 0, + 9330799555464321896324157740400, 0, + -304114948474392713657972548576, 0, + 6695289961520387531608984680, 0, + -91048139350447232095702560, 0, + 659769125727878493447120, 0, + -1905929106580294155360, 0, + 916312070471295267] + + roots_init = matrix(['-0.999', '-0.996', '-0.991', '-0.983', '-0.973', + '-0.961', '-0.946', '-0.93', '-0.911', '-0.889', + '-0.866', '-0.841', '-0.813', '-0.784', '-0.753', + '-0.72', '-0.685', '-0.649', '-0.611', '-0.572', + '-0.531', '-0.489', '-0.446', '-0.402', '-0.357', + '-0.311', '-0.265', '-0.217', '-0.17', '-0.121', + '-0.073', '-0.0243', '0.0243', '0.073', '0.121', + '0.17', '0.217', '0.265', ' 0.311', '0.357', + '0.402', '0.446', '0.489', '0.531', '0.572', + '0.611', '0.649', '0.685', '0.72', '0.753', + '0.784', '0.813', '0.841', '0.866', '0.889', + '0.911', '0.93', '0.946', '0.961', '0.973', + '0.983', '0.991', '0.996', '0.999', '1.0']) + with mp.workdps(2*mp.dps): + roots_exact = polyroots(coeffs, maxsteps=50, cleanup=True, error=False, + extraprec=2*extra_prec) + with pytest.raises(mp.NoConvergence): + polyroots(coeffs, maxsteps=5, cleanup=True, error=False, + extraprec=extra_prec) + roots,err = polyroots(coeffs, maxsteps=5, cleanup=True, error=True, + extraprec=extra_prec,roots_init=roots_init) + assert max(matrix(roots_exact)-matrix(roots).apply(abs)) < err + roots1,err1 = polyroots(coeffs, maxsteps=25, cleanup=True, 
error=True, + extraprec=extra_prec,roots_init=roots_init[:60]) + assert max(matrix(roots_exact)-matrix(roots1).apply(abs)) < err1 + +def test_pade(): + one = mpf(1) + mp.dps = 20 + N = 10 + a = [one] + k = 1 + for i in range(1, N+1): + k *= i + a.append(one/k) + p, q = pade(a, N//2, N//2) + for x in arange(0, 1, 0.1): + r = polyval(p[::-1], x)/polyval(q[::-1], x) + assert(r.ae(exp(x), 1.0e-10)) + mp.dps = 15 + +def test_fourier(): + mp.dps = 15 + c, s = fourier(lambda x: x+1, [-1, 2], 2) + #plot([lambda x: x+1, lambda x: fourierval((c, s), [-1, 2], x)], [-1, 2]) + assert c[0].ae(1.5) + assert c[1].ae(-3*sqrt(3)/(2*pi)) + assert c[2].ae(3*sqrt(3)/(4*pi)) + assert s[0] == 0 + assert s[1].ae(3/(2*pi)) + assert s[2].ae(3/(4*pi)) + assert fourierval((c, s), [-1, 2], 1).ae(1.9134966715663442) + +def test_differint(): + mp.dps = 15 + assert differint(lambda t: t, 2, -0.5).ae(8*sqrt(2/pi)/3) + +def test_invlap(): + mp.dps = 15 + t = 0.01 + fp = lambda p: 1/(p+1)**2 + ft = lambda t: t*exp(-t) + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) + t = 1.0 + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) + + t = 0.01 + fp = lambda p: log(p)/p + ft = lambda t: -euler-log(t) + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) + t = 1.0 + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_compatibility.py b/phivenv/Lib/site-packages/mpmath/tests/test_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..f26d6044b521306b6d1eaeadc5c7839be226dc54 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_compatibility.py @@ -0,0 +1,77 @@ +from mpmath import * +from random import seed, randint, random +import math + +# Test compatibility with Python floats, which are +# IEEE doubles (53-bit) + +N = 5000 +seed(1) + +# Choosing exponents between roughly -140, 140 ensures that +# the Python floats don't overflow or underflow +xs = [(random()-1) * 10**randint(-140, 140) for x in range(N)] +ys = [(random()-1) * 10**randint(-140, 140) for x in range(N)] + +# include some equal values +ys[int(N*0.8):] = xs[int(N*0.8):] + +# Detect whether Python is compiled to use 80-bit floating-point +# instructions, in which case the double compatibility test breaks +uses_x87 = -4.1974624032366689e+117 / -8.4657370748010221e-47 \ + == 4.9581771393902231e+163 + +def test_double_compatibility(): + mp.prec = 53 + for x, y in zip(xs, ys): + mpx = mpf(x) + mpy = mpf(y) + assert mpf(x) == x + assert (mpx < mpy) == (x < y) + assert (mpx > mpy) == (x > y) + assert (mpx == mpy) == (x == y) + assert (mpx != mpy) == (x != y) + assert (mpx <= mpy) == (x <= y) + assert (mpx >= mpy) == (x >= y) + assert mpx == mpx + if uses_x87: + mp.prec = 64 + a = mpx + mpy + b = mpx * mpy + c = mpx / mpy + d = mpx % mpy + mp.prec = 53 + assert +a == x + y + assert +b == x * y 
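The unary plus in the x87 branch above is doing real work: in mpmath, +x re-rounds an mpf to the current working precision, so sums and products formed at 64-bit extended precision can be compared against 53-bit Python doubles. A minimal sketch of that idiom (0.1 is just a convenient inexact value):

from mpmath import mp, mpf

mp.prec = 64
a = mpf('0.1')            # 0.1 correctly rounded to 64 bits
mp.prec = 53
assert +a == mpf('0.1')   # unary plus re-rounds a to the 53-bit working precision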
+ assert +c == x / y + assert +d == x % y + else: + assert mpx + mpy == x + y + assert mpx * mpy == x * y + assert mpx / mpy == x / y + assert mpx % mpy == x % y + assert abs(mpx) == abs(x) + assert mpf(repr(x)) == x + assert ceil(mpx) == math.ceil(x) + assert floor(mpx) == math.floor(x) + +def test_sqrt(): + # this fails quite often. it appears to be float + # that rounds the wrong way, not mpf + fail = 0 + mp.prec = 53 + for x in xs: + x = abs(x) + mp.prec = 100 + mp_high = mpf(x)**0.5 + mp.prec = 53 + mp_low = mpf(x)**0.5 + fp = x**0.5 + assert abs(mp_low-mp_high) <= abs(fp-mp_high) + fail += mp_low != fp + assert fail < N/10 + +def test_bugs(): + # particular bugs + assert mpf(4.4408920985006262E-16) < mpf(1.7763568394002505E-15) + assert mpf(-4.4408920985006262E-16) > mpf(-1.7763568394002505E-15) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_convert.py b/phivenv/Lib/site-packages/mpmath/tests/test_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1db5b55c89e980e08fc3fa43cc9715ad68cac9 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_convert.py @@ -0,0 +1,233 @@ +import random +from mpmath import * +from mpmath.libmp import * + + +def test_basic_string(): + """ + Test basic string conversion + """ + mp.dps = 15 + assert mpf('3') == mpf('3.0') == mpf('0003.') == mpf('0.03e2') == mpf(3.0) + assert mpf('30') == mpf('30.0') == mpf('00030.') == mpf(30.0) + for i in range(10): + for j in range(10): + assert mpf('%ie%i' % (i,j)) == i * 10**j + assert str(mpf('25000.0')) == '25000.0' + assert str(mpf('2500.0')) == '2500.0' + assert str(mpf('250.0')) == '250.0' + assert str(mpf('25.0')) == '25.0' + assert str(mpf('2.5')) == '2.5' + assert str(mpf('0.25')) == '0.25' + assert str(mpf('0.025')) == '0.025' + assert str(mpf('0.0025')) == '0.0025' + assert str(mpf('0.00025')) == '0.00025' + assert str(mpf('0.000025')) == '2.5e-5' + assert str(mpf(0)) == '0.0' + assert str(mpf('2.5e1000000000000000000000')) == '2.5e+1000000000000000000000' + assert str(mpf('2.6e-1000000000000000000000')) == '2.6e-1000000000000000000000' + assert str(mpf(1.23402834e-15)) == '1.23402834e-15' + assert str(mpf(-1.23402834e-15)) == '-1.23402834e-15' + assert str(mpf(-1.2344e-15)) == '-1.2344e-15' + assert repr(mpf(-1.2344e-15)) == "mpf('-1.2343999999999999e-15')" + assert str(mpf("2163048125L")) == '2163048125.0' + assert str(mpf("-2163048125l")) == '-2163048125.0' + assert str(mpf("-2163048125L/1088391168")) == '-1.98738118113799' + assert str(mpf("2163048125/1088391168l")) == '1.98738118113799' + +def test_pretty(): + mp.pretty = True + assert repr(mpf(2.5)) == '2.5' + assert repr(mpc(2.5,3.5)) == '(2.5 + 3.5j)' + mp.pretty = False + iv.pretty = True + assert repr(mpi(2.5,3.5)) == '[2.5, 3.5]' + iv.pretty = False + +def test_str_whitespace(): + assert mpf('1.26 ') == 1.26 + +def test_unicode(): + mp.dps = 15 + try: + unicode = unicode + except NameError: + unicode = str + assert mpf(unicode('2.76')) == 2.76 + assert mpf(unicode('inf')) == inf + +def test_str_format(): + assert to_str(from_float(0.1),15,strip_zeros=False) == '0.100000000000000' + assert to_str(from_float(0.0),15,show_zero_exponent=True) == '0.0e+0' + assert to_str(from_float(0.0),0,show_zero_exponent=True) == '.0e+0' + assert to_str(from_float(0.0),0,show_zero_exponent=False) == '.0' + assert to_str(from_float(0.0),1,show_zero_exponent=True) == '0.0e+0' + assert to_str(from_float(0.0),1,show_zero_exponent=False) == '0.0' + assert to_str(from_float(1.23),3,show_zero_exponent=True) == '1.23e+0' +
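The min_fixed/max_fixed keywords exercised next bound the decimal exponents that to_str renders in fixed-point notation; setting both to 0 leaves no exponent in the fixed range, forcing scientific notation for every value. A short illustrative sketch:

from mpmath.libmp import from_float, to_str

print(to_str(from_float(123.456), 6))                            # '123.456'
print(to_str(from_float(123.456), 6, min_fixed=0, max_fixed=0))  # '1.23456e+2'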
assert to_str(from_float(1.23456789000000e-2),15,strip_zeros=False,min_fixed=0,max_fixed=0) == '1.23456789000000e-2' + assert to_str(from_float(1.23456789000000e+2),15,strip_zeros=False,min_fixed=0,max_fixed=0) == '1.23456789000000e+2' + assert to_str(from_float(2.1287e14), 15, max_fixed=1000) == '212870000000000.0' + assert to_str(from_float(2.1287e15), 15, max_fixed=1000) == '2128700000000000.0' + assert to_str(from_float(2.1287e16), 15, max_fixed=1000) == '21287000000000000.0' + assert to_str(from_float(2.1287e30), 15, max_fixed=1000) == '2128700000000000000000000000000.0' + +def test_tight_string_conversion(): + mp.dps = 15 + # In an old version, '0.5' wasn't recognized as representing + # an exact binary number and was erroneously rounded up or down + assert from_str('0.5', 10, round_floor) == fhalf + assert from_str('0.5', 10, round_ceiling) == fhalf + +def test_eval_repr_invariant(): + """Test that eval(repr(x)) == x""" + random.seed(123) + for dps in [10, 15, 20, 50, 100]: + mp.dps = dps + for i in range(1000): + a = mpf(random.random())**0.5 * 10**random.randint(-100, 100) + assert eval(repr(a)) == a + mp.dps = 15 + +def test_str_bugs(): + mp.dps = 15 + # Decimal rounding used to give the wrong exponent in some cases + assert str(mpf('1e600')) == '1.0e+600' + assert str(mpf('1e10000')) == '1.0e+10000' + +def test_str_prec0(): + assert to_str(from_float(1.234), 0) == '.0e+0' + assert to_str(from_float(1e-15), 0) == '.0e-15' + assert to_str(from_float(1e+15), 0) == '.0e+15' + assert to_str(from_float(-1e-15), 0) == '-.0e-15' + assert to_str(from_float(-1e+15), 0) == '-.0e+15' + +def test_convert_rational(): + mp.dps = 15 + assert from_rational(30, 5, 53, round_nearest) == (0, 3, 1, 2) + assert from_rational(-7, 4, 53, round_nearest) == (1, 7, -2, 3) + assert to_rational((0, 1, -1, 1)) == (1, 2) + +def test_custom_class(): + class mympf: + @property + def _mpf_(self): + return mpf(3.5)._mpf_ + class mympc: + @property + def _mpc_(self): + return mpf(3.5)._mpf_, mpf(2.5)._mpf_ + assert mpf(2) + mympf() == 5.5 + assert mympf() + mpf(2) == 5.5 + assert mpf(mympf()) == 3.5 + assert mympc() + mpc(2) == mpc(5.5, 2.5) + assert mpc(2) + mympc() == mpc(5.5, 2.5) + assert mpc(mympc()) == (3.5+2.5j) + +def test_conversion_methods(): + class SomethingRandom: + pass + class SomethingReal: + def _mpmath_(self, prec, rounding): + return mp.make_mpf(from_str('1.3', prec, rounding)) + class SomethingComplex: + def _mpmath_(self, prec, rounding): + return mp.make_mpc((from_str('1.3', prec, rounding), \ + from_str('1.7', prec, rounding))) + x = mpf(3) + z = mpc(3) + a = SomethingRandom() + y = SomethingReal() + w = SomethingComplex() + for d in [15, 45]: + mp.dps = d + assert (x+y).ae(mpf('4.3')) + assert (y+x).ae(mpf('4.3')) + assert (x+w).ae(mpc('4.3', '1.7')) + assert (w+x).ae(mpc('4.3', '1.7')) + assert (z+y).ae(mpc('4.3')) + assert (y+z).ae(mpc('4.3')) + assert (z+w).ae(mpc('4.3', '1.7')) + assert (w+z).ae(mpc('4.3', '1.7')) + x-y; y-x; x-w; w-x; z-y; y-z; z-w; w-z + x*y; y*x; x*w; w*x; z*y; y*z; z*w; w*z + x/y; y/x; x/w; w/x; z/y; y/z; z/w; w/z + x**y; y**x; x**w; w**x; z**y; y**z; z**w; w**z + x==y; y==x; x==w; w==x; z==y; y==z; z==w; w==z + mp.dps = 15 + assert x.__add__(a) is NotImplemented + assert x.__radd__(a) is NotImplemented + assert x.__lt__(a) is NotImplemented + assert x.__gt__(a) is NotImplemented + assert x.__le__(a) is NotImplemented + assert x.__ge__(a) is NotImplemented + assert x.__eq__(a) is NotImplemented + assert x.__ne__(a) is NotImplemented + # implementation detail + if 
hasattr(x, "__cmp__"): + assert x.__cmp__(a) is NotImplemented + assert x.__sub__(a) is NotImplemented + assert x.__rsub__(a) is NotImplemented + assert x.__mul__(a) is NotImplemented + assert x.__rmul__(a) is NotImplemented + assert x.__div__(a) is NotImplemented + assert x.__rdiv__(a) is NotImplemented + assert x.__mod__(a) is NotImplemented + assert x.__rmod__(a) is NotImplemented + assert x.__pow__(a) is NotImplemented + assert x.__rpow__(a) is NotImplemented + assert z.__add__(a) is NotImplemented + assert z.__radd__(a) is NotImplemented + assert z.__eq__(a) is NotImplemented + assert z.__ne__(a) is NotImplemented + assert z.__sub__(a) is NotImplemented + assert z.__rsub__(a) is NotImplemented + assert z.__mul__(a) is NotImplemented + assert z.__rmul__(a) is NotImplemented + assert z.__div__(a) is NotImplemented + assert z.__rdiv__(a) is NotImplemented + assert z.__pow__(a) is NotImplemented + assert z.__rpow__(a) is NotImplemented + +def test_mpmathify(): + assert mpmathify('1/2') == 0.5 + assert mpmathify('(1.0+1.0j)') == mpc(1, 1) + assert mpmathify('(1.2e-10 - 3.4e5j)') == mpc('1.2e-10', '-3.4e5') + assert mpmathify('1j') == mpc(1j) + +def test_issue548(): + try: + # This expression is invalid, but may trigger the ReDOS vulnerability + # in the regular expression for parsing complex numbers. + mpmathify('(' + '1' * 5000 + '!j') + except: + return + # The expression is invalid and should raise an exception. + assert False + +def test_compatibility(): + try: + import numpy as np + from fractions import Fraction + from decimal import Decimal + import decimal + except ImportError: + return + # numpy types + for nptype in np.core.numerictypes.typeDict.values(): + if issubclass(nptype, np.complexfloating): + x = nptype(complex(0.5, -0.5)) + elif issubclass(nptype, np.floating): + x = nptype(0.5) + elif issubclass(nptype, np.integer): + x = nptype(2) + # Handle the weird types + try: diff = np.abs(type(np.sqrt(x))(sqrt(x)) - np.sqrt(x)) + except: continue + assert diff < 2.0**-53 + #Fraction and Decimal + oldprec = mp.prec + mp.prec = 1000 + decimal.getcontext().prec = mp.dps + assert sqrt(Fraction(2, 3)).ae(sqrt(mpf('2/3'))) + assert sqrt(Decimal(2)/Decimal(3)).ae(sqrt(mpf('2/3'))) + mp.prec = oldprec diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_diff.py b/phivenv/Lib/site-packages/mpmath/tests/test_diff.py new file mode 100644 index 0000000000000000000000000000000000000000..f5711609da38862eb4fd62c88d35f1704c9425a4 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_diff.py @@ -0,0 +1,61 @@ +from mpmath import * + +def test_diff(): + mp.dps = 15 + assert diff(log, 2.0, n=0).ae(log(2)) + assert diff(cos, 1.0).ae(-sin(1)) + assert diff(abs, 0.0) == 0 + assert diff(abs, 0.0, direction=1) == 1 + assert diff(abs, 0.0, direction=-1) == -1 + assert diff(exp, 1.0).ae(e) + assert diff(exp, 1.0, n=5).ae(e) + assert diff(exp, 2.0, n=5, direction=3*j).ae(e**2) + assert diff(lambda x: x**2, 3.0, method='quad').ae(6) + assert diff(lambda x: 3+x**5, 3.0, n=2, method='quad').ae(540) + assert diff(lambda x: 3+x**5, 3.0, n=2, method='step').ae(540) + assert diffun(sin)(2).ae(cos(2)) + assert diffun(sin, n=2)(2).ae(-sin(2)) + +def test_diffs(): + mp.dps = 15 + assert [chop(d) for d in diffs(sin, 0, 1)] == [0, 1] + assert [chop(d) for d in diffs(sin, 0, 1, method='quad')] == [0, 1] + assert [chop(d) for d in diffs(sin, 0, 2)] == [0, 1, 0] + assert [chop(d) for d in diffs(sin, 0, 2, method='quad')] == [0, 1, 0] + +def test_taylor(): + mp.dps = 15 + # Easy to test since the 
coefficients are exact in floating-point + assert taylor(sqrt, 1, 4) == [1, 0.5, -0.125, 0.0625, -0.0390625] + +def test_diff_partial(): + mp.dps = 15 + x,y,z = xyz = 2,3,7 + f = lambda x,y,z: 3*x**2 * (y+2)**3 * z**5 + assert diff(f, xyz, (0,0,0)).ae(25210500) + assert diff(f, xyz, (0,0,1)).ae(18007500) + assert diff(f, xyz, (0,0,2)).ae(10290000) + assert diff(f, xyz, (0,1,0)).ae(15126300) + assert diff(f, xyz, (0,1,1)).ae(10804500) + assert diff(f, xyz, (0,1,2)).ae(6174000) + assert diff(f, xyz, (0,2,0)).ae(6050520) + assert diff(f, xyz, (0,2,1)).ae(4321800) + assert diff(f, xyz, (0,2,2)).ae(2469600) + assert diff(f, xyz, (1,0,0)).ae(25210500) + assert diff(f, xyz, (1,0,1)).ae(18007500) + assert diff(f, xyz, (1,0,2)).ae(10290000) + assert diff(f, xyz, (1,1,0)).ae(15126300) + assert diff(f, xyz, (1,1,1)).ae(10804500) + assert diff(f, xyz, (1,1,2)).ae(6174000) + assert diff(f, xyz, (1,2,0)).ae(6050520) + assert diff(f, xyz, (1,2,1)).ae(4321800) + assert diff(f, xyz, (1,2,2)).ae(2469600) + assert diff(f, xyz, (2,0,0)).ae(12605250) + assert diff(f, xyz, (2,0,1)).ae(9003750) + assert diff(f, xyz, (2,0,2)).ae(5145000) + assert diff(f, xyz, (2,1,0)).ae(7563150) + assert diff(f, xyz, (2,1,1)).ae(5402250) + assert diff(f, xyz, (2,1,2)).ae(3087000) + assert diff(f, xyz, (2,2,0)).ae(3025260) + assert diff(f, xyz, (2,2,1)).ae(2160900) + assert diff(f, xyz, (2,2,2)).ae(1234800) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_division.py b/phivenv/Lib/site-packages/mpmath/tests/test_division.py new file mode 100644 index 0000000000000000000000000000000000000000..c704cadeb953793ac0a887aa09c4278cf68a2824 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_division.py @@ -0,0 +1,143 @@ +from mpmath.libmp import * +from mpmath import mpf, mp + +from random import randint, choice, seed + +all_modes = [round_floor, round_ceiling, round_down, round_up, round_nearest] + +fb = from_bstr +fi = from_int +ff = from_float + + +def test_div_1_3(): + a = fi(1) + b = fi(3) + c = fi(-1) + + # floor rounds down, ceiling rounds up + assert mpf_div(a, b, 7, round_floor) == fb('0.01010101') + assert mpf_div(a, b, 7, round_ceiling) == fb('0.01010110') + assert mpf_div(a, b, 7, round_down) == fb('0.01010101') + assert mpf_div(a, b, 7, round_up) == fb('0.01010110') + assert mpf_div(a, b, 7, round_nearest) == fb('0.01010101') + + # floor rounds up, ceiling rounds down + assert mpf_div(c, b, 7, round_floor) == fb('-0.01010110') + assert mpf_div(c, b, 7, round_ceiling) == fb('-0.01010101') + assert mpf_div(c, b, 7, round_down) == fb('-0.01010101') + assert mpf_div(c, b, 7, round_up) == fb('-0.01010110') + assert mpf_div(c, b, 7, round_nearest) == fb('-0.01010101') + +def test_mpf_divi_1_3(): + a = 1 + b = fi(3) + c = -1 + assert mpf_rdiv_int(a, b, 7, round_floor) == fb('0.01010101') + assert mpf_rdiv_int(a, b, 7, round_ceiling) == fb('0.01010110') + assert mpf_rdiv_int(a, b, 7, round_down) == fb('0.01010101') + assert mpf_rdiv_int(a, b, 7, round_up) == fb('0.01010110') + assert mpf_rdiv_int(a, b, 7, round_nearest) == fb('0.01010101') + assert mpf_rdiv_int(c, b, 7, round_floor) == fb('-0.01010110') + assert mpf_rdiv_int(c, b, 7, round_ceiling) == fb('-0.01010101') + assert mpf_rdiv_int(c, b, 7, round_down) == fb('-0.01010101') + assert mpf_rdiv_int(c, b, 7, round_up) == fb('-0.01010110') + assert mpf_rdiv_int(c, b, 7, round_nearest) == fb('-0.01010101') + + +def test_div_300(): + + q = fi(1000000) + a = fi(300499999) # a/q is a little less than a half-integer + b = fi(300500000) # b/q exactly a half-integer 
+ c = fi(300500001) # c/q is a little more than a half-integer + + # Check nearest integer rounding (prec=9 as 2**8 < 300 < 2**9) + + assert mpf_div(a, q, 9, round_down) == fi(300) + assert mpf_div(b, q, 9, round_down) == fi(300) + assert mpf_div(c, q, 9, round_down) == fi(300) + assert mpf_div(a, q, 9, round_up) == fi(301) + assert mpf_div(b, q, 9, round_up) == fi(301) + assert mpf_div(c, q, 9, round_up) == fi(301) + + # Nearest even integer is down + assert mpf_div(a, q, 9, round_nearest) == fi(300) + assert mpf_div(b, q, 9, round_nearest) == fi(300) + assert mpf_div(c, q, 9, round_nearest) == fi(301) + + # Nearest even integer is up + a = fi(301499999) + b = fi(301500000) + c = fi(301500001) + assert mpf_div(a, q, 9, round_nearest) == fi(301) + assert mpf_div(b, q, 9, round_nearest) == fi(302) + assert mpf_div(c, q, 9, round_nearest) == fi(302) + + +def test_tight_integer_division(): + # Test that integer division at tightest possible precision is exact + N = 100 + seed(1) + for i in range(N): + a = choice([1, -1]) * randint(1, 1< 1: + print("original matrix (hessenberg):\n", A) + + n = A.rows + + Q, H = mp.hessenberg(A) + + if verbose > 1: + print("Q:\n",Q) + print("H:\n",H) + + B = Q * H * Q.transpose_conj() + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + err0 = 0 + for x in xrange(n): + for y in xrange(n): + err0 += abs(A[y,x] - B[y,x]) + err0 /= n * n + + err1 = 0 + for x in xrange(n): + for y in xrange(x + 2, n): + err1 += abs(H[y,x]) + + if verbose > 0: + print("difference (H):", err0, err1) + + if verbose > 1: + print("B:\n", B) + + assert err0 < eps + assert err1 == 0 + + +def run_schur(A, verbose = 0): + if verbose > 1: + print("original matrix (schur):\n", A) + + n = A.rows + + Q, R = mp.schur(A) + + if verbose > 1: + print("Q:\n", Q) + print("R:\n", R) + + B = Q * R * Q.transpose_conj() + C = Q * Q.transpose_conj() + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + err0 = 0 + for x in xrange(n): + for y in xrange(n): + err0 += abs(A[y,x] - B[y,x]) + err0 /= n * n + + err1 = 0 + for x in xrange(n): + for y in xrange(n): + if x == y: + C[y,x] -= 1 + err1 += abs(C[y,x]) + err1 /= n * n + + err2 = 0 + for x in xrange(n): + for y in xrange(x + 1, n): + err2 += abs(R[y,x]) + + if verbose > 0: + print("difference (S):", err0, err1, err2) + + if verbose > 1: + print("B:\n", B) + + assert err0 < eps + assert err1 < eps + assert err2 == 0 + +def run_eig(A, verbose = 0): + if verbose > 1: + print("original matrix (eig):\n", A) + + n = A.rows + + E, EL, ER = mp.eig(A, left = True, right = True) + + if verbose > 1: + print("E:\n", E) + print("EL:\n", EL) + print("ER:\n", ER) + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + err0 = 0 + for i in xrange(n): + B = A * ER[:,i] - E[i] * ER[:,i] + err0 = max(err0, mp.mnorm(B)) + + B = EL[i,:] * A - EL[i,:] * E[i] + err0 = max(err0, mp.mnorm(B)) + + err0 /= n * n + + if verbose > 0: + print("difference (E):", err0) + + assert err0 < eps + +##################### + +def test_eig_dyn(): + v = 0 + for i in xrange(5): + n = 1 + int(mp.rand() * 5) + if mp.rand() > 0.5: + # real + A = 2 * mp.randmatrix(n, n) - 1 + if mp.rand() > 0.5: + A *= 10 + for x in xrange(n): + for y in xrange(n): + A[x,y] = int(A[x,y]) + else: + A = (2 * mp.randmatrix(n, n) - 1) + 1j * (2 * mp.randmatrix(n, n) - 1) + if mp.rand() > 0.5: + A *= 10 + for x in xrange(n): + for y in xrange(n): + A[x,y] = int(mp.re(A[x,y])) + 1j * int(mp.im(A[x,y])) + + run_hessenberg(A, verbose = v) + run_schur(A, verbose = v) + run_eig(A, verbose = v) + +def test_eig(): + v = 0 + AS = [] + + A = mp.matrix([[2, 1, 0], # 
jordan block of size 3 + [0, 2, 1], + [0, 0, 2]]) + AS.append(A) + AS.append(A.transpose()) + + A = mp.matrix([[2, 0, 0], # jordan block of size 2 + [0, 2, 1], + [0, 0, 2]]) + AS.append(A) + AS.append(A.transpose()) + + A = mp.matrix([[2, 0, 1], # jordan block of size 2 + [0, 2, 0], + [0, 0, 2]]) + AS.append(A) + AS.append(A.transpose()) + + A= mp.matrix([[0, 0, 1], # cyclic + [1, 0, 0], + [0, 1, 0]]) + AS.append(A) + AS.append(A.transpose()) + + for A in AS: + run_hessenberg(A, verbose = v) + run_schur(A, verbose = v) + run_eig(A, verbose = v) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_eigen_symmetric.py b/phivenv/Lib/site-packages/mpmath/tests/test_eigen_symmetric.py new file mode 100644 index 0000000000000000000000000000000000000000..aab3d8ea3142aada6e14ad6d3ea25a7e8293554d --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_eigen_symmetric.py @@ -0,0 +1,357 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from mpmath import mp +from mpmath import libmp + +xrange = libmp.backend.xrange + +def run_eigsy(A, verbose = False): + if verbose: + print("original matrix:\n", str(A)) + + D, Q = mp.eigsy(A) + B = Q * mp.diag(D) * Q.transpose() + C = A - B + E = Q * Q.transpose() - mp.eye(A.rows) + + if verbose: + print("eigenvalues:\n", D) + print("eigenvectors:\n", Q) + + NC = mp.mnorm(C) + NE = mp.mnorm(E) + + if verbose: + print("difference:", NC, "\n", C, "\n") + print("difference:", NE, "\n", E, "\n") + + eps = mp.exp( 0.8 * mp.log(mp.eps)) + + assert NC < eps + assert NE < eps + + return NC + +def run_eighe(A, verbose = False): + if verbose: + print("original matrix:\n", str(A)) + + D, Q = mp.eighe(A) + B = Q * mp.diag(D) * Q.transpose_conj() + C = A - B + E = Q * Q.transpose_conj() - mp.eye(A.rows) + + if verbose: + print("eigenvalues:\n", D) + print("eigenvectors:\n", Q) + + NC = mp.mnorm(C) + NE = mp.mnorm(E) + + if verbose: + print("difference:", NC, "\n", C, "\n") + print("difference:", NE, "\n", E, "\n") + + eps = mp.exp( 0.8 * mp.log(mp.eps)) + + assert NC < eps + assert NE < eps + + return NC + +def run_svd_r(A, full_matrices = False, verbose = True): + + m, n = A.rows, A.cols + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + if verbose: + print("original matrix:\n", str(A)) + print("full", full_matrices) + + U, S0, V = mp.svd_r(A, full_matrices = full_matrices) + + S = mp.zeros(U.cols, V.rows) + for j in xrange(min(m, n)): + S[j,j] = S0[j] + + if verbose: + print("U:\n", str(U)) + print("S:\n", str(S0)) + print("V:\n", str(V)) + + C = U * S * V - A + err = mp.mnorm(C) + if verbose: + print("C\n", str(C), "\n", err) + assert err < eps + + D = V * V.transpose() - mp.eye(V.rows) + err = mp.mnorm(D) + if verbose: + print("D:\n", str(D), "\n", err) + assert err < eps + + E = U.transpose() * U - mp.eye(U.cols) + err = mp.mnorm(E) + if verbose: + print("E:\n", str(E), "\n", err) + assert err < eps + +def run_svd_c(A, full_matrices = False, verbose = True): + + m, n = A.rows, A.cols + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + if verbose: + print("original matrix:\n", str(A)) + print("full", full_matrices) + + U, S0, V = mp.svd_c(A, full_matrices = full_matrices) + + S = mp.zeros(U.cols, V.rows) + for j in xrange(min(m, n)): + S[j,j] = S0[j] + + if verbose: + print("U:\n", str(U)) + print("S:\n", str(S0)) + print("V:\n", str(V)) + + C = U * S * V - A + err = mp.mnorm(C) + if verbose: + print("C\n", str(C), "\n", err) + assert err < eps + + D = V * V.transpose_conj() - mp.eye(V.rows) + err = mp.mnorm(D) + if verbose: + print("D:\n", str(D), "\n", err) + assert err < eps + + 
E = U.transpose_conj() * U - mp.eye(U.cols) + err = mp.mnorm(E) + if verbose: + print("E:\n", str(E), "\n", err) + assert err < eps + +def run_gauss(qtype, a, b): + eps = 1e-5 + + d, e = mp.gauss_quadrature(len(a), qtype) + d -= mp.matrix(a) + e -= mp.matrix(b) + + assert mp.mnorm(d) < eps + assert mp.mnorm(e) < eps + +def irandmatrix(n, range = 10): + """ + random matrix with integer entries + """ + A = mp.matrix(n, n) + for i in xrange(n): + for j in xrange(n): + A[i,j]=int( (2 * mp.rand() - 1) * range) + return A + +####################### + +def test_eighe_fixed_matrix(): + A = mp.matrix([[2, 3], [3, 5]]) + run_eigsy(A) + run_eighe(A) + + A = mp.matrix([[7, -11], [-11, 13]]) + run_eigsy(A) + run_eighe(A) + + A = mp.matrix([[2, 11, 7], [11, 3, 13], [7, 13, 5]]) + run_eigsy(A) + run_eighe(A) + + A = mp.matrix([[2, 0, 7], [0, 3, 1], [7, 1, 5]]) + run_eigsy(A) + run_eighe(A) + + # + + A = mp.matrix([[2, 3+7j], [3-7j, 5]]) + run_eighe(A) + + A = mp.matrix([[2, -11j, 0], [+11j, 3, 29j], [0, -29j, 5]]) + run_eighe(A) + + A = mp.matrix([[2, 11 + 17j, 7 + 19j], [11 - 17j, 3, -13 + 23j], [7 - 19j, -13 - 23j, 5]]) + run_eighe(A) + +def test_eigsy_randmatrix(): + N = 5 + + for a in xrange(10): + A = 2 * mp.randmatrix(N, N) - 1 + + for i in xrange(0, N): + for j in xrange(i + 1, N): + A[j,i] = A[i,j] + + run_eigsy(A) + +def test_eighe_randmatrix(): + N = 5 + + for a in xrange(10): + A = (2 * mp.randmatrix(N, N) - 1) + 1j * (2 * mp.randmatrix(N, N) - 1) + + for i in xrange(0, N): + A[i,i] = mp.re(A[i,i]) + for j in xrange(i + 1, N): + A[j,i] = mp.conj(A[i,j]) + + run_eighe(A) + +def test_eigsy_irandmatrix(): + N = 4 + R = 4 + + for a in xrange(10): + A=irandmatrix(N, R) + + for i in xrange(0, N): + for j in xrange(i + 1, N): + A[j,i] = A[i,j] + + run_eigsy(A) + +def test_eighe_irandmatrix(): + N = 4 + R = 4 + + for a in xrange(10): + A=irandmatrix(N, R) + 1j * irandmatrix(N, R) + + for i in xrange(0, N): + A[i,i] = mp.re(A[i,i]) + for j in xrange(i + 1, N): + A[j,i] = mp.conj(A[i,j]) + + run_eighe(A) + +def test_svd_r_rand(): + for i in xrange(5): + full = mp.rand() > 0.5 + m = 1 + int(mp.rand() * 10) + n = 1 + int(mp.rand() * 10) + A = 2 * mp.randmatrix(m, n) - 1 + if mp.rand() > 0.5: + A *= 10 + for x in xrange(m): + for y in xrange(n): + A[x,y]=int(A[x,y]) + + run_svd_r(A, full_matrices = full, verbose = False) + +def test_svd_c_rand(): + for i in xrange(5): + full = mp.rand() > 0.5 + m = 1 + int(mp.rand() * 10) + n = 1 + int(mp.rand() * 10) + A = (2 * mp.randmatrix(m, n) - 1) + 1j * (2 * mp.randmatrix(m, n) - 1) + if mp.rand() > 0.5: + A *= 10 + for x in xrange(m): + for y in xrange(n): + A[x,y]=int(mp.re(A[x,y])) + 1j * int(mp.im(A[x,y])) + + run_svd_c(A, full_matrices=full, verbose=False) + +def test_svd_test_case(): + # a test case from Golub and Reinsch + # (see wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).) 
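The Golub and Reinsch case continued below compares computed singular values with exactly known ones, while the run_svd_* helpers above verify the complementary property that the factors reconstruct the input. A compact sketch of that reconstruction check, using mpmath's convention that A = U*S*V with no further transpose on V (matrix values arbitrary):

from mpmath import mp

A = mp.matrix([[1, 2], [3, 4], [5, 6]])
U, S, V = mp.svd_r(A)              # thin SVD: S is a vector of singular values
S_full = mp.zeros(U.cols, V.rows)
for j in range(min(A.rows, A.cols)):
    S_full[j, j] = S[j]
assert mp.mnorm(U * S_full * V - A) < 1e-10   # A is reconstructed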
+ + eps = mp.exp(0.8 * mp.log(mp.eps)) + + a = [[22, 10, 2, 3, 7], + [14, 7, 10, 0, 8], + [-1, 13, -1, -11, 3], + [-3, -2, 13, -2, 4], + [ 9, 8, 1, -2, 4], + [ 9, 1, -7, 5, -1], + [ 2, -6, 6, 5, 1], + [ 4, 5, 0, -2, 2]] + + a = mp.matrix(a) + b = mp.matrix([mp.sqrt(1248), 20, mp.sqrt(384), 0, 0]) + + S = mp.svd_r(a, compute_uv = False) + S -= b + assert mp.mnorm(S) < eps + + S = mp.svd_c(a, compute_uv = False) + S -= b + assert mp.mnorm(S) < eps + + +def test_gauss_quadrature_static(): + a = [-0.57735027, 0.57735027] + b = [ 1, 1] + run_gauss("legendre", a , b) + + a = [ -0.906179846, -0.538469310, 0, 0.538469310, 0.906179846] + b = [ 0.23692689, 0.47862867, 0.56888889, 0.47862867, 0.23692689] + run_gauss("legendre", a , b) + + a = [ 0.06943184, 0.33000948, 0.66999052, 0.93056816] + b = [ 0.17392742, 0.32607258, 0.32607258, 0.17392742] + run_gauss("legendre01", a , b) + + a = [-0.70710678, 0.70710678] + b = [ 0.88622693, 0.88622693] + run_gauss("hermite", a , b) + + a = [ -2.02018287, -0.958572465, 0, 0.958572465, 2.02018287] + b = [ 0.01995324, 0.39361932, 0.94530872, 0.39361932, 0.01995324] + run_gauss("hermite", a , b) + + a = [ 0.41577456, 2.29428036, 6.28994508] + b = [ 0.71109301, 0.27851773, 0.01038926] + run_gauss("laguerre", a , b) + +def test_gauss_quadrature_dynamic(verbose = False): + n = 5 + + A = mp.randmatrix(2 * n, 1) + + def F(x): + r = 0 + for i in xrange(len(A) - 1, -1, -1): + r = r * x + A[i] + return r + + def run(qtype, FW, R, alpha = 0, beta = 0): + X, W = mp.gauss_quadrature(n, qtype, alpha = alpha, beta = beta) + + a = 0 + for i in xrange(len(X)): + a += W[i] * F(X[i]) + + b = mp.quad(lambda x: FW(x) * F(x), R) + + c = mp.fabs(a - b) + + if verbose: + print(qtype, c, a, b) + + assert c < 1e-5 + + run("legendre", lambda x: 1, [-1, 1]) + run("legendre01", lambda x: 1, [0, 1]) + run("hermite", lambda x: mp.exp(-x*x), [-mp.inf, mp.inf]) + run("laguerre", lambda x: mp.exp(-x), [0, mp.inf]) + run("glaguerre", lambda x: mp.sqrt(x)*mp.exp(-x), [0, mp.inf], alpha = 1 / mp.mpf(2)) + run("chebyshev1", lambda x: 1/mp.sqrt(1-x*x), [-1, 1]) + run("chebyshev2", lambda x: mp.sqrt(1-x*x), [-1, 1]) + run("jacobi", lambda x: (1-x)**(1/mp.mpf(3)) * (1+x)**(1/mp.mpf(5)), [-1, 1], alpha = 1 / mp.mpf(3), beta = 1 / mp.mpf(5) ) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_elliptic.py b/phivenv/Lib/site-packages/mpmath/tests/test_elliptic.py new file mode 100644 index 0000000000000000000000000000000000000000..4dddc2df34b8d2fa7f2028b3501e5b7f140d8912 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_elliptic.py @@ -0,0 +1,670 @@ +""" +Limited tests of the elliptic functions module. A full suite of +extensive testing can be found in elliptic_torture_tests.py + +Author of the first version: M.T. Taschuk + +References: + +[1] Abramowitz & Stegun. 
'Handbook of Mathematical Functions, 9th Ed.', + (Dover duplicate of 1972 edition) +[2] Whittaker 'A Course of Modern Analysis, 4th Ed.', 1946, + Cambridge University Press + +""" + +import mpmath +import random +import pytest + +from mpmath import * + +def mpc_ae(a, b, eps=eps): + res = True + res = res and a.real.ae(b.real, eps) + res = res and a.imag.ae(b.imag, eps) + return res + +zero = mpf(0) +one = mpf(1) + +jsn = ellipfun('sn') +jcn = ellipfun('cn') +jdn = ellipfun('dn') + +calculate_nome = lambda k: qfrom(k=k) + +def test_ellipfun(): + mp.dps = 15 + assert ellipfun('ss', 0, 0) == 1 + assert ellipfun('cc', 0, 0) == 1 + assert ellipfun('dd', 0, 0) == 1 + assert ellipfun('nn', 0, 0) == 1 + assert ellipfun('sn', 0.25, 0).ae(sin(0.25)) + assert ellipfun('cn', 0.25, 0).ae(cos(0.25)) + assert ellipfun('dn', 0.25, 0).ae(1) + assert ellipfun('ns', 0.25, 0).ae(csc(0.25)) + assert ellipfun('nc', 0.25, 0).ae(sec(0.25)) + assert ellipfun('nd', 0.25, 0).ae(1) + assert ellipfun('sc', 0.25, 0).ae(tan(0.25)) + assert ellipfun('sd', 0.25, 0).ae(sin(0.25)) + assert ellipfun('cd', 0.25, 0).ae(cos(0.25)) + assert ellipfun('cs', 0.25, 0).ae(cot(0.25)) + assert ellipfun('dc', 0.25, 0).ae(sec(0.25)) + assert ellipfun('ds', 0.25, 0).ae(csc(0.25)) + assert ellipfun('sn', 0.25, 1).ae(tanh(0.25)) + assert ellipfun('cn', 0.25, 1).ae(sech(0.25)) + assert ellipfun('dn', 0.25, 1).ae(sech(0.25)) + assert ellipfun('ns', 0.25, 1).ae(coth(0.25)) + assert ellipfun('nc', 0.25, 1).ae(cosh(0.25)) + assert ellipfun('nd', 0.25, 1).ae(cosh(0.25)) + assert ellipfun('sc', 0.25, 1).ae(sinh(0.25)) + assert ellipfun('sd', 0.25, 1).ae(sinh(0.25)) + assert ellipfun('cd', 0.25, 1).ae(1) + assert ellipfun('cs', 0.25, 1).ae(csch(0.25)) + assert ellipfun('dc', 0.25, 1).ae(1) + assert ellipfun('ds', 0.25, 1).ae(csch(0.25)) + assert ellipfun('sn', 0.25, 0.5).ae(0.24615967096986145833) + assert ellipfun('cn', 0.25, 0.5).ae(0.96922928989378439337) + assert ellipfun('dn', 0.25, 0.5).ae(0.98473484156599474563) + assert ellipfun('ns', 0.25, 0.5).ae(4.0624038700573130369) + assert ellipfun('nc', 0.25, 0.5).ae(1.0317476065024692949) + assert ellipfun('nd', 0.25, 0.5).ae(1.0155017958029488665) + assert ellipfun('sc', 0.25, 0.5).ae(0.25397465134058993408) + assert ellipfun('sd', 0.25, 0.5).ae(0.24997558792415733063) + assert ellipfun('cd', 0.25, 0.5).ae(0.98425408443195497052) + assert ellipfun('cs', 0.25, 0.5).ae(3.9374008182374110826) + assert ellipfun('dc', 0.25, 0.5).ae(1.0159978158253033913) + assert ellipfun('ds', 0.25, 0.5).ae(4.0003906313579720593) + + + + +def test_calculate_nome(): + mp.dps = 100 + + q = calculate_nome(zero) + assert(q == zero) + + mp.dps = 25 + # used Mathematica's EllipticNomeQ[m] + math1 = [(mpf(1)/10, mpf('0.006584651553858370274473060')), + (mpf(2)/10, mpf('0.01394285727531826872146409')), + (mpf(3)/10, mpf('0.02227743615715350822901627')), + (mpf(4)/10, mpf('0.03188334731336317755064299')), + (mpf(5)/10, mpf('0.04321391826377224977441774')), + (mpf(6)/10, mpf('0.05702025781460967637754953')), + (mpf(7)/10, mpf('0.07468994353717944761143751')), + (mpf(8)/10, mpf('0.09927369733882489703607378')), + (mpf(9)/10, mpf('0.1401731269542615524091055')), + (mpf(9)/10, mpf('0.1401731269542615524091055'))] + + for i in math1: + m = i[0] + q = calculate_nome(sqrt(m)) + assert q.ae(i[1]) + + mp.dps = 15 + +def test_jtheta(): + mp.dps = 25 + + z = q = zero + for n in range(1,5): + value = jtheta(n, z, q) + assert(value == (n-1)//2) + + for q in [one, mpf(2)]: + for n in range(1,5): + pytest.raises(ValueError, lambda: jtheta(n, 
z, q))
+
+    z = one/10
+    q = one/11
+
+    # Mathematica N[EllipticTheta[1, 1/10, 1/11], 25]
+    res = mpf('0.1069552990104042681962096')
+    result = jtheta(1, z, q)
+    assert(result.ae(res))
+
+    # Mathematica N[EllipticTheta[2, 1/10, 1/11], 25]
+    res = mpf('1.101385760258855791140606')
+    result = jtheta(2, z, q)
+    assert(result.ae(res))
+
+    # Mathematica N[EllipticTheta[3, 1/10, 1/11], 25]
+    res = mpf('1.178319743354331061795905')
+    result = jtheta(3, z, q)
+    assert(result.ae(res))
+
+    # Mathematica N[EllipticTheta[4, 1/10, 1/11], 25]
+    res = mpf('0.8219318954665153577314573')
+    result = jtheta(4, z, q)
+    assert(result.ae(res))
+
+    # test for sin zeros for jtheta(1, z, q)
+    # test for cos zeros for jtheta(2, z, q)
+    z1 = pi
+    z2 = pi/2
+    for i in range(10):
+        qstring = str(random.random())
+        q = mpf(qstring)
+        result = jtheta(1, z1, q)
+        assert(result.ae(0))
+        result = jtheta(2, z2, q)
+        assert(result.ae(0))
+    mp.dps = 15
+
+
+def test_jtheta_issue_79():
+    # near the circle of convergence |q| = 1 the convergence slows
+    # down; for |q| > Q_LIM the theta functions raise ValueError
+    mp.dps = 30
+    mp.dps += 30
+    q = mpf(6)/10 - one/10**6 - mpf(8)/10 * j
+    mp.dps -= 30
+    # In Mathematica, run first
+    # N[EllipticTheta[3, 1, 6/10 - 10^-6 - 8/10*I], 2000]
+    # and then this works:
+    # N[EllipticTheta[3, 1, 6/10 - 10^-6 - 8/10*I], 30]
+    res = mpf('32.0031009628901652627099524264') + \
+          mpf('16.6153027998236087899308935624') * j
+    result = jtheta(3, 1, q)
+    assert(result.ae(res))
+    # check that for abs(q) > Q_LIM a ValueError exception is raised
+    mp.dps += 30
+    q = mpf(6)/10 - one/10**7 - mpf(8)/10 * j
+    mp.dps -= 30
+    pytest.raises(ValueError, lambda: jtheta(3, 1, q))
+
+    # bug reported in issue 79
+    mp.dps = 100
+    z = (1+j)/3
+    q = mpf(368983957219251)/10**15 + mpf(636363636363636)/10**15 * j
+    # Mathematica N[EllipticTheta[1, z, q], 35]
+    res = mpf('2.4439389177990737589761828991467471') + \
+          mpf('0.5446453005688226915290954851851490') * j
+    mp.dps = 30
+    result = jtheta(1, z, q)
+    assert(result.ae(res))
+    mp.dps = 80
+    z = 3 + 4*j
+    q = 0.5 + 0.5*j
+    r1 = jtheta(1, z, q)
+    mp.dps = 15
+    r2 = jtheta(1, z, q)
+    assert r1.ae(r2)
+    mp.dps = 80
+    z = 3 + j
+    q1 = exp(j*3)
+    # longer test
+    # for n in range(1, 6)
+    for n in range(1, 2):
+        mp.dps = 80
+        q = q1*(1 - mpf(1)/10**n)
+        r1 = jtheta(1, z, q)
+        mp.dps = 15
+        r2 = jtheta(1, z, q)
+        assert r1.ae(r2)
+    mp.dps = 15
+    # issue 79 about high derivatives
+    assert jtheta(3, 4.5, 0.25, 9).ae(1359.04892680683)
+    assert jtheta(3, 4.5, 0.25, 50).ae(-6.14832772630905e+33)
+    mp.dps = 50
+    r = jtheta(3, 4.5, 0.25, 9)
+    assert r.ae('1359.048926806828939547859396600218966947753213803')
+    r = jtheta(3, 4.5, 0.25, 50)
+    assert r.ae('-6148327726309051673317975084654262.4119215720343656')
+
+def test_jtheta_identities():
+    """
+    Tests some of the Jacobi theta identities found in Abramowitz,
+    Sec. 16.28, Pg. 576. The identities are tested to 1 part in 10^98.
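+    For example, 16.28.5 is the quartic relation
+    v_2(0, q)**4 + v_4(0, q)**4 == v_3(0, q)**4, which is checked
+    last inside the loop below.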
+    """
+    mp.dps = 110
+    eps1 = ldexp(eps, 30)
+
+    for i in range(10):
+        qstring = str(random.random())
+        q = mpf(qstring)
+
+        zstring = str(10*random.random())
+        z = mpf(zstring)
+        # Abramowitz 16.28.1
+        # v_1(z, q)**2 * v_4(0, q)**2 = v_3(z, q)**2 * v_2(0, q)**2
+        #                             - v_2(z, q)**2 * v_3(0, q)**2
+        term1 = (jtheta(1, z, q)**2) * (jtheta(4, zero, q)**2)
+        term2 = (jtheta(3, z, q)**2) * (jtheta(2, zero, q)**2)
+        term3 = (jtheta(2, z, q)**2) * (jtheta(3, zero, q)**2)
+        equality = term1 - term2 + term3
+        assert(equality.ae(0, eps1))
+
+        zstring = str(100*random.random())
+        z = mpf(zstring)
+        # Abramowitz 16.28.2
+        # v_2(z, q)**2 * v_4(0, q)**2 = v_4(z, q)**2 * v_2(0, q)**2
+        #                             - v_1(z, q)**2 * v_3(0, q)**2
+        term1 = (jtheta(2, z, q)**2) * (jtheta(4, zero, q)**2)
+        term2 = (jtheta(4, z, q)**2) * (jtheta(2, zero, q)**2)
+        term3 = (jtheta(1, z, q)**2) * (jtheta(3, zero, q)**2)
+        equality = term1 - term2 + term3
+        assert(equality.ae(0, eps1))
+
+        # Abramowitz 16.28.3
+        # v_3(z, q)**2 * v_4(0, q)**2 = v_4(z, q)**2 * v_3(0, q)**2
+        #                             - v_1(z, q)**2 * v_2(0, q)**2
+        term1 = (jtheta(3, z, q)**2) * (jtheta(4, zero, q)**2)
+        term2 = (jtheta(4, z, q)**2) * (jtheta(3, zero, q)**2)
+        term3 = (jtheta(1, z, q)**2) * (jtheta(2, zero, q)**2)
+        equality = term1 - term2 + term3
+        assert(equality.ae(0, eps1))
+
+        # Abramowitz 16.28.4
+        # v_4(z, q)**2 * v_4(0, q)**2 = v_3(z, q)**2 * v_3(0, q)**2
+        #                             - v_2(z, q)**2 * v_2(0, q)**2
+        term1 = (jtheta(4, z, q)**2) * (jtheta(4, zero, q)**2)
+        term2 = (jtheta(3, z, q)**2) * (jtheta(3, zero, q)**2)
+        term3 = (jtheta(2, z, q)**2) * (jtheta(2, zero, q)**2)
+        equality = term1 - term2 + term3
+        assert(equality.ae(0, eps1))
+
+        # Abramowitz 16.28.5
+        # v_2(0, q)**4 + v_4(0, q)**4 == v_3(0, q)**4
+        term1 = (jtheta(2, zero, q))**4
+        term2 = (jtheta(4, zero, q))**4
+        term3 = (jtheta(3, zero, q))**4
+        equality = term1 + term2 - term3
+        assert(equality.ae(0, eps1))
+    mp.dps = 15
+
+def test_jtheta_complex():
+    mp.dps = 30
+    z = mpf(1)/4 + j/8
+    q = mpf(1)/3 + j/7
+    # Mathematica N[EllipticTheta[1, 1/4 + I/8, 1/3 + I/7], 35]
+    res = mpf('0.31618034835986160705729105731678285') + \
+          mpf('0.07542013825835103435142515194358975') * j
+    r = jtheta(1, z, q)
+    assert(mpc_ae(r, res))
+
+    # Mathematica N[EllipticTheta[2, 1/4 + I/8, 1/3 + I/7], 35]
+    res = mpf('1.6530986428239765928634711417951828') + \
+          mpf('0.2015344864707197230526742145361455') * j
+    r = jtheta(2, z, q)
+    assert(mpc_ae(r, res))
+
+    # Mathematica N[EllipticTheta[3, 1/4 + I/8, 1/3 + I/7], 35]
+    res = mpf('1.6520564411784228184326012700348340') + \
+          mpf('0.1998129119671271328684690067401823') * j
+    r = jtheta(3, z, q)
+    assert(mpc_ae(r, res))
+
+    # Mathematica N[EllipticTheta[4, 1/4 + I/8, 1/3 + I/7], 35]
+    res = mpf('0.37619082382228348252047624089973824') - \
+          mpf('0.15623022130983652972686227200681074') * j
+    r = jtheta(4, z, q)
+    assert(mpc_ae(r, res))
+
+    # check some theta function identities
+    mp.dps = 100
+    z = mpf(1)/4 + j/8
+    q = mpf(1)/3 + j/7
+    mp.dps += 10
+    a = [0, 0, jtheta(2, 0, q), jtheta(3, 0, q), jtheta(4, 0, q)]
+    t = [0, jtheta(1, z, q), jtheta(2, z, q), jtheta(3, z, q), jtheta(4, z, q)]
+    r = [(t[2]*a[4])**2 - (t[4]*a[2])**2 + (t[1]*a[3])**2,
+         (t[3]*a[4])**2 - (t[4]*a[3])**2 + (t[1]*a[2])**2,
+         (t[1]*a[4])**2 - (t[3]*a[2])**2 + (t[2]*a[3])**2,
+         (t[4]*a[4])**2 - (t[3]*a[3])**2 + (t[2]*a[2])**2,
+         a[2]**4 + a[4]**4 - a[3]**4]
+    mp.dps -= 10
+    for x in r:
+        assert(mpc_ae(x, mpc(0)))
+    mp.dps = 15
+
+def test_djtheta():
+    mp.dps = 30
+
+    z = one/7 + j/3
+    q = one/8 + j/5
+    # Mathematica
N[EllipticThetaPrime[1, 1/7 + I/3, 1/8 + I/5], 35]
+    res = mpf('1.5555195883277196036090928995803201') - \
+          mpf('0.02439761276895463494054149673076275') * j
+    result = jtheta(1, z, q, 1)
+    assert(mpc_ae(result, res))
+
+    # Mathematica N[EllipticThetaPrime[2, 1/7 + I/3, 1/8 + I/5], 35]
+    res = mpf('0.19825296689470982332701283509685662') - \
+          mpf('0.46038135182282106983251742935250009') * j
+    result = jtheta(2, z, q, 1)
+    assert(mpc_ae(result, res))
+
+    # Mathematica N[EllipticThetaPrime[3, 1/7 + I/3, 1/8 + I/5], 35]
+    res = mpf('0.36492498415476212680896699407390026') - \
+          mpf('0.57743812698666990209897034525640369') * j
+    result = jtheta(3, z, q, 1)
+    assert(mpc_ae(result, res))
+
+    # Mathematica N[EllipticThetaPrime[4, 1/7 + I/3, 1/8 + I/5], 35]
+    res = mpf('-0.38936892528126996010818803742007352') + \
+          mpf('0.66549886179739128256269617407313625') * j
+    result = jtheta(4, z, q, 1)
+    assert(mpc_ae(result, res))
+
+    for i in range(10):
+        q = (one*random.random() + j*random.random())/2
+        # identity in Whittaker & Watson, sec. 21.41
+        a = jtheta(1, 0, q, 1)
+        b = jtheta(2, 0, q)*jtheta(3, 0, q)*jtheta(4, 0, q)
+        assert(a.ae(b))
+
+    # test higher derivatives
+    mp.dps = 20
+    for q,z in [(one/3, one/5), (one/3 + j/8, one/5),
+                (one/3, one/5 + j/8), (one/3 + j/7, one/5 + j/8)]:
+        for n in [1, 2, 3, 4]:
+            r = jtheta(n, z, q, 2)
+            r1 = diff(lambda zz: jtheta(n, zz, q), z, n=2)
+            assert r.ae(r1)
+            r = jtheta(n, z, q, 3)
+            r1 = diff(lambda zz: jtheta(n, zz, q), z, n=3)
+            assert r.ae(r1)
+
+    # identity in Whittaker & Watson, sec. 21.41
+    q = one/3
+    z = zero
+    a = [0]*5
+    a[1] = jtheta(1, z, q, 3)/jtheta(1, z, q, 1)
+    for n in [2,3,4]:
+        a[n] = jtheta(n, z, q, 2)/jtheta(n, z, q)
+    equality = a[2] + a[3] + a[4] - a[1]
+    assert(equality.ae(0))
+    mp.dps = 15
+
+def test_jsn():
+    """
+    Test some special cases of the sn(z, q) function.
+    """
+    mp.dps = 100
+
+    # trivial case
+    result = jsn(zero, zero)
+    assert(result == zero)
+
+    # Abramowitz Table 16.5
+    #
+    # sn(0, m) = 0
+
+    for i in range(10):
+        qstring = str(random.random())
+        q = mpf(qstring)
+
+        equality = jsn(zero, q)
+        assert(equality.ae(0))
+
+    # Abramowitz Table 16.6.1
+    #
+    # sn(z, 0) = sin(z), m == 0
+    #
+    # sn(z, 1) = tanh(z), m == 1
+    #
+    # It would be nice to test these, but I find that they run
+    # into numerical trouble. I'm currently treating them as
+    # boundary cases for the sn function.
+
+    mp.dps = 25
+    arg = one/10
+    # N[JacobiSN[1/10, 2^-100], 25]
+    res = mpf('0.09983341664682815230681420')
+    m = ldexp(one, -100)
+    result = jsn(arg, m)
+    assert(result.ae(res))
+
+    # N[JacobiSN[1/10, 1/10], 25]
+    res = mpf('0.09981686718599080096451168')
+    result = jsn(arg, arg)
+    assert(result.ae(res))
+    mp.dps = 15
+
+def test_jcn():
+    """
+    Test some special cases of the cn(z, q) function.
+    """
+    mp.dps = 100
+
+    # Abramowitz Table 16.5
+    # cn(0, q) = 1
+    qstring = str(random.random())
+    q = mpf(qstring)
+    cn = jcn(zero, q)
+    assert(cn.ae(one))
+
+    # Abramowitz Table 16.6.2
+    #
+    # cn(u, 0) = cos(u), m == 0
+    #
+    # cn(u, 1) = sech(z), m == 1
+    #
+    # It would be nice to test these, but I find that they run
+    # into numerical trouble. I'm currently treating them as
+    # boundary cases for the cn function.
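+    # Instead, the m -> 0 limit is probed just below with the nearby
+    # modulus m = 2**-100, where jcn(1/10, m) should agree with
+    # cos(1/10) = 0.995004165278... to the working precision.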
+
+    mp.dps = 25
+    arg = one/10
+    m = ldexp(one, -100)
+    # N[JacobiCN[1/10, 2^-100], 25]
+    res = mpf('0.9950041652780257660955620')
+    result = jcn(arg, m)
+    assert(result.ae(res))
+
+    # N[JacobiCN[1/10, 1/10], 25]
+    res = mpf('0.9950058256237368748520459')
+    result = jcn(arg, arg)
+    assert(result.ae(res))
+    mp.dps = 15
+
+def test_jdn():
+    """
+    Test some special cases of the dn(z, q) function.
+    """
+    mp.dps = 100
+
+    # Abramowitz Table 16.5
+    # dn(0, q) = 1
+    mstring = str(random.random())
+    m = mpf(mstring)
+
+    dn = jdn(zero, m)
+    assert(dn.ae(one))
+
+    mp.dps = 25
+    # N[JacobiDN[1/10, 1/10], 25]
+    res = mpf('0.9995017055025556219713297')
+    arg = one/10
+    result = jdn(arg, arg)
+    assert(result.ae(res))
+    mp.dps = 15
+
+
+def test_sn_cn_dn_identities():
+    """
+    Tests some of the Jacobi elliptic function identities found on
+    MathWorld; they do not appear in Abramowitz.
+    """
+    mp.dps = 100
+    N = 5
+    for i in range(N):
+        qstring = str(random.random())
+        q = mpf(qstring)
+        zstring = str(100*random.random())
+        z = mpf(zstring)
+
+        # MathWorld
+        # sn(z, q)**2 + cn(z, q)**2 == 1
+        term1 = jsn(z, q)**2
+        term2 = jcn(z, q)**2
+        equality = one - term1 - term2
+        assert(equality.ae(0))
+
+    # MathWorld
+    # k**2 * sn(z, m)**2 + dn(z, m)**2 == 1
+    for i in range(N):
+        mstring = str(random.random())
+        m = mpf(mstring)
+        k = m.sqrt()
+        zstring = str(10*random.random())
+        z = mpf(zstring)
+        term1 = k**2 * jsn(z, m)**2
+        term2 = jdn(z, m)**2
+        equality = one - term1 - term2
+        assert(equality.ae(0))
+
+
+    for i in range(N):
+        mstring = str(random.random())
+        m = mpf(mstring)
+        k = m.sqrt()
+        zstring = str(random.random())
+        z = mpf(zstring)
+
+        # MathWorld
+        # k**2 * cn(z, m)**2 + (1 - k**2) = dn(z, m)**2
+        term1 = k**2 * jcn(z, m)**2
+        term2 = 1 - k**2
+        term3 = jdn(z, m)**2
+        equality = term3 - term1 - term2
+        assert(equality.ae(0))
+
+        K = ellipk(k**2)
+        # Abramowitz Table 16.5
+        # sn(K, m) = 1; K is K(k), the first complete elliptic integral
+        r = jsn(K, m)
+        assert(r.ae(one))
+
+        # Abramowitz Table 16.5
+        # cn(K, m) = 0; K is K(k), the first complete elliptic integral
+        equality = jcn(K, m)
+        assert(equality.ae(0))
+
+        # Abramowitz Table 16.6.3
+        # dn(z, 0) = 1, m == 0
+        z = m
+        value = jdn(z, zero)
+        assert(value.ae(one))
+
+    mp.dps = 15
+
+def test_sn_cn_dn_complex():
+    mp.dps = 30
+    # N[JacobiSN[1/4 + I/8, 1/3 + I/7], 35] in Mathematica
+    res = mpf('0.2495674401066275492326652143537') + \
+          mpf('0.12017344422863833381301051702823') * j
+    u = mpf(1)/4 + j/8
+    m = mpf(1)/3 + j/7
+    r = jsn(u, m)
+    assert(mpc_ae(r, res))
+
+    # N[JacobiCN[1/4 + I/8, 1/3 + I/7], 35]
+    res = mpf('0.9762691700944007312693721148331') - \
+          mpf('0.0307203994181623243583169154824') * j
+    r = jcn(u, m)
+    assert(mpc_ae(r, res))
+
+    # N[JacobiDN[1/4 + I/8, 1/3 + I/7], 35]
+    res = mpf('0.99639490163039577560547478589753039') - \
+          mpf('0.01346296520008176393432491077244994') * j
+    r = jdn(u, m)
+    assert(mpc_ae(r, res))
+    mp.dps = 15
+
+def test_elliptic_integrals():
+    # Test cases from Carlson's paper
+    mp.dps = 15
+    assert elliprd(0,2,1).ae(1.7972103521033883112)
+    assert elliprd(2,3,4).ae(0.16510527294261053349)
+    assert elliprd(j,-j,2).ae(0.65933854154219768919)
+    assert elliprd(0,j,-j).ae(1.2708196271909686299 + 2.7811120159520578777j)
+    assert elliprd(0,j-1,j).ae(-1.8577235439239060056 - 0.96193450888838559989j)
+    assert elliprd(-2-j,-j,-1+j).ae(1.8249027393703805305 - 1.2218475784827035855j)
+    # extra test cases
+    assert elliprg(0,0,0) == 0
+    assert elliprg(0,0,16).ae(2)
+    assert
elliprg(0,16,0).ae(2) + assert elliprg(16,0,0).ae(2) + assert elliprg(1,4,0).ae(1.2110560275684595248036) + assert elliprg(1,0,4).ae(1.2110560275684595248036) + assert elliprg(0,4,1).ae(1.2110560275684595248036) + # should be symmetric -- fixes a bug present in the paper + x,y,z = 1,1j,-1+1j + assert elliprg(x,y,z).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(x,z,y).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(y,x,z).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(y,z,x).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(z,x,y).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(z,y,x).ae(0.64139146875812627545 + 0.58085463774808290907j) + + for n in [5, 15, 30, 60, 100]: + mp.dps = n + assert elliprf(1,2,0).ae('1.3110287771460599052324197949455597068413774757158115814084108519003952935352071251151477664807145467230678763') + assert elliprf(0.5,1,0).ae('1.854074677301371918433850347195260046217598823521766905585928045056021776838119978357271861650371897277771871') + assert elliprf(j,-j,0).ae('1.854074677301371918433850347195260046217598823521766905585928045056021776838119978357271861650371897277771871') + assert elliprf(j-1,j,0).ae(mpc('0.79612586584233913293056938229563057846592264089185680214929401744498956943287031832657642790719940442165621412', + '-1.2138566698364959864300942567386038975419875860741507618279563735753073152507112254567291141460317931258599889')) + assert elliprf(2,3,4).ae('0.58408284167715170669284916892566789240351359699303216166309375305508295130412919665541330837704050454472379308') + assert elliprf(j,-j,2).ae('1.0441445654064360931078658361850779139591660747973017593275012615517220315993723776182276555339288363064476126') + assert elliprf(j-1,j,1-j).ae(mpc('0.93912050218619371196624617169781141161485651998254431830645241993282941057500174238125105410055253623847335313', + '-0.53296252018635269264859303449447908970360344322834582313172115220559316331271520508208025270300138589669326136')) + assert elliprc(0,0.25).ae(+pi) + assert elliprc(2.25,2).ae(+ln2) + assert elliprc(0,j).ae(mpc('1.1107207345395915617539702475151734246536554223439225557713489017391086982748684776438317336911913093408525532', + '-1.1107207345395915617539702475151734246536554223439225557713489017391086982748684776438317336911913093408525532')) + assert elliprc(-j,j).ae(mpc('1.2260849569072198222319655083097718755633725139745941606203839524036426936825652935738621522906572884239069297', + '-0.34471136988767679699935618332997956653521218571295874986708834375026550946053920574015526038040124556716711353')) + assert elliprc(0.25,-2).ae(ln2/3) + assert elliprc(j,-1).ae(mpc('0.77778596920447389875196055840799837589537035343923012237628610795937014001905822029050288316217145443865649819', + '0.1983248499342877364755170948292130095921681309577950696116251029742793455964385947473103628983664877025779304')) + assert elliprj(0,1,2,3).ae('0.77688623778582332014190282640545501102298064276022952731669118325952563819813258230708177398475643634103990878') + assert elliprj(2,3,4,5).ae('0.14297579667156753833233879421985774801466647854232626336218889885463800128817976132826443904216546421431528308') + assert elliprj(2,3,4,-1+j).ae(mpc('0.13613945827770535203521374457913768360237593025944342652613569368333226052158214183059386307242563164036672709', + '-0.38207561624427164249600936454845112611060375760094156571007648297226090050927156176977091273224510621553615189')) + assert 
elliprj(j,-j,0,2).ae('1.6490011662710884518243257224860232300246792717163891216346170272567376981346412066066050103935109581019055806') + assert elliprj(-1+j,-1-j,1,2).ae('0.94148358841220238083044612133767270187474673547917988681610772381758628963408843935027667916713866133196845063') + assert elliprj(j,-j,0,1-j).ae(mpc('1.8260115229009316249372594065790946657011067182850435297162034335356430755397401849070610280860044610878657501', + '1.2290661908643471500163617732957042849283739403009556715926326841959667290840290081010472716420690899886276961')) + assert elliprj(-1+j,-1-j,1,-3+j).ae(mpc('-0.61127970812028172123588152373622636829986597243716610650831553882054127570542477508023027578037045504958619422', + '-1.0684038390006807880182112972232562745485871763154040245065581157751693730095703406209466903752930797510491155')) + assert elliprj(-1+j,-2-j,-j,-1+j).ae(mpc('1.8249027393703805304622013339009022294368078659619988943515764258335975852685224202567854526307030593012768954', + '-1.2218475784827035854568450371590419833166777535029296025352291308244564398645467465067845461070602841312456831')) + + assert elliprg(0,16,16).ae(+pi) + assert elliprg(2,3,4).ae('1.7255030280692277601061148835701141842692457170470456590515892070736643637303053506944907685301315299153040991') + assert elliprg(0,j,-j).ae('0.42360654239698954330324956174109581824072295516347109253028968632986700241706737986160014699730561497106114281') + assert elliprg(j-1,j,0).ae(mpc('0.44660591677018372656731970402124510811555212083508861036067729944477855594654762496407405328607219895053798354', + '0.70768352357515390073102719507612395221369717586839400605901402910893345301718731499237159587077682267374159282')) + assert elliprg(-j,j-1,j).ae(mpc('0.36023392184473309033675652092928695596803358846377334894215349632203382573844427952830064383286995172598964266', + '0.40348623401722113740956336997761033878615232917480045914551915169013722542827052849476969199578321834819903921')) + assert elliprg(0, mpf('0.0796'), 4).ae('1.0284758090288040009838871385180217366569777284430590125081211090574701293154645750017813190805144572673802094') + mp.dps = 15 + + # more test cases for the branch of ellippi / elliprj + assert elliprj(-1-0.5j, -10-6j, -10-3j, -5+10j).ae(0.128470516743927699 + 0.102175950778504625j, abs_eps=1e-8) + assert elliprj(1.987, 4.463 - 1.614j, 0, -3.965).ae(-0.341575118513811305 - 0.394703757004268486j, abs_eps=1e-8) + assert elliprj(0.3068, -4.037+0.632j, 1.654, -0.9609).ae(-1.14735199581485639 - 0.134450158867472264j, abs_eps=1e-8) + assert elliprj(0.3068, -4.037-0.632j, 1.654, -0.9609).ae(1.758765901861727 - 0.161002343366626892j, abs_eps=1e-5) + assert elliprj(0.3068, -4.037+0.0632j, 1.654, -0.9609).ae(-1.17157627949475577 - 0.069182614173988811j, abs_eps=1e-8) + assert elliprj(0.3068, -4.037+0.00632j, 1.654, -0.9609).ae(-1.17337595670549633 - 0.0623069224526925j, abs_eps=1e-8) + + # these require accurate integration + assert elliprj(0.3068, -4.037-0.0632j, 1.654, -0.9609).ae(1.77940452391261626 + 0.0388711305592447234j) + assert elliprj(0.3068, -4.037-0.00632j, 1.654, -0.9609).ae(1.77806722756403055 + 0.0592749824572262329j) + # issue #571 + assert ellippi(2.1 + 0.94j, 2.3 + 0.98j, 2.5 + 0.01j).ae(-0.40652414240811963438 + 2.1547659461404749309j) + + assert ellippi(2.0-1.0j, 2.0+1.0j).ae(1.8578723151271115 - 1.18642180609983531j) + assert ellippi(2.0-0.5j, 0.5+1.0j).ae(0.936761970766645807 - 1.61876787838890786j) + assert ellippi(2.0, 1.0+1.0j).ae(0.999881420735506708 - 2.4139272867045391j) + assert ellippi(2.0+1.0j, 
2.0-1.0j).ae(1.8578723151271115 + 1.18642180609983531j) + assert ellippi(2.0+1.0j, 2.0).ae(2.78474654927885845 + 2.02204728966993314j) + +def test_issue_238(): + assert isnan(qfrom(m=nan)) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_fp.py b/phivenv/Lib/site-packages/mpmath/tests/test_fp.py new file mode 100644 index 0000000000000000000000000000000000000000..99f3759c3071c4d55e0481472f3d16c1f5df1fef --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_fp.py @@ -0,0 +1,1671 @@ +""" +Easy-to-use test-generating code: + +cases = ''' +exp 2.25 +log 2.25 +''' + +from mpmath import * +mp.dps = 20 +for test in cases.splitlines(): + if not test: + continue + words = test.split() + fname = words[0] + args = words[1:] + argstr = ", ".join(args) + testline = "%s(%s)" % (fname, argstr) + ans = str(eval(testline)) + print " assert ae(fp.%s, %s)" % (testline, ans) + +""" + +from mpmath import fp + +def ae(x, y, tol=1e-12): + if x == y: + return True + return abs(x-y) <= tol*abs(y) + +def test_conj(): + assert fp.conj(4) == 4 + assert fp.conj(3+4j) == 3-4j + assert fp.fdot([1,2],[3,2+1j], conjugate=True) == 7-2j + +def test_fp_number_parts(): + assert ae(fp.arg(3), 0.0) + assert ae(fp.arg(-3), 3.1415926535897932385) + assert ae(fp.arg(3j), 1.5707963267948966192) + assert ae(fp.arg(-3j), -1.5707963267948966192) + assert ae(fp.arg(2+3j), 0.98279372324732906799) + assert ae(fp.arg(-1-1j), -2.3561944901923449288) + assert ae(fp.re(2.5), 2.5) + assert ae(fp.re(2.5+3j), 2.5) + assert ae(fp.im(2.5), 0.0) + assert ae(fp.im(2.5+3j), 3.0) + assert ae(fp.floor(2.5), 2.0) + assert ae(fp.floor(2), 2.0) + assert ae(fp.floor(2.0+0j), (2.0 + 0.0j)) + assert ae(fp.floor(-1.5-0.5j), (-2.0 - 1.0j)) + assert ae(fp.ceil(2.5), 3.0) + assert ae(fp.ceil(2), 2.0) + assert ae(fp.ceil(2.0+0j), (2.0 + 0.0j)) + assert ae(fp.ceil(-1.5-0.5j), (-1.0 + 0.0j)) + +def test_fp_cospi_sinpi(): + assert ae(fp.sinpi(0), 0.0) + assert ae(fp.sinpi(0.25), 0.7071067811865475244) + assert ae(fp.sinpi(0.5), 1.0) + assert ae(fp.sinpi(0.75), 0.7071067811865475244) + assert ae(fp.sinpi(1), 0.0) + assert ae(fp.sinpi(1.25), -0.7071067811865475244) + assert ae(fp.sinpi(1.5), -1.0) + assert ae(fp.sinpi(1.75), -0.7071067811865475244) + assert ae(fp.sinpi(2), 0.0) + assert ae(fp.sinpi(2.25), 0.7071067811865475244) + assert ae(fp.sinpi(0+3j), (0.0 + 6195.8238636085899556j)) + assert ae(fp.sinpi(0.25+3j), (4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.sinpi(0.5+3j), (6195.8239443081075259 + 0.0j)) + assert ae(fp.sinpi(0.75+3j), (4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.sinpi(1+3j), (0.0 - 6195.8238636085899556j)) + assert ae(fp.sinpi(1.25+3j), (-4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.sinpi(1.5+3j), (-6195.8239443081075259 + 0.0j)) + assert ae(fp.sinpi(1.75+3j), (-4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.sinpi(2+3j), (0.0 + 6195.8238636085899556j)) + assert ae(fp.sinpi(2.25+3j), (4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.sinpi(-0.75), -0.7071067811865475244) + assert ae(fp.sinpi(-1e-10), -3.1415926535897933529e-10) + assert ae(fp.sinpi(1e-10), 3.1415926535897933529e-10) + assert ae(fp.sinpi(1e-10+1e-10j), (3.141592653589793353e-10 + 3.1415926535897933528e-10j)) + assert ae(fp.sinpi(1e-10-1e-10j), (3.141592653589793353e-10 - 3.1415926535897933528e-10j)) + assert ae(fp.sinpi(-1e-10+1e-10j), (-3.141592653589793353e-10 + 3.1415926535897933528e-10j)) + assert ae(fp.sinpi(-1e-10-1e-10j), (-3.141592653589793353e-10 - 
3.1415926535897933528e-10j)) + assert ae(fp.cospi(0), 1.0) + assert ae(fp.cospi(0.25), 0.7071067811865475244) + assert ae(fp.cospi(0.5), 0.0) + assert ae(fp.cospi(0.75), -0.7071067811865475244) + assert ae(fp.cospi(1), -1.0) + assert ae(fp.cospi(1.25), -0.7071067811865475244) + assert ae(fp.cospi(1.5), 0.0) + assert ae(fp.cospi(1.75), 0.7071067811865475244) + assert ae(fp.cospi(2), 1.0) + assert ae(fp.cospi(2.25), 0.7071067811865475244) + assert ae(fp.cospi(0+3j), (6195.8239443081075259 + 0.0j)) + assert ae(fp.cospi(0.25+3j), (4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.cospi(0.5+3j), (0.0 - 6195.8238636085899556j)) + assert ae(fp.cospi(0.75+3j), (-4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.cospi(1+3j), (-6195.8239443081075259 + 0.0j)) + assert ae(fp.cospi(1.25+3j), (-4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.cospi(1.5+3j), (0.0 + 6195.8238636085899556j)) + assert ae(fp.cospi(1.75+3j), (4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.cospi(2+3j), (6195.8239443081075259 + 0.0j)) + assert ae(fp.cospi(2.25+3j), (4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.cospi(-0.75), -0.7071067811865475244) + assert ae(fp.sinpi(-0.7), -0.80901699437494750611) + assert ae(fp.cospi(-0.7), -0.5877852522924730163) + assert ae(fp.cospi(-3+2j), (-267.74676148374822225 + 0.0j)) + assert ae(fp.sinpi(-3+2j), (0.0 - 267.74489404101651426j)) + assert ae(fp.sinpi(-0.7+2j), (-216.6116802292079471 - 157.37650009392034693j)) + assert ae(fp.cospi(-0.7+2j), (-157.37759774921754565 + 216.61016943630197336j)) + +def test_fp_expj(): + assert ae(fp.expj(0), (1.0 + 0.0j)) + assert ae(fp.expj(1), (0.5403023058681397174 + 0.84147098480789650665j)) + assert ae(fp.expj(2), (-0.416146836547142387 + 0.9092974268256816954j)) + assert ae(fp.expj(0.75), (0.73168886887382088631 + 0.68163876002333416673j)) + assert ae(fp.expj(2+3j), (-0.020718731002242879378 + 0.045271253156092975488j)) + assert ae(fp.expjpi(0), (1.0 + 0.0j)) + assert ae(fp.expjpi(1), (-1.0 + 0.0j)) + assert ae(fp.expjpi(2), (1.0 + 0.0j)) + assert ae(fp.expjpi(0.75), (-0.7071067811865475244 + 0.7071067811865475244j)) + assert ae(fp.expjpi(2+3j), (0.000080699517570304599239 + 0.0j)) + +def test_fp_bernoulli(): + assert ae(fp.bernoulli(0), 1.0) + assert ae(fp.bernoulli(1), -0.5) + assert ae(fp.bernoulli(2), 0.16666666666666666667) + assert ae(fp.bernoulli(10), 0.075757575757575757576) + assert ae(fp.bernoulli(11), 0.0) + +def test_fp_gamma(): + assert ae(fp.gamma(1), 1.0) + assert ae(fp.gamma(1.5), 0.88622692545275801365) + assert ae(fp.gamma(10), 362880.0) + assert ae(fp.gamma(-0.5), -3.5449077018110320546) + assert ae(fp.gamma(-7.1), 0.0016478244570263333622) + assert ae(fp.gamma(12.3), 83385367.899970000963) + assert ae(fp.gamma(2+0j), (1.0 + 0.0j)) + assert ae(fp.gamma(-2.5+0j), (-0.94530872048294188123 + 0.0j)) + assert ae(fp.gamma(3+4j), (0.0052255384713692141947 - 0.17254707929430018772j)) + assert ae(fp.gamma(-3-4j), (0.00001460997305874775607 - 0.000020760733311509070396j)) + assert ae(fp.fac(0), 1.0) + assert ae(fp.fac(1), 1.0) + assert ae(fp.fac(20), 2432902008176640000.0) + assert ae(fp.fac(-3.5), -0.94530872048294188123) + assert ae(fp.fac(2+3j), (-0.44011340763700171113 - 0.06363724312631702183j)) + assert ae(fp.loggamma(1.0), 0.0) + assert ae(fp.loggamma(2.0), 0.0) + assert ae(fp.loggamma(3.0), 0.69314718055994530942) + assert ae(fp.loggamma(7.25), 7.0521854507385394449) + assert ae(fp.loggamma(1000.0), 5905.2204232091812118) + assert ae(fp.loggamma(1e50), 
1.1412925464970229298e+52) + assert ae(fp.loggamma(1e25+1e25j), (5.6125802751733671621e+26 + 5.7696599078528568383e+26j)) + assert ae(fp.loggamma(3+4j), (-1.7566267846037841105 + 4.7426644380346579282j)) + assert ae(fp.loggamma(-0.5), (1.2655121234846453965 - 3.1415926535897932385j)) + assert ae(fp.loggamma(-1.25), (1.3664317612369762346 - 6.2831853071795864769j)) + assert ae(fp.loggamma(-2.75), (0.0044878975359557733115 - 9.4247779607693797154j)) + assert ae(fp.loggamma(-3.5), (-1.3090066849930420464 - 12.566370614359172954j)) + assert ae(fp.loggamma(-4.5), (-2.8130840817693161197 - 15.707963267948966192j)) + assert ae(fp.loggamma(-2+3j), (-6.776523813485657093 - 4.568791367260286402j)) + assert ae(fp.loggamma(-1000.3), (-5912.8440347785205041 - 3144.7342462433830317j)) + assert ae(fp.loggamma(-100-100j), (-632.35117666833135562 - 158.37641469650352462j)) + assert ae(fp.loggamma(1e-10), 23.025850929882735237) + assert ae(fp.loggamma(-1e-10), (23.02585092999817837 - 3.1415926535897932385j)) + assert ae(fp.loggamma(1e-10j), (23.025850929940456804 - 1.5707963268526181857j)) + assert ae(fp.loggamma(1e-10j-1e-10), (22.679277339718205716 - 2.3561944902500664954j)) + +def test_fp_psi(): + assert ae(fp.psi(0, 3.7), 1.1671535393615114409) + assert ae(fp.psi(0, 0.5), -1.9635100260214234794) + assert ae(fp.psi(0, 1), -0.57721566490153286061) + assert ae(fp.psi(0, -2.5), 1.1031566406452431872) + assert ae(fp.psi(0, 12.9), 2.5179671503279156347) + assert ae(fp.psi(0, 100), 4.6001618527380874002) + assert ae(fp.psi(0, 2500.3), 7.8239660143238547877) + assert ae(fp.psi(0, 1e40), 92.103403719761827391) + assert ae(fp.psi(0, 1e200), 460.51701859880913677) + assert ae(fp.psi(0, 3.7+0j), (1.1671535393615114409 + 0.0j)) + assert ae(fp.psi(1, 3), 0.39493406684822643647) + assert ae(fp.psi(3, 2+3j), (-0.05383196209159972116 + 0.0076890935247364805218j)) + assert ae(fp.psi(4, -0.5+1j), (1.2719531355492328195 - 18.211833410936276774j)) + assert ae(fp.harmonic(0), 0.0) + assert ae(fp.harmonic(1), 1.0) + assert ae(fp.harmonic(2), 1.5) + assert ae(fp.harmonic(100), 5.1873775176396202608) + assert ae(fp.harmonic(-2.5), 1.2803723055467760478) + assert ae(fp.harmonic(2+3j), (1.9390425294578375875 + 0.87336044981834544043j)) + assert ae(fp.harmonic(-5-4j), (2.3725754822349437733 - 2.4160904444801621j)) + +def test_fp_zeta(): + assert ae(fp.zeta(1e100), 1.0) + assert ae(fp.zeta(3), 1.2020569031595942854) + assert ae(fp.zeta(2+0j), (1.6449340668482264365 + 0.0j)) + assert ae(fp.zeta(0.93), -13.713619351638164784) + assert ae(fp.zeta(1.74), 1.9796863545771774095) + assert ae(fp.zeta(0.0), -0.5) + assert ae(fp.zeta(-1.0), -0.083333333333333333333) + assert ae(fp.zeta(-2.0), 0.0) + assert ae(fp.zeta(-3.0), 0.0083333333333333333333) + assert ae(fp.zeta(-500.0), 0.0) + assert ae(fp.zeta(-7.4), 0.0036537321227995882447) + assert ae(fp.zeta(2.1), 1.5602165335033620158) + assert ae(fp.zeta(26.9), 1.0000000079854809935) + assert ae(fp.zeta(26), 1.0000000149015548284) + assert ae(fp.zeta(27), 1.0000000074507117898) + assert ae(fp.zeta(28), 1.0000000037253340248) + assert ae(fp.zeta(27.1), 1.000000006951755045) + assert ae(fp.zeta(32.7), 1.0000000001433243232) + assert ae(fp.zeta(100), 1.0) + assert ae(fp.altzeta(3.5), 0.92755357777394803511) + assert ae(fp.altzeta(1), 0.69314718055994530942) + assert ae(fp.altzeta(2), 0.82246703342411321824) + assert ae(fp.altzeta(0), 0.5) + assert ae(fp.zeta(-2+3j, 1), (0.13297115587929864827 + 0.12305330040458776494j)) + assert ae(fp.zeta(-2+3j, 5), (18.384866151867576927 - 
11.377015110597711009j)) + assert ae(fp.zeta(1.0000000001), 9999999173.1735741337) + assert ae(fp.zeta(0.9999999999), -9999999172.0191428039) + assert ae(fp.zeta(1+0.000000001j), (0.57721566490153286061 - 999999999.99999993765j)) + assert ae(fp.primezeta(2.5+4j), (-0.16922458243438033385 - 0.010847965298387727811j)) + assert ae(fp.primezeta(4), 0.076993139764246844943) + assert ae(fp.riemannr(3.7), 2.3034079839110855717) + assert ae(fp.riemannr(8), 3.9011860449341499474) + assert ae(fp.riemannr(3+4j), (2.2369653314259991796 + 1.6339943856990281694j)) + +def test_fp_hyp2f1(): + assert ae(fp.hyp2f1(1, (3,2), 3.25, 5.0), (-0.46600275923108143059 - 0.74393667908854842325j)) + assert ae(fp.hyp2f1(1+1j, (3,2), 3.25, 5.0), (-5.9208875603806515987 - 2.3813557707889590686j)) + assert ae(fp.hyp2f1(1+1j, (3,2), 3.25, 2+3j), (0.17174552030925080445 + 0.19589781970539389999j)) + +def test_fp_erf(): + assert fp.erf(2) == fp.erf(2.0) == fp.erf(2.0+0.0j) + assert fp.erf(fp.inf) == 1.0 + assert fp.erf(fp.ninf) == -1.0 + assert ae(fp.erf(0), 0.0) + assert ae(fp.erf(-0), -0.0) + assert ae(fp.erf(0.3), 0.32862675945912741619) + assert ae(fp.erf(-0.3), -0.32862675945912741619) + assert ae(fp.erf(0.9), 0.79690821242283213966) + assert ae(fp.erf(-0.9), -0.79690821242283213966) + assert ae(fp.erf(1.0), 0.84270079294971486934) + assert ae(fp.erf(-1.0), -0.84270079294971486934) + assert ae(fp.erf(1.1), 0.88020506957408172966) + assert ae(fp.erf(-1.1), -0.88020506957408172966) + assert ae(fp.erf(8.5), 1.0) + assert ae(fp.erf(-8.5), -1.0) + assert ae(fp.erf(9.1), 1.0) + assert ae(fp.erf(-9.1), -1.0) + assert ae(fp.erf(20.0), 1.0) + assert ae(fp.erf(-20.0), -1.0) + assert ae(fp.erf(10000.0), 1.0) + assert ae(fp.erf(-10000.0), -1.0) + assert ae(fp.erf(1e+50), 1.0) + assert ae(fp.erf(-1e+50), -1.0) + assert ae(fp.erf(1j), 1.650425758797542876j) + assert ae(fp.erf(-1j), -1.650425758797542876j) + assert ae(fp.erf((2+3j)), (-20.829461427614568389 + 8.6873182714701631444j)) + assert ae(fp.erf(-(2+3j)), -(-20.829461427614568389 + 8.6873182714701631444j)) + assert ae(fp.erf((8+9j)), (-1072004.2525062051158 + 364149.91954310255423j)) + assert ae(fp.erf(-(8+9j)), -(-1072004.2525062051158 + 364149.91954310255423j)) + assert fp.erfc(fp.inf) == 0.0 + assert fp.erfc(fp.ninf) == 2.0 + assert fp.erfc(0) == 1 + assert fp.erfc(-0.0) == 1 + assert fp.erfc(0+0j) == 1 + assert ae(fp.erfc(0.3), 0.67137324054087258381) + assert ae(fp.erfc(-0.3), 1.3286267594591274162) + assert ae(fp.erfc(0.9), 0.20309178757716786034) + assert ae(fp.erfc(-0.9), 1.7969082124228321397) + assert ae(fp.erfc(1.0), 0.15729920705028513066) + assert ae(fp.erfc(-1.0), 1.8427007929497148693) + assert ae(fp.erfc(1.1), 0.11979493042591827034) + assert ae(fp.erfc(-1.1), 1.8802050695740817297) + assert ae(fp.erfc(8.5), 2.7623240713337714461e-33) + assert ae(fp.erfc(-8.5), 2.0) + assert ae(fp.erfc(9.1), 6.6969004279886077452e-38) + assert ae(fp.erfc(-9.1), 2.0) + assert ae(fp.erfc(20.0), 5.3958656116079009289e-176) + assert ae(fp.erfc(-20.0), 2.0) + assert ae(fp.erfc(10000.0), 0.0) + assert ae(fp.erfc(-10000.0), 2.0) + assert ae(fp.erfc(1e+50), 0.0) + assert ae(fp.erfc(-1e+50), 2.0) + assert ae(fp.erfc(1j), (1.0 - 1.650425758797542876j)) + assert ae(fp.erfc(-1j), (1.0 + 1.650425758797542876j)) + assert ae(fp.erfc((2+3j)), (21.829461427614568389 - 8.6873182714701631444j), 1e-13) + assert ae(fp.erfc(-(2+3j)), (-19.829461427614568389 + 8.6873182714701631444j), 1e-13) + assert ae(fp.erfc((8+9j)), (1072005.2525062051158 - 364149.91954310255423j)) + assert ae(fp.erfc(-(8+9j)), 
(-1072003.2525062051158 + 364149.91954310255423j)) + assert ae(fp.erfc(20+0j), (5.3958656116079009289e-176 + 0.0j)) + +def test_fp_lambertw(): + assert ae(fp.lambertw(0.0), 0.0) + assert ae(fp.lambertw(1.0), 0.567143290409783873) + assert ae(fp.lambertw(7.5), 1.5662309537823875394) + assert ae(fp.lambertw(-0.25), -0.35740295618138890307) + assert ae(fp.lambertw(-10.0), (1.3699809685212708156 + 2.140194527074713196j)) + assert ae(fp.lambertw(0+0j), (0.0 + 0.0j)) + assert ae(fp.lambertw(4+0j), (1.2021678731970429392 + 0.0j)) + assert ae(fp.lambertw(1000.5), 5.2500227450408980127) + assert ae(fp.lambertw(1e100), 224.84310644511850156) + assert ae(fp.lambertw(-1000.0), (5.1501630246362515223 + 2.6641981432905204596j)) + assert ae(fp.lambertw(1e-10), 9.9999999990000003645e-11) + assert ae(fp.lambertw(1e-10j), (1.0000000000000000728e-20 + 1.0000000000000000364e-10j)) + assert ae(fp.lambertw(3+4j), (1.2815618061237758782 + 0.53309522202097107131j)) + assert ae(fp.lambertw(-3-4j), (1.0750730665692549276 - 1.3251023817343588823j)) + assert ae(fp.lambertw(10000+1000j), (7.2361526563371602186 + 0.087567810943839352034j)) + assert ae(fp.lambertw(0.0, -1), -fp.inf) + assert ae(fp.lambertw(1.0, -1), (-1.5339133197935745079 - 4.3751851530618983855j)) + assert ae(fp.lambertw(7.5, -1), (0.44125668415098614999 - 4.8039842008452390179j)) + assert ae(fp.lambertw(-0.25, -1), -2.1532923641103496492) + assert ae(fp.lambertw(-10.0, -1), (1.3699809685212708156 - 2.140194527074713196j)) + assert ae(fp.lambertw(0+0j, -1), -fp.inf) + assert ae(fp.lambertw(4+0j, -1), (-0.15730793189620765317 - 4.6787800704666656212j)) + assert ae(fp.lambertw(1000.5, -1), (4.9153765415404024736 - 5.4465682700815159569j)) + assert ae(fp.lambertw(1e100, -1), (224.84272130101601052 - 6.2553713838167244141j)) + assert ae(fp.lambertw(-1000.0, -1), (5.1501630246362515223 - 2.6641981432905204596j)) + assert ae(fp.lambertw(1e-10, -1), (-26.303186778379041521 - 3.2650939117038283975j)) + assert ae(fp.lambertw(1e-10j, -1), (-26.297238779529035028 - 1.6328071613455765135j)) + assert ae(fp.lambertw(3+4j, -1), (0.25856740686699741676 - 3.8521166861614355895j)) + assert ae(fp.lambertw(-3-4j, -1), (-0.32028750204310768396 - 6.8801677192091972343j)) + assert ae(fp.lambertw(10000+1000j, -1), (7.0255308742285435567 - 5.5177506835734067601j)) + assert ae(fp.lambertw(0.0, 2), -fp.inf) + assert ae(fp.lambertw(1.0, 2), (-2.4015851048680028842 + 10.776299516115070898j)) + assert ae(fp.lambertw(7.5, 2), (-0.38003357962843791529 + 10.960916473368746184j)) + assert ae(fp.lambertw(-0.25, 2), (-4.0558735269061511898 + 13.852334658567271386j)) + assert ae(fp.lambertw(-10.0, 2), (-0.34479123764318858696 + 14.112740596763592363j)) + assert ae(fp.lambertw(0+0j, 2), -fp.inf) + assert ae(fp.lambertw(4+0j, 2), (-1.0070343323804262788 + 10.903476551861683082j)) + assert ae(fp.lambertw(1000.5, 2), (4.4076185165459395295 + 11.365524591091402177j)) + assert ae(fp.lambertw(1e100, 2), (224.84156762724875878 + 12.510785262632255672j)) + assert ae(fp.lambertw(-1000.0, 2), (4.1984245610246530756 + 14.420478573754313845j)) + assert ae(fp.lambertw(1e-10, 2), (-26.362258095445866488 + 9.7800247407031482519j)) + assert ae(fp.lambertw(1e-10j, 2), (-26.384250801683084252 + 11.403535950607739763j)) + assert ae(fp.lambertw(3+4j, 2), (-0.86554679943333993562 + 11.849956798331992027j)) + assert ae(fp.lambertw(-3-4j, 2), (-0.55792273874679112639 + 8.7173627024159324811j)) + assert ae(fp.lambertw(10000+1000j, 2), (6.6223802254585662734 + 11.61348646825020766j)) + +def 
test_fp_stress_ei_e1(): + # Can be tightened on recent Pythons with more accurate math/cmath + ATOL = 1e-13 + PTOL = 1e-12 + v = fp.e1(1.1641532182693481445e-10) + assert ae(v, 22.296641293693077672, tol=ATOL) + assert type(v) is float + v = fp.e1(0.25) + assert ae(v, 1.0442826344437381945, tol=ATOL) + assert type(v) is float + v = fp.e1(1.0) + assert ae(v, 0.21938393439552027368, tol=ATOL) + assert type(v) is float + v = fp.e1(2.0) + assert ae(v, 0.048900510708061119567, tol=ATOL) + assert type(v) is float + v = fp.e1(5.0) + assert ae(v, 0.0011482955912753257973, tol=ATOL) + assert type(v) is float + v = fp.e1(20.0) + assert ae(v, 9.8355252906498816904e-11, tol=ATOL) + assert type(v) is float + v = fp.e1(30.0) + assert ae(v, 3.0215520106888125448e-15, tol=ATOL) + assert type(v) is float + v = fp.e1(40.0) + assert ae(v, 1.0367732614516569722e-19, tol=ATOL) + assert type(v) is float + v = fp.e1(50.0) + assert ae(v, 3.7832640295504590187e-24, tol=ATOL) + assert type(v) is float + v = fp.e1(80.0) + assert ae(v, 2.2285432586884729112e-37, tol=ATOL) + assert type(v) is float + v = fp.e1((1.1641532182693481445e-10 + 0.0j)) + assert ae(v, (22.296641293693077672 + 0.0j), tol=ATOL) + assert ae(v.real, 22.296641293693077672, tol=PTOL) + assert v.imag == 0 + v = fp.e1((0.25 + 0.0j)) + assert ae(v, (1.0442826344437381945 + 0.0j), tol=ATOL) + assert ae(v.real, 1.0442826344437381945, tol=PTOL) + assert v.imag == 0 + v = fp.e1((1.0 + 0.0j)) + assert ae(v, (0.21938393439552027368 + 0.0j), tol=ATOL) + assert ae(v.real, 0.21938393439552027368, tol=PTOL) + assert v.imag == 0 + v = fp.e1((2.0 + 0.0j)) + assert ae(v, (0.048900510708061119567 + 0.0j), tol=ATOL) + assert ae(v.real, 0.048900510708061119567, tol=PTOL) + assert v.imag == 0 + v = fp.e1((5.0 + 0.0j)) + assert ae(v, (0.0011482955912753257973 + 0.0j), tol=ATOL) + assert ae(v.real, 0.0011482955912753257973, tol=PTOL) + assert v.imag == 0 + v = fp.e1((20.0 + 0.0j)) + assert ae(v, (9.8355252906498816904e-11 + 0.0j), tol=ATOL) + assert ae(v.real, 9.8355252906498816904e-11, tol=PTOL) + assert v.imag == 0 + v = fp.e1((30.0 + 0.0j)) + assert ae(v, (3.0215520106888125448e-15 + 0.0j), tol=ATOL) + assert ae(v.real, 3.0215520106888125448e-15, tol=PTOL) + assert v.imag == 0 + v = fp.e1((40.0 + 0.0j)) + assert ae(v, (1.0367732614516569722e-19 + 0.0j), tol=ATOL) + assert ae(v.real, 1.0367732614516569722e-19, tol=PTOL) + assert v.imag == 0 + v = fp.e1((50.0 + 0.0j)) + assert ae(v, (3.7832640295504590187e-24 + 0.0j), tol=ATOL) + assert ae(v.real, 3.7832640295504590187e-24, tol=PTOL) + assert v.imag == 0 + v = fp.e1((80.0 + 0.0j)) + assert ae(v, (2.2285432586884729112e-37 + 0.0j), tol=ATOL) + assert ae(v.real, 2.2285432586884729112e-37, tol=PTOL) + assert v.imag == 0 + v = fp.e1((4.6566128730773925781e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (20.880034622014215597 - 0.24497866301044883237j), tol=ATOL) + assert ae(v.real, 20.880034622014215597, tol=PTOL) + assert ae(v.imag, -0.24497866301044883237, tol=PTOL) + v = fp.e1((1.0 + 0.25j)) + assert ae(v, (0.19731063945004229095 - 0.087366045774299963672j), tol=ATOL) + assert ae(v.real, 0.19731063945004229095, tol=PTOL) + assert ae(v.imag, -0.087366045774299963672, tol=PTOL) + v = fp.e1((4.0 + 1.0j)) + assert ae(v, (0.0013106173980145506944 - 0.0034542480199350626699j), tol=ATOL) + assert ae(v.real, 0.0013106173980145506944, tol=PTOL) + assert ae(v.imag, -0.0034542480199350626699, tol=PTOL) + v = fp.e1((8.0 + 2.0j)) + assert ae(v, (-0.000022278049065270225945 - 0.000029191940456521555288j), tol=ATOL) + assert 
ae(v.real, -0.000022278049065270225945, tol=PTOL) + assert ae(v.imag, -0.000029191940456521555288, tol=PTOL) + v = fp.e1((20.0 + 5.0j)) + assert ae(v, (4.7711374515765346894e-11 + 8.2902652405126947359e-11j), tol=ATOL) + assert ae(v.real, 4.7711374515765346894e-11, tol=PTOL) + assert ae(v.imag, 8.2902652405126947359e-11, tol=PTOL) + v = fp.e1((80.0 + 20.0j)) + assert ae(v, (3.8353473865788235787e-38 - 2.129247592349605139e-37j), tol=ATOL) + assert ae(v.real, 3.8353473865788235787e-38, tol=PTOL) + assert ae(v.imag, -2.129247592349605139e-37, tol=PTOL) + v = fp.e1((120.0 + 30.0j)) + assert ae(v, (2.3836002337480334716e-55 + 5.6704043587126198306e-55j), tol=ATOL) + assert ae(v.real, 2.3836002337480334716e-55, tol=PTOL) + assert ae(v.imag, 5.6704043587126198306e-55, tol=PTOL) + v = fp.e1((160.0 + 40.0j)) + assert ae(v, (-1.6238022898654510661e-72 - 1.104172355572287367e-72j), tol=ATOL) + assert ae(v.real, -1.6238022898654510661e-72, tol=PTOL) + assert ae(v.imag, -1.104172355572287367e-72, tol=PTOL) + v = fp.e1((200.0 + 50.0j)) + assert ae(v, (6.6800061461666228487e-90 + 1.4473816083541016115e-91j), tol=ATOL) + assert ae(v.real, 6.6800061461666228487e-90, tol=PTOL) + assert ae(v.imag, 1.4473816083541016115e-91, tol=PTOL) + v = fp.e1((320.0 + 80.0j)) + assert ae(v, (4.2737871527778786157e-143 + 3.1789935525785660314e-142j), tol=ATOL) + assert ae(v.real, 4.2737871527778786157e-143, tol=PTOL) + assert ae(v.imag, 3.1789935525785660314e-142, tol=PTOL) + v = fp.e1((1.1641532182693481445e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (21.950067703413105017 - 0.7853981632810329878j), tol=ATOL) + assert ae(v.real, 21.950067703413105017, tol=PTOL) + assert ae(v.imag, -0.7853981632810329878, tol=PTOL) + v = fp.e1((0.25 + 0.25j)) + assert ae(v, (0.71092525792923287894 - 0.56491812441304194711j), tol=ATOL) + assert ae(v.real, 0.71092525792923287894, tol=PTOL) + assert ae(v.imag, -0.56491812441304194711, tol=PTOL) + v = fp.e1((1.0 + 1.0j)) + assert ae(v, (0.00028162445198141832551 - 0.17932453503935894015j), tol=ATOL) + assert ae(v.real, 0.00028162445198141832551, tol=PTOL) + assert ae(v.imag, -0.17932453503935894015, tol=PTOL) + v = fp.e1((2.0 + 2.0j)) + assert ae(v, (-0.033767089606562004246 - 0.018599414169750541925j), tol=ATOL) + assert ae(v.real, -0.033767089606562004246, tol=PTOL) + assert ae(v.imag, -0.018599414169750541925, tol=PTOL) + v = fp.e1((5.0 + 5.0j)) + assert ae(v, (0.0007266506660356393891 + 0.00047102780163522245054j), tol=ATOL) + assert ae(v.real, 0.0007266506660356393891, tol=PTOL) + assert ae(v.imag, 0.00047102780163522245054, tol=PTOL) + v = fp.e1((20.0 + 20.0j)) + assert ae(v, (-2.3824537449367396579e-11 - 6.6969873156525615158e-11j), tol=ATOL) + assert ae(v.real, -2.3824537449367396579e-11, tol=PTOL) + assert ae(v.imag, -6.6969873156525615158e-11, tol=PTOL) + v = fp.e1((30.0 + 30.0j)) + assert ae(v, (1.7316045841744061617e-15 + 1.3065678019487308689e-15j), tol=ATOL) + assert ae(v.real, 1.7316045841744061617e-15, tol=PTOL) + assert ae(v.imag, 1.3065678019487308689e-15, tol=PTOL) + v = fp.e1((40.0 + 40.0j)) + assert ae(v, (-7.4001043002899232182e-20 - 4.991847855336816304e-21j), tol=ATOL) + assert ae(v.real, -7.4001043002899232182e-20, tol=PTOL) + assert ae(v.imag, -4.991847855336816304e-21, tol=PTOL) + v = fp.e1((50.0 + 50.0j)) + assert ae(v, (2.3566128324644641219e-24 - 1.3188326726201614778e-24j), tol=ATOL) + assert ae(v.real, 2.3566128324644641219e-24, tol=PTOL) + assert ae(v.imag, -1.3188326726201614778e-24, tol=PTOL) + v = fp.e1((80.0 + 80.0j)) + assert ae(v, 
(9.8279750572186526673e-38 + 1.243952841288868831e-37j), tol=ATOL) + assert ae(v.real, 9.8279750572186526673e-38, tol=PTOL) + assert ae(v.imag, 1.243952841288868831e-37, tol=PTOL) + v = fp.e1((1.1641532182693481445e-10 + 4.6566128730773925781e-10j)) + assert ae(v, (20.880034621664969632 - 1.3258176632023711778j), tol=ATOL) + assert ae(v.real, 20.880034621664969632, tol=PTOL) + assert ae(v.imag, -1.3258176632023711778, tol=PTOL) + v = fp.e1((0.25 + 1.0j)) + assert ae(v, (-0.16868306393667788761 - 0.4858011885947426971j), tol=ATOL) + assert ae(v.real, -0.16868306393667788761, tol=PTOL) + assert ae(v.imag, -0.4858011885947426971, tol=PTOL) + v = fp.e1((1.0 + 4.0j)) + assert ae(v, (0.03373591813926547318 + 0.073523452241083821877j), tol=ATOL) + assert ae(v.real, 0.03373591813926547318, tol=PTOL) + assert ae(v.imag, 0.073523452241083821877, tol=PTOL) + v = fp.e1((2.0 + 8.0j)) + assert ae(v, (-0.015392833434733785143 - 0.0031747121557605415914j), tol=ATOL) + assert ae(v.real, -0.015392833434733785143, tol=PTOL) + assert ae(v.imag, -0.0031747121557605415914, tol=PTOL) + v = fp.e1((5.0 + 20.0j)) + assert ae(v, (-0.00024419662286542966525 - 0.00021008322966152755674j), tol=ATOL) + assert ae(v.real, -0.00024419662286542966525, tol=PTOL) + assert ae(v.imag, -0.00021008322966152755674, tol=PTOL) + v = fp.e1((20.0 + 80.0j)) + assert ae(v, (2.3255552781051330088e-11 + 8.9463918891349438007e-12j), tol=ATOL) + assert ae(v.real, 2.3255552781051330088e-11, tol=PTOL) + assert ae(v.imag, 8.9463918891349438007e-12, tol=PTOL) + v = fp.e1((30.0 + 120.0j)) + assert ae(v, (-2.7068919097124652332e-16 - 7.0477762411705130239e-16j), tol=ATOL) + assert ae(v.real, -2.7068919097124652332e-16, tol=PTOL) + assert ae(v.imag, -7.0477762411705130239e-16, tol=PTOL) + v = fp.e1((40.0 + 160.0j)) + assert ae(v, (-1.1695597827678024687e-20 + 2.2907401455645736661e-20j), tol=ATOL) + assert ae(v.real, -1.1695597827678024687e-20, tol=PTOL) + assert ae(v.imag, 2.2907401455645736661e-20, tol=PTOL) + v = fp.e1((50.0 + 200.0j)) + assert ae(v, (9.0323746914410162531e-25 - 2.3950601790033530935e-25j), tol=ATOL) + assert ae(v.real, 9.0323746914410162531e-25, tol=PTOL) + assert ae(v.imag, -2.3950601790033530935e-25, tol=PTOL) + v = fp.e1((80.0 + 320.0j)) + assert ae(v, (3.4819106748728063576e-38 - 4.215653005615772724e-38j), tol=ATOL) + assert ae(v.real, 3.4819106748728063576e-38, tol=PTOL) + assert ae(v.imag, -4.215653005615772724e-38, tol=PTOL) + v = fp.e1((0.0 + 1.1641532182693481445e-10j)) + assert ae(v, (22.29664129357666235 - 1.5707963266784812974j), tol=ATOL) + assert ae(v.real, 22.29664129357666235, tol=PTOL) + assert ae(v.imag, -1.5707963266784812974, tol=PTOL) + v = fp.e1((0.0 + 0.25j)) + assert ae(v, (0.82466306258094565309 - 1.3216627564751394551j), tol=ATOL) + assert ae(v.real, 0.82466306258094565309, tol=PTOL) + assert ae(v.imag, -1.3216627564751394551, tol=PTOL) + v = fp.e1((0.0 + 1.0j)) + assert ae(v, (-0.33740392290096813466 - 0.62471325642771360429j), tol=ATOL) + assert ae(v.real, -0.33740392290096813466, tol=PTOL) + assert ae(v.imag, -0.62471325642771360429, tol=PTOL) + v = fp.e1((0.0 + 2.0j)) + assert ae(v, (-0.4229808287748649957 + 0.034616650007798229345j), tol=ATOL) + assert ae(v.real, -0.4229808287748649957, tol=PTOL) + assert ae(v.imag, 0.034616650007798229345, tol=PTOL) + v = fp.e1((0.0 + 5.0j)) + assert ae(v, (0.19002974965664387862 - 0.020865081850222481957j), tol=ATOL) + assert ae(v.real, 0.19002974965664387862, tol=PTOL) + assert ae(v.imag, -0.020865081850222481957, tol=PTOL) + v = fp.e1((0.0 + 20.0j)) + assert 
ae(v, (-0.04441982084535331654 - 0.022554625751456779068j), tol=ATOL) + assert ae(v.real, -0.04441982084535331654, tol=PTOL) + assert ae(v.imag, -0.022554625751456779068, tol=PTOL) + v = fp.e1((0.0 + 30.0j)) + assert ae(v, (0.033032417282071143779 - 0.0040397867645455082476j), tol=ATOL) + assert ae(v.real, 0.033032417282071143779, tol=PTOL) + assert ae(v.imag, -0.0040397867645455082476, tol=PTOL) + v = fp.e1((0.0 + 40.0j)) + assert ae(v, (-0.019020007896208766962 + 0.016188792559887887544j), tol=ATOL) + assert ae(v.real, -0.019020007896208766962, tol=PTOL) + assert ae(v.imag, 0.016188792559887887544, tol=PTOL) + v = fp.e1((0.0 + 50.0j)) + assert ae(v, (0.0056283863241163054402 - 0.019179254308960724503j), tol=ATOL) + assert ae(v.real, 0.0056283863241163054402, tol=PTOL) + assert ae(v.imag, -0.019179254308960724503, tol=PTOL) + v = fp.e1((0.0 + 80.0j)) + assert ae(v, (0.012402501155070958192 + 0.0015345601175906961199j), tol=ATOL) + assert ae(v.real, 0.012402501155070958192, tol=PTOL) + assert ae(v.imag, 0.0015345601175906961199, tol=PTOL) + v = fp.e1((-1.1641532182693481445e-10 + 4.6566128730773925781e-10j)) + assert ae(v, (20.880034621432138988 - 1.8157749894560994861j), tol=ATOL) + assert ae(v.real, 20.880034621432138988, tol=PTOL) + assert ae(v.imag, -1.8157749894560994861, tol=PTOL) + v = fp.e1((-0.25 + 1.0j)) + assert ae(v, (-0.59066621214766308594 - 0.74474454765205036972j), tol=ATOL) + assert ae(v.real, -0.59066621214766308594, tol=PTOL) + assert ae(v.imag, -0.74474454765205036972, tol=PTOL) + v = fp.e1((-1.0 + 4.0j)) + assert ae(v, (0.49739047283060471093 + 0.41543605404038863174j), tol=ATOL) + assert ae(v.real, 0.49739047283060471093, tol=PTOL) + assert ae(v.imag, 0.41543605404038863174, tol=PTOL) + v = fp.e1((-2.0 + 8.0j)) + assert ae(v, (-0.8705211147733730969 + 0.24099328498605539667j), tol=ATOL) + assert ae(v.real, -0.8705211147733730969, tol=PTOL) + assert ae(v.imag, 0.24099328498605539667, tol=PTOL) + v = fp.e1((-5.0 + 20.0j)) + assert ae(v, (-7.0789514293925893007 - 1.6102177171960790536j), tol=ATOL) + assert ae(v.real, -7.0789514293925893007, tol=PTOL) + assert ae(v.imag, -1.6102177171960790536, tol=PTOL) + v = fp.e1((-20.0 + 80.0j)) + assert ae(v, (5855431.4907298084434 - 720920.93315409165707j), tol=ATOL) + assert ae(v.real, 5855431.4907298084434, tol=PTOL) + assert ae(v.imag, -720920.93315409165707, tol=PTOL) + v = fp.e1((-30.0 + 120.0j)) + assert ae(v, (-65402491644.703470747 - 56697658399.657460294j), tol=ATOL) + assert ae(v.real, -65402491644.703470747, tol=PTOL) + assert ae(v.imag, -56697658399.657460294, tol=PTOL) + v = fp.e1((-40.0 + 160.0j)) + assert ae(v, (25504929379604.776769 + 1429035198630573.2463j), tol=ATOL) + assert ae(v.real, 25504929379604.776769, tol=PTOL) + assert ae(v.imag, 1429035198630573.2463, tol=PTOL) + v = fp.e1((-50.0 + 200.0j)) + assert ae(v, (18437746526988116954.0 - 17146362239046152345.0j), tol=ATOL) + assert ae(v.real, 18437746526988116954.0, tol=PTOL) + assert ae(v.imag, -17146362239046152345.0, tol=PTOL) + v = fp.e1((-80.0 + 320.0j)) + assert ae(v, (3.3464697299634526706e+31 - 1.6473152633843023919e+32j), tol=ATOL) + assert ae(v.real, 3.3464697299634526706e+31, tol=PTOL) + assert ae(v.imag, -1.6473152633843023919e+32, tol=PTOL) + v = fp.e1((-4.6566128730773925781e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (20.880034621082893023 - 2.8966139903465137624j), tol=ATOL) + assert ae(v.real, 20.880034621082893023, tol=PTOL) + assert ae(v.imag, -2.8966139903465137624, tol=PTOL) + v = fp.e1((-1.0 + 0.25j)) + assert ae(v, 
(-1.8942716983721074932 - 2.4689102827070540799j), tol=ATOL) + assert ae(v.real, -1.8942716983721074932, tol=PTOL) + assert ae(v.imag, -2.4689102827070540799, tol=PTOL) + v = fp.e1((-4.0 + 1.0j)) + assert ae(v, (-14.806699492675420438 + 9.1384225230837893776j), tol=ATOL) + assert ae(v.real, -14.806699492675420438, tol=PTOL) + assert ae(v.imag, 9.1384225230837893776, tol=PTOL) + v = fp.e1((-8.0 + 2.0j)) + assert ae(v, (54.633252667426386294 + 413.20318163814670688j), tol=ATOL) + assert ae(v.real, 54.633252667426386294, tol=PTOL) + assert ae(v.imag, 413.20318163814670688, tol=PTOL) + v = fp.e1((-20.0 + 5.0j)) + assert ae(v, (-711836.97165402624643 - 24745250.939695900956j), tol=ATOL) + assert ae(v.real, -711836.97165402624643, tol=PTOL) + assert ae(v.imag, -24745250.939695900956, tol=PTOL) + v = fp.e1((-80.0 + 20.0j)) + assert ae(v, (-4.2139911108612653091e+32 + 5.3367124741918251637e+32j), tol=ATOL) + assert ae(v.real, -4.2139911108612653091e+32, tol=PTOL) + assert ae(v.imag, 5.3367124741918251637e+32, tol=PTOL) + v = fp.e1((-120.0 + 30.0j)) + assert ae(v, (9.7760616203707508892e+48 - 1.058257682317195792e+50j), tol=ATOL) + assert ae(v.real, 9.7760616203707508892e+48, tol=PTOL) + assert ae(v.imag, -1.058257682317195792e+50, tol=PTOL) + v = fp.e1((-160.0 + 40.0j)) + assert ae(v, (8.7065541466623638861e+66 + 1.6577106725141739889e+67j), tol=ATOL) + assert ae(v.real, 8.7065541466623638861e+66, tol=PTOL) + assert ae(v.imag, 1.6577106725141739889e+67, tol=PTOL) + v = fp.e1((-200.0 + 50.0j)) + assert ae(v, (-3.070744996327018106e+84 - 1.7243244846769415903e+84j), tol=ATOL) + assert ae(v.real, -3.070744996327018106e+84, tol=PTOL) + assert ae(v.imag, -1.7243244846769415903e+84, tol=PTOL) + v = fp.e1((-320.0 + 80.0j)) + assert ae(v, (9.9960598637998647276e+135 - 2.6855081527595608863e+136j), tol=ATOL) + assert ae(v.real, 9.9960598637998647276e+135, tol=PTOL) + assert ae(v.imag, -2.6855081527595608863e+136, tol=PTOL) + v = fp.e1(-1.1641532182693481445e-10) + assert ae(v, (22.296641293460247028 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 22.296641293460247028, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-0.25) + assert ae(v, (0.54254326466191372953 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 0.54254326466191372953, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-1.0) + assert ae(v, (-1.8951178163559367555 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -1.8951178163559367555, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-2.0) + assert ae(v, (-4.9542343560018901634 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -4.9542343560018901634, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-5.0) + assert ae(v, (-40.185275355803177455 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -40.185275355803177455, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-20.0) + assert ae(v, (-25615652.66405658882 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -25615652.66405658882, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-30.0) + assert ae(v, (-368973209407.27419706 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -368973209407.27419706, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-40.0) + assert ae(v, (-6039718263611241.5784 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -6039718263611241.5784, tol=PTOL) + assert ae(v.imag, 
-3.1415926535897932385, tol=PTOL) + v = fp.e1(-50.0) + assert ae(v, (-1.0585636897131690963e+20 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -1.0585636897131690963e+20, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-80.0) + assert ae(v, (-7.0146000049047999696e+32 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -7.0146000049047999696e+32, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-1.1641532182693481445e-10 + 0.0j)) + assert ae(v, (22.296641293460247028 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 22.296641293460247028, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-0.25 + 0.0j)) + assert ae(v, (0.54254326466191372953 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 0.54254326466191372953, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-1.0 + 0.0j)) + assert ae(v, (-1.8951178163559367555 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -1.8951178163559367555, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-2.0 + 0.0j)) + assert ae(v, (-4.9542343560018901634 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -4.9542343560018901634, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-5.0 + 0.0j)) + assert ae(v, (-40.185275355803177455 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -40.185275355803177455, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-20.0 + 0.0j)) + assert ae(v, (-25615652.66405658882 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -25615652.66405658882, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-30.0 + 0.0j)) + assert ae(v, (-368973209407.27419706 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -368973209407.27419706, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-40.0 + 0.0j)) + assert ae(v, (-6039718263611241.5784 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -6039718263611241.5784, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-50.0 + 0.0j)) + assert ae(v, (-1.0585636897131690963e+20 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -1.0585636897131690963e+20, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-80.0 + 0.0j)) + assert ae(v, (-7.0146000049047999696e+32 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -7.0146000049047999696e+32, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-4.6566128730773925781e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (20.880034621082893023 + 2.8966139903465137624j), tol=ATOL) + assert ae(v.real, 20.880034621082893023, tol=PTOL) + assert ae(v.imag, 2.8966139903465137624, tol=PTOL) + v = fp.e1((-1.0 - 0.25j)) + assert ae(v, (-1.8942716983721074932 + 2.4689102827070540799j), tol=ATOL) + assert ae(v.real, -1.8942716983721074932, tol=PTOL) + assert ae(v.imag, 2.4689102827070540799, tol=PTOL) + v = fp.e1((-4.0 - 1.0j)) + assert ae(v, (-14.806699492675420438 - 9.1384225230837893776j), tol=ATOL) + assert ae(v.real, -14.806699492675420438, tol=PTOL) + assert ae(v.imag, -9.1384225230837893776, tol=PTOL) + v = fp.e1((-8.0 - 2.0j)) + assert ae(v, (54.633252667426386294 - 413.20318163814670688j), tol=ATOL) + assert ae(v.real, 54.633252667426386294, tol=PTOL) + assert ae(v.imag, -413.20318163814670688, tol=PTOL) + v = fp.e1((-20.0 - 5.0j)) + assert ae(v, 
(-711836.97165402624643 + 24745250.939695900956j), tol=ATOL) + assert ae(v.real, -711836.97165402624643, tol=PTOL) + assert ae(v.imag, 24745250.939695900956, tol=PTOL) + v = fp.e1((-80.0 - 20.0j)) + assert ae(v, (-4.2139911108612653091e+32 - 5.3367124741918251637e+32j), tol=ATOL) + assert ae(v.real, -4.2139911108612653091e+32, tol=PTOL) + assert ae(v.imag, -5.3367124741918251637e+32, tol=PTOL) + v = fp.e1((-120.0 - 30.0j)) + assert ae(v, (9.7760616203707508892e+48 + 1.058257682317195792e+50j), tol=ATOL) + assert ae(v.real, 9.7760616203707508892e+48, tol=PTOL) + assert ae(v.imag, 1.058257682317195792e+50, tol=PTOL) + v = fp.e1((-160.0 - 40.0j)) + assert ae(v, (8.7065541466623638861e+66 - 1.6577106725141739889e+67j), tol=ATOL) + assert ae(v.real, 8.7065541466623638861e+66, tol=PTOL) + assert ae(v.imag, -1.6577106725141739889e+67, tol=PTOL) + v = fp.e1((-200.0 - 50.0j)) + assert ae(v, (-3.070744996327018106e+84 + 1.7243244846769415903e+84j), tol=ATOL) + assert ae(v.real, -3.070744996327018106e+84, tol=PTOL) + assert ae(v.imag, 1.7243244846769415903e+84, tol=PTOL) + v = fp.e1((-320.0 - 80.0j)) + assert ae(v, (9.9960598637998647276e+135 + 2.6855081527595608863e+136j), tol=ATOL) + assert ae(v.real, 9.9960598637998647276e+135, tol=PTOL) + assert ae(v.imag, 2.6855081527595608863e+136, tol=PTOL) + v = fp.e1((-1.1641532182693481445e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (21.950067703180274374 + 2.356194490075929607j), tol=ATOL) + assert ae(v.real, 21.950067703180274374, tol=PTOL) + assert ae(v.imag, 2.356194490075929607, tol=PTOL) + v = fp.e1((-0.25 - 0.25j)) + assert ae(v, (0.21441047326710323254 + 2.0732153554307936389j), tol=ATOL) + assert ae(v.real, 0.21441047326710323254, tol=PTOL) + assert ae(v.imag, 2.0732153554307936389, tol=PTOL) + v = fp.e1((-1.0 - 1.0j)) + assert ae(v, (-1.7646259855638540684 + 0.7538228020792708192j), tol=ATOL) + assert ae(v.real, -1.7646259855638540684, tol=PTOL) + assert ae(v.imag, 0.7538228020792708192, tol=PTOL) + v = fp.e1((-2.0 - 2.0j)) + assert ae(v, (-1.8920781621855474089 - 2.1753697842428647236j), tol=ATOL) + assert ae(v.real, -1.8920781621855474089, tol=PTOL) + assert ae(v.imag, -2.1753697842428647236, tol=PTOL) + v = fp.e1((-5.0 - 5.0j)) + assert ae(v, (13.470936071475245856 + 18.464085049321024206j), tol=ATOL) + assert ae(v.real, 13.470936071475245856, tol=PTOL) + assert ae(v.imag, 18.464085049321024206, tol=PTOL) + v = fp.e1((-20.0 - 20.0j)) + assert ae(v, (-16589317.398788971896 - 5831702.3296441771206j), tol=ATOL) + assert ae(v.real, -16589317.398788971896, tol=PTOL) + assert ae(v.imag, -5831702.3296441771206, tol=PTOL) + v = fp.e1((-30.0 - 30.0j)) + assert ae(v, (154596484273.69322527 + 204179357837.41389696j), tol=ATOL) + assert ae(v.real, 154596484273.69322527, tol=PTOL) + assert ae(v.imag, 204179357837.41389696, tol=PTOL) + v = fp.e1((-40.0 - 40.0j)) + assert ae(v, (-287512180321448.45408 - 4203502407932314.974j), tol=ATOL) + assert ae(v.real, -287512180321448.45408, tol=PTOL) + assert ae(v.imag, -4203502407932314.974, tol=PTOL) + v = fp.e1((-50.0 - 50.0j)) + assert ae(v, (-36128528616649268826.0 + 64648801861338741963.0j), tol=ATOL) + assert ae(v.real, -36128528616649268826.0, tol=PTOL) + assert ae(v.imag, 64648801861338741963.0, tol=PTOL) + v = fp.e1((-80.0 - 80.0j)) + assert ae(v, (3.8674816337930010217e+32 + 3.0540709639658071041e+32j), tol=ATOL) + assert ae(v.real, 3.8674816337930010217e+32, tol=PTOL) + assert ae(v.imag, 3.0540709639658071041e+32, tol=PTOL) + v = fp.e1((-1.1641532182693481445e-10 - 4.6566128730773925781e-10j)) + assert 
ae(v, (20.880034621432138988 + 1.8157749894560994861j), tol=ATOL) + assert ae(v.real, 20.880034621432138988, tol=PTOL) + assert ae(v.imag, 1.8157749894560994861, tol=PTOL) + v = fp.e1((-0.25 - 1.0j)) + assert ae(v, (-0.59066621214766308594 + 0.74474454765205036972j), tol=ATOL) + assert ae(v.real, -0.59066621214766308594, tol=PTOL) + assert ae(v.imag, 0.74474454765205036972, tol=PTOL) + v = fp.e1((-1.0 - 4.0j)) + assert ae(v, (0.49739047283060471093 - 0.41543605404038863174j), tol=ATOL) + assert ae(v.real, 0.49739047283060471093, tol=PTOL) + assert ae(v.imag, -0.41543605404038863174, tol=PTOL) + v = fp.e1((-2.0 - 8.0j)) + assert ae(v, (-0.8705211147733730969 - 0.24099328498605539667j), tol=ATOL) + assert ae(v.real, -0.8705211147733730969, tol=PTOL) + assert ae(v.imag, -0.24099328498605539667, tol=PTOL) + v = fp.e1((-5.0 - 20.0j)) + assert ae(v, (-7.0789514293925893007 + 1.6102177171960790536j), tol=ATOL) + assert ae(v.real, -7.0789514293925893007, tol=PTOL) + assert ae(v.imag, 1.6102177171960790536, tol=PTOL) + v = fp.e1((-20.0 - 80.0j)) + assert ae(v, (5855431.4907298084434 + 720920.93315409165707j), tol=ATOL) + assert ae(v.real, 5855431.4907298084434, tol=PTOL) + assert ae(v.imag, 720920.93315409165707, tol=PTOL) + v = fp.e1((-30.0 - 120.0j)) + assert ae(v, (-65402491644.703470747 + 56697658399.657460294j), tol=ATOL) + assert ae(v.real, -65402491644.703470747, tol=PTOL) + assert ae(v.imag, 56697658399.657460294, tol=PTOL) + v = fp.e1((-40.0 - 160.0j)) + assert ae(v, (25504929379604.776769 - 1429035198630573.2463j), tol=ATOL) + assert ae(v.real, 25504929379604.776769, tol=PTOL) + assert ae(v.imag, -1429035198630573.2463, tol=PTOL) + v = fp.e1((-50.0 - 200.0j)) + assert ae(v, (18437746526988116954.0 + 17146362239046152345.0j), tol=ATOL) + assert ae(v.real, 18437746526988116954.0, tol=PTOL) + assert ae(v.imag, 17146362239046152345.0, tol=PTOL) + v = fp.e1((-80.0 - 320.0j)) + assert ae(v, (3.3464697299634526706e+31 + 1.6473152633843023919e+32j), tol=ATOL) + assert ae(v.real, 3.3464697299634526706e+31, tol=PTOL) + assert ae(v.imag, 1.6473152633843023919e+32, tol=PTOL) + v = fp.e1((0.0 - 1.1641532182693481445e-10j)) + assert ae(v, (22.29664129357666235 + 1.5707963266784812974j), tol=ATOL) + assert ae(v.real, 22.29664129357666235, tol=PTOL) + assert ae(v.imag, 1.5707963266784812974, tol=PTOL) + v = fp.e1((0.0 - 0.25j)) + assert ae(v, (0.82466306258094565309 + 1.3216627564751394551j), tol=ATOL) + assert ae(v.real, 0.82466306258094565309, tol=PTOL) + assert ae(v.imag, 1.3216627564751394551, tol=PTOL) + v = fp.e1((0.0 - 1.0j)) + assert ae(v, (-0.33740392290096813466 + 0.62471325642771360429j), tol=ATOL) + assert ae(v.real, -0.33740392290096813466, tol=PTOL) + assert ae(v.imag, 0.62471325642771360429, tol=PTOL) + v = fp.e1((0.0 - 2.0j)) + assert ae(v, (-0.4229808287748649957 - 0.034616650007798229345j), tol=ATOL) + assert ae(v.real, -0.4229808287748649957, tol=PTOL) + assert ae(v.imag, -0.034616650007798229345, tol=PTOL) + v = fp.e1((0.0 - 5.0j)) + assert ae(v, (0.19002974965664387862 + 0.020865081850222481957j), tol=ATOL) + assert ae(v.real, 0.19002974965664387862, tol=PTOL) + assert ae(v.imag, 0.020865081850222481957, tol=PTOL) + v = fp.e1((0.0 - 20.0j)) + assert ae(v, (-0.04441982084535331654 + 0.022554625751456779068j), tol=ATOL) + assert ae(v.real, -0.04441982084535331654, tol=PTOL) + assert ae(v.imag, 0.022554625751456779068, tol=PTOL) + v = fp.e1((0.0 - 30.0j)) + assert ae(v, (0.033032417282071143779 + 0.0040397867645455082476j), tol=ATOL) + assert ae(v.real, 0.033032417282071143779, 
tol=PTOL) + assert ae(v.imag, 0.0040397867645455082476, tol=PTOL) + v = fp.e1((0.0 - 40.0j)) + assert ae(v, (-0.019020007896208766962 - 0.016188792559887887544j), tol=ATOL) + assert ae(v.real, -0.019020007896208766962, tol=PTOL) + assert ae(v.imag, -0.016188792559887887544, tol=PTOL) + v = fp.e1((0.0 - 50.0j)) + assert ae(v, (0.0056283863241163054402 + 0.019179254308960724503j), tol=ATOL) + assert ae(v.real, 0.0056283863241163054402, tol=PTOL) + assert ae(v.imag, 0.019179254308960724503, tol=PTOL) + v = fp.e1((0.0 - 80.0j)) + assert ae(v, (0.012402501155070958192 - 0.0015345601175906961199j), tol=ATOL) + assert ae(v.real, 0.012402501155070958192, tol=PTOL) + assert ae(v.imag, -0.0015345601175906961199, tol=PTOL) + v = fp.e1((1.1641532182693481445e-10 - 4.6566128730773925781e-10j)) + assert ae(v, (20.880034621664969632 + 1.3258176632023711778j), tol=ATOL) + assert ae(v.real, 20.880034621664969632, tol=PTOL) + assert ae(v.imag, 1.3258176632023711778, tol=PTOL) + v = fp.e1((0.25 - 1.0j)) + assert ae(v, (-0.16868306393667788761 + 0.4858011885947426971j), tol=ATOL) + assert ae(v.real, -0.16868306393667788761, tol=PTOL) + assert ae(v.imag, 0.4858011885947426971, tol=PTOL) + v = fp.e1((1.0 - 4.0j)) + assert ae(v, (0.03373591813926547318 - 0.073523452241083821877j), tol=ATOL) + assert ae(v.real, 0.03373591813926547318, tol=PTOL) + assert ae(v.imag, -0.073523452241083821877, tol=PTOL) + v = fp.e1((2.0 - 8.0j)) + assert ae(v, (-0.015392833434733785143 + 0.0031747121557605415914j), tol=ATOL) + assert ae(v.real, -0.015392833434733785143, tol=PTOL) + assert ae(v.imag, 0.0031747121557605415914, tol=PTOL) + v = fp.e1((5.0 - 20.0j)) + assert ae(v, (-0.00024419662286542966525 + 0.00021008322966152755674j), tol=ATOL) + assert ae(v.real, -0.00024419662286542966525, tol=PTOL) + assert ae(v.imag, 0.00021008322966152755674, tol=PTOL) + v = fp.e1((20.0 - 80.0j)) + assert ae(v, (2.3255552781051330088e-11 - 8.9463918891349438007e-12j), tol=ATOL) + assert ae(v.real, 2.3255552781051330088e-11, tol=PTOL) + assert ae(v.imag, -8.9463918891349438007e-12, tol=PTOL) + v = fp.e1((30.0 - 120.0j)) + assert ae(v, (-2.7068919097124652332e-16 + 7.0477762411705130239e-16j), tol=ATOL) + assert ae(v.real, -2.7068919097124652332e-16, tol=PTOL) + assert ae(v.imag, 7.0477762411705130239e-16, tol=PTOL) + v = fp.e1((40.0 - 160.0j)) + assert ae(v, (-1.1695597827678024687e-20 - 2.2907401455645736661e-20j), tol=ATOL) + assert ae(v.real, -1.1695597827678024687e-20, tol=PTOL) + assert ae(v.imag, -2.2907401455645736661e-20, tol=PTOL) + v = fp.e1((50.0 - 200.0j)) + assert ae(v, (9.0323746914410162531e-25 + 2.3950601790033530935e-25j), tol=ATOL) + assert ae(v.real, 9.0323746914410162531e-25, tol=PTOL) + assert ae(v.imag, 2.3950601790033530935e-25, tol=PTOL) + v = fp.e1((80.0 - 320.0j)) + assert ae(v, (3.4819106748728063576e-38 + 4.215653005615772724e-38j), tol=ATOL) + assert ae(v.real, 3.4819106748728063576e-38, tol=PTOL) + assert ae(v.imag, 4.215653005615772724e-38, tol=PTOL) + v = fp.e1((1.1641532182693481445e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (21.950067703413105017 + 0.7853981632810329878j), tol=ATOL) + assert ae(v.real, 21.950067703413105017, tol=PTOL) + assert ae(v.imag, 0.7853981632810329878, tol=PTOL) + v = fp.e1((0.25 - 0.25j)) + assert ae(v, (0.71092525792923287894 + 0.56491812441304194711j), tol=ATOL) + assert ae(v.real, 0.71092525792923287894, tol=PTOL) + assert ae(v.imag, 0.56491812441304194711, tol=PTOL) + v = fp.e1((1.0 - 1.0j)) + assert ae(v, (0.00028162445198141832551 + 0.17932453503935894015j), tol=ATOL) + 
assert ae(v.real, 0.00028162445198141832551, tol=PTOL) + assert ae(v.imag, 0.17932453503935894015, tol=PTOL) + v = fp.e1((2.0 - 2.0j)) + assert ae(v, (-0.033767089606562004246 + 0.018599414169750541925j), tol=ATOL) + assert ae(v.real, -0.033767089606562004246, tol=PTOL) + assert ae(v.imag, 0.018599414169750541925, tol=PTOL) + v = fp.e1((5.0 - 5.0j)) + assert ae(v, (0.0007266506660356393891 - 0.00047102780163522245054j), tol=ATOL) + assert ae(v.real, 0.0007266506660356393891, tol=PTOL) + assert ae(v.imag, -0.00047102780163522245054, tol=PTOL) + v = fp.e1((20.0 - 20.0j)) + assert ae(v, (-2.3824537449367396579e-11 + 6.6969873156525615158e-11j), tol=ATOL) + assert ae(v.real, -2.3824537449367396579e-11, tol=PTOL) + assert ae(v.imag, 6.6969873156525615158e-11, tol=PTOL) + v = fp.e1((30.0 - 30.0j)) + assert ae(v, (1.7316045841744061617e-15 - 1.3065678019487308689e-15j), tol=ATOL) + assert ae(v.real, 1.7316045841744061617e-15, tol=PTOL) + assert ae(v.imag, -1.3065678019487308689e-15, tol=PTOL) + v = fp.e1((40.0 - 40.0j)) + assert ae(v, (-7.4001043002899232182e-20 + 4.991847855336816304e-21j), tol=ATOL) + assert ae(v.real, -7.4001043002899232182e-20, tol=PTOL) + assert ae(v.imag, 4.991847855336816304e-21, tol=PTOL) + v = fp.e1((50.0 - 50.0j)) + assert ae(v, (2.3566128324644641219e-24 + 1.3188326726201614778e-24j), tol=ATOL) + assert ae(v.real, 2.3566128324644641219e-24, tol=PTOL) + assert ae(v.imag, 1.3188326726201614778e-24, tol=PTOL) + v = fp.e1((80.0 - 80.0j)) + assert ae(v, (9.8279750572186526673e-38 - 1.243952841288868831e-37j), tol=ATOL) + assert ae(v.real, 9.8279750572186526673e-38, tol=PTOL) + assert ae(v.imag, -1.243952841288868831e-37, tol=PTOL) + v = fp.e1((4.6566128730773925781e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (20.880034622014215597 + 0.24497866301044883237j), tol=ATOL) + assert ae(v.real, 20.880034622014215597, tol=PTOL) + assert ae(v.imag, 0.24497866301044883237, tol=PTOL) + v = fp.e1((1.0 - 0.25j)) + assert ae(v, (0.19731063945004229095 + 0.087366045774299963672j), tol=ATOL) + assert ae(v.real, 0.19731063945004229095, tol=PTOL) + assert ae(v.imag, 0.087366045774299963672, tol=PTOL) + v = fp.e1((4.0 - 1.0j)) + assert ae(v, (0.0013106173980145506944 + 0.0034542480199350626699j), tol=ATOL) + assert ae(v.real, 0.0013106173980145506944, tol=PTOL) + assert ae(v.imag, 0.0034542480199350626699, tol=PTOL) + v = fp.e1((8.0 - 2.0j)) + assert ae(v, (-0.000022278049065270225945 + 0.000029191940456521555288j), tol=ATOL) + assert ae(v.real, -0.000022278049065270225945, tol=PTOL) + assert ae(v.imag, 0.000029191940456521555288, tol=PTOL) + v = fp.e1((20.0 - 5.0j)) + assert ae(v, (4.7711374515765346894e-11 - 8.2902652405126947359e-11j), tol=ATOL) + assert ae(v.real, 4.7711374515765346894e-11, tol=PTOL) + assert ae(v.imag, -8.2902652405126947359e-11, tol=PTOL) + v = fp.e1((80.0 - 20.0j)) + assert ae(v, (3.8353473865788235787e-38 + 2.129247592349605139e-37j), tol=ATOL) + assert ae(v.real, 3.8353473865788235787e-38, tol=PTOL) + assert ae(v.imag, 2.129247592349605139e-37, tol=PTOL) + v = fp.e1((120.0 - 30.0j)) + assert ae(v, (2.3836002337480334716e-55 - 5.6704043587126198306e-55j), tol=ATOL) + assert ae(v.real, 2.3836002337480334716e-55, tol=PTOL) + assert ae(v.imag, -5.6704043587126198306e-55, tol=PTOL) + v = fp.e1((160.0 - 40.0j)) + assert ae(v, (-1.6238022898654510661e-72 + 1.104172355572287367e-72j), tol=ATOL) + assert ae(v.real, -1.6238022898654510661e-72, tol=PTOL) + assert ae(v.imag, 1.104172355572287367e-72, tol=PTOL) + v = fp.e1((200.0 - 50.0j)) + assert ae(v, 
(6.6800061461666228487e-90 - 1.4473816083541016115e-91j), tol=ATOL) + assert ae(v.real, 6.6800061461666228487e-90, tol=PTOL) + assert ae(v.imag, -1.4473816083541016115e-91, tol=PTOL) + v = fp.e1((320.0 - 80.0j)) + assert ae(v, (4.2737871527778786157e-143 - 3.1789935525785660314e-142j), tol=ATOL) + assert ae(v.real, 4.2737871527778786157e-143, tol=PTOL) + assert ae(v.imag, -3.1789935525785660314e-142, tol=PTOL) + v = fp.ei(1.1641532182693481445e-10) + assert ae(v, -22.296641293460247028, tol=ATOL) + assert type(v) is float + v = fp.ei(0.25) + assert ae(v, -0.54254326466191372953, tol=ATOL) + assert type(v) is float + v = fp.ei(1.0) + assert ae(v, 1.8951178163559367555, tol=ATOL) + assert type(v) is float + v = fp.ei(2.0) + assert ae(v, 4.9542343560018901634, tol=ATOL) + assert type(v) is float + v = fp.ei(5.0) + assert ae(v, 40.185275355803177455, tol=ATOL) + assert type(v) is float + v = fp.ei(20.0) + assert ae(v, 25615652.66405658882, tol=ATOL) + assert type(v) is float + v = fp.ei(30.0) + assert ae(v, 368973209407.27419706, tol=ATOL) + assert type(v) is float + v = fp.ei(40.0) + assert ae(v, 6039718263611241.5784, tol=ATOL) + assert type(v) is float + v = fp.ei(50.0) + assert ae(v, 1.0585636897131690963e+20, tol=ATOL) + assert type(v) is float + v = fp.ei(80.0) + assert ae(v, 7.0146000049047999696e+32, tol=ATOL) + assert type(v) is float + v = fp.ei((1.1641532182693481445e-10 + 0.0j)) + assert ae(v, (-22.296641293460247028 + 0.0j), tol=ATOL) + assert ae(v.real, -22.296641293460247028, tol=PTOL) + assert v.imag == 0 + v = fp.ei((0.25 + 0.0j)) + assert ae(v, (-0.54254326466191372953 + 0.0j), tol=ATOL) + assert ae(v.real, -0.54254326466191372953, tol=PTOL) + assert v.imag == 0 + v = fp.ei((1.0 + 0.0j)) + assert ae(v, (1.8951178163559367555 + 0.0j), tol=ATOL) + assert ae(v.real, 1.8951178163559367555, tol=PTOL) + assert v.imag == 0 + v = fp.ei((2.0 + 0.0j)) + assert ae(v, (4.9542343560018901634 + 0.0j), tol=ATOL) + assert ae(v.real, 4.9542343560018901634, tol=PTOL) + assert v.imag == 0 + v = fp.ei((5.0 + 0.0j)) + assert ae(v, (40.185275355803177455 + 0.0j), tol=ATOL) + assert ae(v.real, 40.185275355803177455, tol=PTOL) + assert v.imag == 0 + v = fp.ei((20.0 + 0.0j)) + assert ae(v, (25615652.66405658882 + 0.0j), tol=ATOL) + assert ae(v.real, 25615652.66405658882, tol=PTOL) + assert v.imag == 0 + v = fp.ei((30.0 + 0.0j)) + assert ae(v, (368973209407.27419706 + 0.0j), tol=ATOL) + assert ae(v.real, 368973209407.27419706, tol=PTOL) + assert v.imag == 0 + v = fp.ei((40.0 + 0.0j)) + assert ae(v, (6039718263611241.5784 + 0.0j), tol=ATOL) + assert ae(v.real, 6039718263611241.5784, tol=PTOL) + assert v.imag == 0 + v = fp.ei((50.0 + 0.0j)) + assert ae(v, (1.0585636897131690963e+20 + 0.0j), tol=ATOL) + assert ae(v.real, 1.0585636897131690963e+20, tol=PTOL) + assert v.imag == 0 + v = fp.ei((80.0 + 0.0j)) + assert ae(v, (7.0146000049047999696e+32 + 0.0j), tol=ATOL) + assert ae(v.real, 7.0146000049047999696e+32, tol=PTOL) + assert v.imag == 0 + v = fp.ei((4.6566128730773925781e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (-20.880034621082893023 + 0.24497866324327947603j), tol=ATOL) + assert ae(v.real, -20.880034621082893023, tol=PTOL) + assert ae(v.imag, 0.24497866324327947603, tol=PTOL) + v = fp.ei((1.0 + 0.25j)) + assert ae(v, (1.8942716983721074932 + 0.67268237088273915854j), tol=ATOL) + assert ae(v.real, 1.8942716983721074932, tol=PTOL) + assert ae(v.imag, 0.67268237088273915854, tol=PTOL) + v = fp.ei((4.0 + 1.0j)) + assert ae(v, (14.806699492675420438 + 12.280015176673582616j), tol=ATOL) + assert 
ae(v.real, 14.806699492675420438, tol=PTOL) + assert ae(v.imag, 12.280015176673582616, tol=PTOL) + v = fp.ei((8.0 + 2.0j)) + assert ae(v, (-54.633252667426386294 + 416.34477429173650012j), tol=ATOL) + assert ae(v.real, -54.633252667426386294, tol=PTOL) + assert ae(v.imag, 416.34477429173650012, tol=PTOL) + v = fp.ei((20.0 + 5.0j)) + assert ae(v, (711836.97165402624643 - 24745247.798103247366j), tol=ATOL) + assert ae(v.real, 711836.97165402624643, tol=PTOL) + assert ae(v.imag, -24745247.798103247366, tol=PTOL) + v = fp.ei((80.0 + 20.0j)) + assert ae(v, (4.2139911108612653091e+32 + 5.3367124741918251637e+32j), tol=ATOL) + assert ae(v.real, 4.2139911108612653091e+32, tol=PTOL) + assert ae(v.imag, 5.3367124741918251637e+32, tol=PTOL) + v = fp.ei((120.0 + 30.0j)) + assert ae(v, (-9.7760616203707508892e+48 - 1.058257682317195792e+50j), tol=ATOL) + assert ae(v.real, -9.7760616203707508892e+48, tol=PTOL) + assert ae(v.imag, -1.058257682317195792e+50, tol=PTOL) + v = fp.ei((160.0 + 40.0j)) + assert ae(v, (-8.7065541466623638861e+66 + 1.6577106725141739889e+67j), tol=ATOL) + assert ae(v.real, -8.7065541466623638861e+66, tol=PTOL) + assert ae(v.imag, 1.6577106725141739889e+67, tol=PTOL) + v = fp.ei((200.0 + 50.0j)) + assert ae(v, (3.070744996327018106e+84 - 1.7243244846769415903e+84j), tol=ATOL) + assert ae(v.real, 3.070744996327018106e+84, tol=PTOL) + assert ae(v.imag, -1.7243244846769415903e+84, tol=PTOL) + v = fp.ei((320.0 + 80.0j)) + assert ae(v, (-9.9960598637998647276e+135 - 2.6855081527595608863e+136j), tol=ATOL) + assert ae(v.real, -9.9960598637998647276e+135, tol=PTOL) + assert ae(v.imag, -2.6855081527595608863e+136, tol=PTOL) + v = fp.ei((1.1641532182693481445e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (-21.950067703180274374 + 0.78539816351386363145j), tol=ATOL) + assert ae(v.real, -21.950067703180274374, tol=PTOL) + assert ae(v.imag, 0.78539816351386363145, tol=PTOL) + v = fp.ei((0.25 + 0.25j)) + assert ae(v, (-0.21441047326710323254 + 1.0683772981589995996j), tol=ATOL) + assert ae(v.real, -0.21441047326710323254, tol=PTOL) + assert ae(v.imag, 1.0683772981589995996, tol=PTOL) + v = fp.ei((1.0 + 1.0j)) + assert ae(v, (1.7646259855638540684 + 2.3877698515105224193j), tol=ATOL) + assert ae(v.real, 1.7646259855638540684, tol=PTOL) + assert ae(v.imag, 2.3877698515105224193, tol=PTOL) + v = fp.ei((2.0 + 2.0j)) + assert ae(v, (1.8920781621855474089 + 5.3169624378326579621j), tol=ATOL) + assert ae(v.real, 1.8920781621855474089, tol=PTOL) + assert ae(v.imag, 5.3169624378326579621, tol=PTOL) + v = fp.ei((5.0 + 5.0j)) + assert ae(v, (-13.470936071475245856 - 15.322492395731230968j), tol=ATOL) + assert ae(v.real, -13.470936071475245856, tol=PTOL) + assert ae(v.imag, -15.322492395731230968, tol=PTOL) + v = fp.ei((20.0 + 20.0j)) + assert ae(v, (16589317.398788971896 + 5831705.4712368307104j), tol=ATOL) + assert ae(v.real, 16589317.398788971896, tol=PTOL) + assert ae(v.imag, 5831705.4712368307104, tol=PTOL) + v = fp.ei((30.0 + 30.0j)) + assert ae(v, (-154596484273.69322527 - 204179357834.2723043j), tol=ATOL) + assert ae(v.real, -154596484273.69322527, tol=PTOL) + assert ae(v.imag, -204179357834.2723043, tol=PTOL) + v = fp.ei((40.0 + 40.0j)) + assert ae(v, (287512180321448.45408 + 4203502407932318.1156j), tol=ATOL) + assert ae(v.real, 287512180321448.45408, tol=PTOL) + assert ae(v.imag, 4203502407932318.1156, tol=PTOL) + v = fp.ei((50.0 + 50.0j)) + assert ae(v, (36128528616649268826.0 - 64648801861338741960.0j), tol=ATOL) + assert ae(v.real, 36128528616649268826.0, tol=PTOL) + assert ae(v.imag, 
-64648801861338741960.0, tol=PTOL) + v = fp.ei((80.0 + 80.0j)) + assert ae(v, (-3.8674816337930010217e+32 - 3.0540709639658071041e+32j), tol=ATOL) + assert ae(v.real, -3.8674816337930010217e+32, tol=PTOL) + assert ae(v.imag, -3.0540709639658071041e+32, tol=PTOL) + v = fp.ei((1.1641532182693481445e-10 + 4.6566128730773925781e-10j)) + assert ae(v, (-20.880034621432138988 + 1.3258176641336937524j), tol=ATOL) + assert ae(v.real, -20.880034621432138988, tol=PTOL) + assert ae(v.imag, 1.3258176641336937524, tol=PTOL) + v = fp.ei((0.25 + 1.0j)) + assert ae(v, (0.59066621214766308594 + 2.3968481059377428687j), tol=ATOL) + assert ae(v.real, 0.59066621214766308594, tol=PTOL) + assert ae(v.imag, 2.3968481059377428687, tol=PTOL) + v = fp.ei((1.0 + 4.0j)) + assert ae(v, (-0.49739047283060471093 + 3.5570287076301818702j), tol=ATOL) + assert ae(v.real, -0.49739047283060471093, tol=PTOL) + assert ae(v.imag, 3.5570287076301818702, tol=PTOL) + v = fp.ei((2.0 + 8.0j)) + assert ae(v, (0.8705211147733730969 + 3.3825859385758486351j), tol=ATOL) + assert ae(v.real, 0.8705211147733730969, tol=PTOL) + assert ae(v.imag, 3.3825859385758486351, tol=PTOL) + v = fp.ei((5.0 + 20.0j)) + assert ae(v, (7.0789514293925893007 + 1.5313749363937141849j), tol=ATOL) + assert ae(v.real, 7.0789514293925893007, tol=PTOL) + assert ae(v.imag, 1.5313749363937141849, tol=PTOL) + v = fp.ei((20.0 + 80.0j)) + assert ae(v, (-5855431.4907298084434 - 720917.79156143806727j), tol=ATOL) + assert ae(v.real, -5855431.4907298084434, tol=PTOL) + assert ae(v.imag, -720917.79156143806727, tol=PTOL) + v = fp.ei((30.0 + 120.0j)) + assert ae(v, (65402491644.703470747 - 56697658396.51586764j), tol=ATOL) + assert ae(v.real, 65402491644.703470747, tol=PTOL) + assert ae(v.imag, -56697658396.51586764, tol=PTOL) + v = fp.ei((40.0 + 160.0j)) + assert ae(v, (-25504929379604.776769 + 1429035198630576.3879j), tol=ATOL) + assert ae(v.real, -25504929379604.776769, tol=PTOL) + assert ae(v.imag, 1429035198630576.3879, tol=PTOL) + v = fp.ei((50.0 + 200.0j)) + assert ae(v, (-18437746526988116954.0 - 17146362239046152342.0j), tol=ATOL) + assert ae(v.real, -18437746526988116954.0, tol=PTOL) + assert ae(v.imag, -17146362239046152342.0, tol=PTOL) + v = fp.ei((80.0 + 320.0j)) + assert ae(v, (-3.3464697299634526706e+31 - 1.6473152633843023919e+32j), tol=ATOL) + assert ae(v.real, -3.3464697299634526706e+31, tol=PTOL) + assert ae(v.imag, -1.6473152633843023919e+32, tol=PTOL) + v = fp.ei((0.0 + 1.1641532182693481445e-10j)) + assert ae(v, (-22.29664129357666235 + 1.5707963269113119411j), tol=ATOL) + assert ae(v.real, -22.29664129357666235, tol=PTOL) + assert ae(v.imag, 1.5707963269113119411, tol=PTOL) + v = fp.ei((0.0 + 0.25j)) + assert ae(v, (-0.82466306258094565309 + 1.8199298971146537833j), tol=ATOL) + assert ae(v.real, -0.82466306258094565309, tol=PTOL) + assert ae(v.imag, 1.8199298971146537833, tol=PTOL) + v = fp.ei((0.0 + 1.0j)) + assert ae(v, (0.33740392290096813466 + 2.5168793971620796342j), tol=ATOL) + assert ae(v.real, 0.33740392290096813466, tol=PTOL) + assert ae(v.imag, 2.5168793971620796342, tol=PTOL) + v = fp.ei((0.0 + 2.0j)) + assert ae(v, (0.4229808287748649957 + 3.1762093035975914678j), tol=ATOL) + assert ae(v.real, 0.4229808287748649957, tol=PTOL) + assert ae(v.imag, 3.1762093035975914678, tol=PTOL) + v = fp.ei((0.0 + 5.0j)) + assert ae(v, (-0.19002974965664387862 + 3.1207275717395707565j), tol=ATOL) + assert ae(v.real, -0.19002974965664387862, tol=PTOL) + assert ae(v.imag, 3.1207275717395707565, tol=PTOL) + v = fp.ei((0.0 + 20.0j)) + assert ae(v, 
(0.04441982084535331654 + 3.1190380278383364594j), tol=ATOL) + assert ae(v.real, 0.04441982084535331654, tol=PTOL) + assert ae(v.imag, 3.1190380278383364594, tol=PTOL) + v = fp.ei((0.0 + 30.0j)) + assert ae(v, (-0.033032417282071143779 + 3.1375528668252477302j), tol=ATOL) + assert ae(v.real, -0.033032417282071143779, tol=PTOL) + assert ae(v.imag, 3.1375528668252477302, tol=PTOL) + v = fp.ei((0.0 + 40.0j)) + assert ae(v, (0.019020007896208766962 + 3.157781446149681126j), tol=ATOL) + assert ae(v.real, 0.019020007896208766962, tol=PTOL) + assert ae(v.imag, 3.157781446149681126, tol=PTOL) + v = fp.ei((0.0 + 50.0j)) + assert ae(v, (-0.0056283863241163054402 + 3.122413399280832514j), tol=ATOL) + assert ae(v.real, -0.0056283863241163054402, tol=PTOL) + assert ae(v.imag, 3.122413399280832514, tol=PTOL) + v = fp.ei((0.0 + 80.0j)) + assert ae(v, (-0.012402501155070958192 + 3.1431272137073839346j), tol=ATOL) + assert ae(v.real, -0.012402501155070958192, tol=PTOL) + assert ae(v.imag, 3.1431272137073839346, tol=PTOL) + v = fp.ei((-1.1641532182693481445e-10 + 4.6566128730773925781e-10j)) + assert ae(v, (-20.880034621664969632 + 1.8157749903874220607j), tol=ATOL) + assert ae(v.real, -20.880034621664969632, tol=PTOL) + assert ae(v.imag, 1.8157749903874220607, tol=PTOL) + v = fp.ei((-0.25 + 1.0j)) + assert ae(v, (0.16868306393667788761 + 2.6557914649950505414j), tol=ATOL) + assert ae(v.real, 0.16868306393667788761, tol=PTOL) + assert ae(v.imag, 2.6557914649950505414, tol=PTOL) + v = fp.ei((-1.0 + 4.0j)) + assert ae(v, (-0.03373591813926547318 + 3.2151161058308770603j), tol=ATOL) + assert ae(v.real, -0.03373591813926547318, tol=PTOL) + assert ae(v.imag, 3.2151161058308770603, tol=PTOL) + v = fp.ei((-2.0 + 8.0j)) + assert ae(v, (0.015392833434733785143 + 3.1384179414340326969j), tol=ATOL) + assert ae(v.real, 0.015392833434733785143, tol=PTOL) + assert ae(v.imag, 3.1384179414340326969, tol=PTOL) + v = fp.ei((-5.0 + 20.0j)) + assert ae(v, (0.00024419662286542966525 + 3.1413825703601317109j), tol=ATOL) + assert ae(v.real, 0.00024419662286542966525, tol=PTOL) + assert ae(v.imag, 3.1413825703601317109, tol=PTOL) + v = fp.ei((-20.0 + 80.0j)) + assert ae(v, (-2.3255552781051330088e-11 + 3.1415926535987396304j), tol=ATOL) + assert ae(v.real, -2.3255552781051330088e-11, tol=PTOL) + assert ae(v.imag, 3.1415926535987396304, tol=PTOL) + v = fp.ei((-30.0 + 120.0j)) + assert ae(v, (2.7068919097124652332e-16 + 3.1415926535897925337j), tol=ATOL) + assert ae(v.real, 2.7068919097124652332e-16, tol=PTOL) + assert ae(v.imag, 3.1415926535897925337, tol=PTOL) + v = fp.ei((-40.0 + 160.0j)) + assert ae(v, (1.1695597827678024687e-20 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 1.1695597827678024687e-20, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-50.0 + 200.0j)) + assert ae(v, (-9.0323746914410162531e-25 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -9.0323746914410162531e-25, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-80.0 + 320.0j)) + assert ae(v, (-3.4819106748728063576e-38 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -3.4819106748728063576e-38, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-4.6566128730773925781e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (-20.880034622014215597 + 2.8966139905793444061j), tol=ATOL) + assert ae(v.real, -20.880034622014215597, tol=PTOL) + assert ae(v.imag, 2.8966139905793444061, tol=PTOL) + v = fp.ei((-1.0 + 0.25j)) + assert ae(v, (-0.19731063945004229095 + 
3.0542266078154932748j), tol=ATOL) + assert ae(v.real, -0.19731063945004229095, tol=PTOL) + assert ae(v.imag, 3.0542266078154932748, tol=PTOL) + v = fp.ei((-4.0 + 1.0j)) + assert ae(v, (-0.0013106173980145506944 + 3.1381384055698581758j), tol=ATOL) + assert ae(v.real, -0.0013106173980145506944, tol=PTOL) + assert ae(v.imag, 3.1381384055698581758, tol=PTOL) + v = fp.ei((-8.0 + 2.0j)) + assert ae(v, (0.000022278049065270225945 + 3.1415634616493367169j), tol=ATOL) + assert ae(v.real, 0.000022278049065270225945, tol=PTOL) + assert ae(v.imag, 3.1415634616493367169, tol=PTOL) + v = fp.ei((-20.0 + 5.0j)) + assert ae(v, (-4.7711374515765346894e-11 + 3.1415926536726958909j), tol=ATOL) + assert ae(v.real, -4.7711374515765346894e-11, tol=PTOL) + assert ae(v.imag, 3.1415926536726958909, tol=PTOL) + v = fp.ei((-80.0 + 20.0j)) + assert ae(v, (-3.8353473865788235787e-38 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -3.8353473865788235787e-38, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-120.0 + 30.0j)) + assert ae(v, (-2.3836002337480334716e-55 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -2.3836002337480334716e-55, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-160.0 + 40.0j)) + assert ae(v, (1.6238022898654510661e-72 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 1.6238022898654510661e-72, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-200.0 + 50.0j)) + assert ae(v, (-6.6800061461666228487e-90 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -6.6800061461666228487e-90, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-320.0 + 80.0j)) + assert ae(v, (-4.2737871527778786157e-143 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -4.2737871527778786157e-143, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei(-1.1641532182693481445e-10) + assert ae(v, -22.296641293693077672, tol=ATOL) + assert type(v) is float + v = fp.ei(-0.25) + assert ae(v, -1.0442826344437381945, tol=ATOL) + assert type(v) is float + v = fp.ei(-1.0) + assert ae(v, -0.21938393439552027368, tol=ATOL) + assert type(v) is float + v = fp.ei(-2.0) + assert ae(v, -0.048900510708061119567, tol=ATOL) + assert type(v) is float + v = fp.ei(-5.0) + assert ae(v, -0.0011482955912753257973, tol=ATOL) + assert type(v) is float + v = fp.ei(-20.0) + assert ae(v, -9.8355252906498816904e-11, tol=ATOL) + assert type(v) is float + v = fp.ei(-30.0) + assert ae(v, -3.0215520106888125448e-15, tol=ATOL) + assert type(v) is float + v = fp.ei(-40.0) + assert ae(v, -1.0367732614516569722e-19, tol=ATOL) + assert type(v) is float + v = fp.ei(-50.0) + assert ae(v, -3.7832640295504590187e-24, tol=ATOL) + assert type(v) is float + v = fp.ei(-80.0) + assert ae(v, -2.2285432586884729112e-37, tol=ATOL) + assert type(v) is float + v = fp.ei((-1.1641532182693481445e-10 + 0.0j)) + assert ae(v, (-22.296641293693077672 + 0.0j), tol=ATOL) + assert ae(v.real, -22.296641293693077672, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-0.25 + 0.0j)) + assert ae(v, (-1.0442826344437381945 + 0.0j), tol=ATOL) + assert ae(v.real, -1.0442826344437381945, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-1.0 + 0.0j)) + assert ae(v, (-0.21938393439552027368 + 0.0j), tol=ATOL) + assert ae(v.real, -0.21938393439552027368, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-2.0 + 0.0j)) + assert ae(v, (-0.048900510708061119567 + 0.0j), tol=ATOL) + assert ae(v.real, -0.048900510708061119567, tol=PTOL) + assert v.imag == 0 + 
v = fp.ei((-5.0 + 0.0j)) + assert ae(v, (-0.0011482955912753257973 + 0.0j), tol=ATOL) + assert ae(v.real, -0.0011482955912753257973, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-20.0 + 0.0j)) + assert ae(v, (-9.8355252906498816904e-11 + 0.0j), tol=ATOL) + assert ae(v.real, -9.8355252906498816904e-11, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-30.0 + 0.0j)) + assert ae(v, (-3.0215520106888125448e-15 + 0.0j), tol=ATOL) + assert ae(v.real, -3.0215520106888125448e-15, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-40.0 + 0.0j)) + assert ae(v, (-1.0367732614516569722e-19 + 0.0j), tol=ATOL) + assert ae(v.real, -1.0367732614516569722e-19, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-50.0 + 0.0j)) + assert ae(v, (-3.7832640295504590187e-24 + 0.0j), tol=ATOL) + assert ae(v.real, -3.7832640295504590187e-24, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-80.0 + 0.0j)) + assert ae(v, (-2.2285432586884729112e-37 + 0.0j), tol=ATOL) + assert ae(v.real, -2.2285432586884729112e-37, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-4.6566128730773925781e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (-20.880034622014215597 - 2.8966139905793444061j), tol=ATOL) + assert ae(v.real, -20.880034622014215597, tol=PTOL) + assert ae(v.imag, -2.8966139905793444061, tol=PTOL) + v = fp.ei((-1.0 - 0.25j)) + assert ae(v, (-0.19731063945004229095 - 3.0542266078154932748j), tol=ATOL) + assert ae(v.real, -0.19731063945004229095, tol=PTOL) + assert ae(v.imag, -3.0542266078154932748, tol=PTOL) + v = fp.ei((-4.0 - 1.0j)) + assert ae(v, (-0.0013106173980145506944 - 3.1381384055698581758j), tol=ATOL) + assert ae(v.real, -0.0013106173980145506944, tol=PTOL) + assert ae(v.imag, -3.1381384055698581758, tol=PTOL) + v = fp.ei((-8.0 - 2.0j)) + assert ae(v, (0.000022278049065270225945 - 3.1415634616493367169j), tol=ATOL) + assert ae(v.real, 0.000022278049065270225945, tol=PTOL) + assert ae(v.imag, -3.1415634616493367169, tol=PTOL) + v = fp.ei((-20.0 - 5.0j)) + assert ae(v, (-4.7711374515765346894e-11 - 3.1415926536726958909j), tol=ATOL) + assert ae(v.real, -4.7711374515765346894e-11, tol=PTOL) + assert ae(v.imag, -3.1415926536726958909, tol=PTOL) + v = fp.ei((-80.0 - 20.0j)) + assert ae(v, (-3.8353473865788235787e-38 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -3.8353473865788235787e-38, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-120.0 - 30.0j)) + assert ae(v, (-2.3836002337480334716e-55 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -2.3836002337480334716e-55, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-160.0 - 40.0j)) + assert ae(v, (1.6238022898654510661e-72 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 1.6238022898654510661e-72, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-200.0 - 50.0j)) + assert ae(v, (-6.6800061461666228487e-90 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -6.6800061461666228487e-90, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-320.0 - 80.0j)) + assert ae(v, (-4.2737871527778786157e-143 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -4.2737871527778786157e-143, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-1.1641532182693481445e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (-21.950067703413105017 - 2.3561944903087602507j), tol=ATOL) + assert ae(v.real, -21.950067703413105017, tol=PTOL) + assert ae(v.imag, -2.3561944903087602507, tol=PTOL) + v = fp.ei((-0.25 - 0.25j)) + assert ae(v, 
(-0.71092525792923287894 - 2.5766745291767512913j), tol=ATOL) + assert ae(v.real, -0.71092525792923287894, tol=PTOL) + assert ae(v.imag, -2.5766745291767512913, tol=PTOL) + v = fp.ei((-1.0 - 1.0j)) + assert ae(v, (-0.00028162445198141832551 - 2.9622681185504342983j), tol=ATOL) + assert ae(v.real, -0.00028162445198141832551, tol=PTOL) + assert ae(v.imag, -2.9622681185504342983, tol=PTOL) + v = fp.ei((-2.0 - 2.0j)) + assert ae(v, (0.033767089606562004246 - 3.1229932394200426965j), tol=ATOL) + assert ae(v.real, 0.033767089606562004246, tol=PTOL) + assert ae(v.imag, -3.1229932394200426965, tol=PTOL) + v = fp.ei((-5.0 - 5.0j)) + assert ae(v, (-0.0007266506660356393891 - 3.1420636813914284609j), tol=ATOL) + assert ae(v.real, -0.0007266506660356393891, tol=PTOL) + assert ae(v.imag, -3.1420636813914284609, tol=PTOL) + v = fp.ei((-20.0 - 20.0j)) + assert ae(v, (2.3824537449367396579e-11 - 3.1415926535228233653j), tol=ATOL) + assert ae(v.real, 2.3824537449367396579e-11, tol=PTOL) + assert ae(v.imag, -3.1415926535228233653, tol=PTOL) + v = fp.ei((-30.0 - 30.0j)) + assert ae(v, (-1.7316045841744061617e-15 - 3.141592653589794545j), tol=ATOL) + assert ae(v.real, -1.7316045841744061617e-15, tol=PTOL) + assert ae(v.imag, -3.141592653589794545, tol=PTOL) + v = fp.ei((-40.0 - 40.0j)) + assert ae(v, (7.4001043002899232182e-20 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 7.4001043002899232182e-20, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-50.0 - 50.0j)) + assert ae(v, (-2.3566128324644641219e-24 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -2.3566128324644641219e-24, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-80.0 - 80.0j)) + assert ae(v, (-9.8279750572186526673e-38 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -9.8279750572186526673e-38, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-1.1641532182693481445e-10 - 4.6566128730773925781e-10j)) + assert ae(v, (-20.880034621664969632 - 1.8157749903874220607j), tol=ATOL) + assert ae(v.real, -20.880034621664969632, tol=PTOL) + assert ae(v.imag, -1.8157749903874220607, tol=PTOL) + v = fp.ei((-0.25 - 1.0j)) + assert ae(v, (0.16868306393667788761 - 2.6557914649950505414j), tol=ATOL) + assert ae(v.real, 0.16868306393667788761, tol=PTOL) + assert ae(v.imag, -2.6557914649950505414, tol=PTOL) + v = fp.ei((-1.0 - 4.0j)) + assert ae(v, (-0.03373591813926547318 - 3.2151161058308770603j), tol=ATOL) + assert ae(v.real, -0.03373591813926547318, tol=PTOL) + assert ae(v.imag, -3.2151161058308770603, tol=PTOL) + v = fp.ei((-2.0 - 8.0j)) + assert ae(v, (0.015392833434733785143 - 3.1384179414340326969j), tol=ATOL) + assert ae(v.real, 0.015392833434733785143, tol=PTOL) + assert ae(v.imag, -3.1384179414340326969, tol=PTOL) + v = fp.ei((-5.0 - 20.0j)) + assert ae(v, (0.00024419662286542966525 - 3.1413825703601317109j), tol=ATOL) + assert ae(v.real, 0.00024419662286542966525, tol=PTOL) + assert ae(v.imag, -3.1413825703601317109, tol=PTOL) + v = fp.ei((-20.0 - 80.0j)) + assert ae(v, (-2.3255552781051330088e-11 - 3.1415926535987396304j), tol=ATOL) + assert ae(v.real, -2.3255552781051330088e-11, tol=PTOL) + assert ae(v.imag, -3.1415926535987396304, tol=PTOL) + v = fp.ei((-30.0 - 120.0j)) + assert ae(v, (2.7068919097124652332e-16 - 3.1415926535897925337j), tol=ATOL) + assert ae(v.real, 2.7068919097124652332e-16, tol=PTOL) + assert ae(v.imag, -3.1415926535897925337, tol=PTOL) + v = fp.ei((-40.0 - 160.0j)) + assert ae(v, (1.1695597827678024687e-20 - 
3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 1.1695597827678024687e-20, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-50.0 - 200.0j)) + assert ae(v, (-9.0323746914410162531e-25 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -9.0323746914410162531e-25, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-80.0 - 320.0j)) + assert ae(v, (-3.4819106748728063576e-38 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -3.4819106748728063576e-38, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((0.0 - 1.1641532182693481445e-10j)) + assert ae(v, (-22.29664129357666235 - 1.5707963269113119411j), tol=ATOL) + assert ae(v.real, -22.29664129357666235, tol=PTOL) + assert ae(v.imag, -1.5707963269113119411, tol=PTOL) + v = fp.ei((0.0 - 0.25j)) + assert ae(v, (-0.82466306258094565309 - 1.8199298971146537833j), tol=ATOL) + assert ae(v.real, -0.82466306258094565309, tol=PTOL) + assert ae(v.imag, -1.8199298971146537833, tol=PTOL) + v = fp.ei((0.0 - 1.0j)) + assert ae(v, (0.33740392290096813466 - 2.5168793971620796342j), tol=ATOL) + assert ae(v.real, 0.33740392290096813466, tol=PTOL) + assert ae(v.imag, -2.5168793971620796342, tol=PTOL) + v = fp.ei((0.0 - 2.0j)) + assert ae(v, (0.4229808287748649957 - 3.1762093035975914678j), tol=ATOL) + assert ae(v.real, 0.4229808287748649957, tol=PTOL) + assert ae(v.imag, -3.1762093035975914678, tol=PTOL) + v = fp.ei((0.0 - 5.0j)) + assert ae(v, (-0.19002974965664387862 - 3.1207275717395707565j), tol=ATOL) + assert ae(v.real, -0.19002974965664387862, tol=PTOL) + assert ae(v.imag, -3.1207275717395707565, tol=PTOL) + v = fp.ei((0.0 - 20.0j)) + assert ae(v, (0.04441982084535331654 - 3.1190380278383364594j), tol=ATOL) + assert ae(v.real, 0.04441982084535331654, tol=PTOL) + assert ae(v.imag, -3.1190380278383364594, tol=PTOL) + v = fp.ei((0.0 - 30.0j)) + assert ae(v, (-0.033032417282071143779 - 3.1375528668252477302j), tol=ATOL) + assert ae(v.real, -0.033032417282071143779, tol=PTOL) + assert ae(v.imag, -3.1375528668252477302, tol=PTOL) + v = fp.ei((0.0 - 40.0j)) + assert ae(v, (0.019020007896208766962 - 3.157781446149681126j), tol=ATOL) + assert ae(v.real, 0.019020007896208766962, tol=PTOL) + assert ae(v.imag, -3.157781446149681126, tol=PTOL) + v = fp.ei((0.0 - 50.0j)) + assert ae(v, (-0.0056283863241163054402 - 3.122413399280832514j), tol=ATOL) + assert ae(v.real, -0.0056283863241163054402, tol=PTOL) + assert ae(v.imag, -3.122413399280832514, tol=PTOL) + v = fp.ei((0.0 - 80.0j)) + assert ae(v, (-0.012402501155070958192 - 3.1431272137073839346j), tol=ATOL) + assert ae(v.real, -0.012402501155070958192, tol=PTOL) + assert ae(v.imag, -3.1431272137073839346, tol=PTOL) + v = fp.ei((1.1641532182693481445e-10 - 4.6566128730773925781e-10j)) + assert ae(v, (-20.880034621432138988 - 1.3258176641336937524j), tol=ATOL) + assert ae(v.real, -20.880034621432138988, tol=PTOL) + assert ae(v.imag, -1.3258176641336937524, tol=PTOL) + v = fp.ei((0.25 - 1.0j)) + assert ae(v, (0.59066621214766308594 - 2.3968481059377428687j), tol=ATOL) + assert ae(v.real, 0.59066621214766308594, tol=PTOL) + assert ae(v.imag, -2.3968481059377428687, tol=PTOL) + v = fp.ei((1.0 - 4.0j)) + assert ae(v, (-0.49739047283060471093 - 3.5570287076301818702j), tol=ATOL) + assert ae(v.real, -0.49739047283060471093, tol=PTOL) + assert ae(v.imag, -3.5570287076301818702, tol=PTOL) + v = fp.ei((2.0 - 8.0j)) + assert ae(v, (0.8705211147733730969 - 3.3825859385758486351j), tol=ATOL) + assert ae(v.real, 0.8705211147733730969, 
tol=PTOL) + assert ae(v.imag, -3.3825859385758486351, tol=PTOL) + v = fp.ei((5.0 - 20.0j)) + assert ae(v, (7.0789514293925893007 - 1.5313749363937141849j), tol=ATOL) + assert ae(v.real, 7.0789514293925893007, tol=PTOL) + assert ae(v.imag, -1.5313749363937141849, tol=PTOL) + v = fp.ei((20.0 - 80.0j)) + assert ae(v, (-5855431.4907298084434 + 720917.79156143806727j), tol=ATOL) + assert ae(v.real, -5855431.4907298084434, tol=PTOL) + assert ae(v.imag, 720917.79156143806727, tol=PTOL) + v = fp.ei((30.0 - 120.0j)) + assert ae(v, (65402491644.703470747 + 56697658396.51586764j), tol=ATOL) + assert ae(v.real, 65402491644.703470747, tol=PTOL) + assert ae(v.imag, 56697658396.51586764, tol=PTOL) + v = fp.ei((40.0 - 160.0j)) + assert ae(v, (-25504929379604.776769 - 1429035198630576.3879j), tol=ATOL) + assert ae(v.real, -25504929379604.776769, tol=PTOL) + assert ae(v.imag, -1429035198630576.3879, tol=PTOL) + v = fp.ei((50.0 - 200.0j)) + assert ae(v, (-18437746526988116954.0 + 17146362239046152342.0j), tol=ATOL) + assert ae(v.real, -18437746526988116954.0, tol=PTOL) + assert ae(v.imag, 17146362239046152342.0, tol=PTOL) + v = fp.ei((80.0 - 320.0j)) + assert ae(v, (-3.3464697299634526706e+31 + 1.6473152633843023919e+32j), tol=ATOL) + assert ae(v.real, -3.3464697299634526706e+31, tol=PTOL) + assert ae(v.imag, 1.6473152633843023919e+32, tol=PTOL) + v = fp.ei((1.1641532182693481445e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (-21.950067703180274374 - 0.78539816351386363145j), tol=ATOL) + assert ae(v.real, -21.950067703180274374, tol=PTOL) + assert ae(v.imag, -0.78539816351386363145, tol=PTOL) + v = fp.ei((0.25 - 0.25j)) + assert ae(v, (-0.21441047326710323254 - 1.0683772981589995996j), tol=ATOL) + assert ae(v.real, -0.21441047326710323254, tol=PTOL) + assert ae(v.imag, -1.0683772981589995996, tol=PTOL) + v = fp.ei((1.0 - 1.0j)) + assert ae(v, (1.7646259855638540684 - 2.3877698515105224193j), tol=ATOL) + assert ae(v.real, 1.7646259855638540684, tol=PTOL) + assert ae(v.imag, -2.3877698515105224193, tol=PTOL) + v = fp.ei((2.0 - 2.0j)) + assert ae(v, (1.8920781621855474089 - 5.3169624378326579621j), tol=ATOL) + assert ae(v.real, 1.8920781621855474089, tol=PTOL) + assert ae(v.imag, -5.3169624378326579621, tol=PTOL) + v = fp.ei((5.0 - 5.0j)) + assert ae(v, (-13.470936071475245856 + 15.322492395731230968j), tol=ATOL) + assert ae(v.real, -13.470936071475245856, tol=PTOL) + assert ae(v.imag, 15.322492395731230968, tol=PTOL) + v = fp.ei((20.0 - 20.0j)) + assert ae(v, (16589317.398788971896 - 5831705.4712368307104j), tol=ATOL) + assert ae(v.real, 16589317.398788971896, tol=PTOL) + assert ae(v.imag, -5831705.4712368307104, tol=PTOL) + v = fp.ei((30.0 - 30.0j)) + assert ae(v, (-154596484273.69322527 + 204179357834.2723043j), tol=ATOL) + assert ae(v.real, -154596484273.69322527, tol=PTOL) + assert ae(v.imag, 204179357834.2723043, tol=PTOL) + v = fp.ei((40.0 - 40.0j)) + assert ae(v, (287512180321448.45408 - 4203502407932318.1156j), tol=ATOL) + assert ae(v.real, 287512180321448.45408, tol=PTOL) + assert ae(v.imag, -4203502407932318.1156, tol=PTOL) + v = fp.ei((50.0 - 50.0j)) + assert ae(v, (36128528616649268826.0 + 64648801861338741960.0j), tol=ATOL) + assert ae(v.real, 36128528616649268826.0, tol=PTOL) + assert ae(v.imag, 64648801861338741960.0, tol=PTOL) + v = fp.ei((80.0 - 80.0j)) + assert ae(v, (-3.8674816337930010217e+32 + 3.0540709639658071041e+32j), tol=ATOL) + assert ae(v.real, -3.8674816337930010217e+32, tol=PTOL) + assert ae(v.imag, 3.0540709639658071041e+32, tol=PTOL) + v = fp.ei((4.6566128730773925781e-10 - 
1.1641532182693481445e-10j)) + assert ae(v, (-20.880034621082893023 - 0.24497866324327947603j), tol=ATOL) + assert ae(v.real, -20.880034621082893023, tol=PTOL) + assert ae(v.imag, -0.24497866324327947603, tol=PTOL) + v = fp.ei((1.0 - 0.25j)) + assert ae(v, (1.8942716983721074932 - 0.67268237088273915854j), tol=ATOL) + assert ae(v.real, 1.8942716983721074932, tol=PTOL) + assert ae(v.imag, -0.67268237088273915854, tol=PTOL) + v = fp.ei((4.0 - 1.0j)) + assert ae(v, (14.806699492675420438 - 12.280015176673582616j), tol=ATOL) + assert ae(v.real, 14.806699492675420438, tol=PTOL) + assert ae(v.imag, -12.280015176673582616, tol=PTOL) + v = fp.ei((8.0 - 2.0j)) + assert ae(v, (-54.633252667426386294 - 416.34477429173650012j), tol=ATOL) + assert ae(v.real, -54.633252667426386294, tol=PTOL) + assert ae(v.imag, -416.34477429173650012, tol=PTOL) + v = fp.ei((20.0 - 5.0j)) + assert ae(v, (711836.97165402624643 + 24745247.798103247366j), tol=ATOL) + assert ae(v.real, 711836.97165402624643, tol=PTOL) + assert ae(v.imag, 24745247.798103247366, tol=PTOL) + v = fp.ei((80.0 - 20.0j)) + assert ae(v, (4.2139911108612653091e+32 - 5.3367124741918251637e+32j), tol=ATOL) + assert ae(v.real, 4.2139911108612653091e+32, tol=PTOL) + assert ae(v.imag, -5.3367124741918251637e+32, tol=PTOL) + v = fp.ei((120.0 - 30.0j)) + assert ae(v, (-9.7760616203707508892e+48 + 1.058257682317195792e+50j), tol=ATOL) + assert ae(v.real, -9.7760616203707508892e+48, tol=PTOL) + assert ae(v.imag, 1.058257682317195792e+50, tol=PTOL) + v = fp.ei((160.0 - 40.0j)) + assert ae(v, (-8.7065541466623638861e+66 - 1.6577106725141739889e+67j), tol=ATOL) + assert ae(v.real, -8.7065541466623638861e+66, tol=PTOL) + assert ae(v.imag, -1.6577106725141739889e+67, tol=PTOL) + v = fp.ei((200.0 - 50.0j)) + assert ae(v, (3.070744996327018106e+84 + 1.7243244846769415903e+84j), tol=ATOL) + assert ae(v.real, 3.070744996327018106e+84, tol=PTOL) + assert ae(v.imag, 1.7243244846769415903e+84, tol=PTOL) + v = fp.ei((320.0 - 80.0j)) + assert ae(v, (-9.9960598637998647276e+135 + 2.6855081527595608863e+136j), tol=ATOL) + assert ae(v.real, -9.9960598637998647276e+135, tol=PTOL) + assert ae(v.imag, 2.6855081527595608863e+136, tol=PTOL) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_functions.py b/phivenv/Lib/site-packages/mpmath/tests/test_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..3bfe852f008173eb636c147abc83d71dbdd4d23a --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_functions.py @@ -0,0 +1,920 @@ +from mpmath.libmp import * +from mpmath import * +import random +import time +import math +import cmath + +def mpc_ae(a, b, eps=eps): + res = True + res = res and a.real.ae(b.real, eps) + res = res and a.imag.ae(b.imag, eps) + return res + +#---------------------------------------------------------------------------- +# Constants and functions +# + +tpi = "3.1415926535897932384626433832795028841971693993751058209749445923078\ +1640628620899862803482534211706798" +te = "2.71828182845904523536028747135266249775724709369995957496696762772407\ +663035354759457138217852516642743" +tdegree = "0.017453292519943295769236907684886127134428718885417254560971914\ +4017100911460344944368224156963450948221" +teuler = "0.5772156649015328606065120900824024310421593359399235988057672348\ +84867726777664670936947063291746749516" +tln2 = "0.693147180559945309417232121458176568075500134360255254120680009493\ +393621969694715605863326996418687542" +tln10 = "2.30258509299404568401799145468436420760110148862877297603332790096\ 
+757260967735248023599720508959829834" +tcatalan = "0.91596559417721901505460351493238411077414937428167213426649811\ +9621763019776254769479356512926115106249" +tkhinchin = "2.6854520010653064453097148354817956938203822939944629530511523\ +4555721885953715200280114117493184769800" +tglaisher = "1.2824271291006226368753425688697917277676889273250011920637400\ +2174040630885882646112973649195820237439420646" +tapery = "1.2020569031595942853997381615114499907649862923404988817922715553\ +4183820578631309018645587360933525815" +tphi = "1.618033988749894848204586834365638117720309179805762862135448622705\ +26046281890244970720720418939113748475" +tmertens = "0.26149721284764278375542683860869585905156664826119920619206421\ +3924924510897368209714142631434246651052" +ttwinprime = "0.660161815846869573927812110014555778432623360284733413319448\ +423335405642304495277143760031413839867912" + +def test_constants(): + for prec in [3, 7, 10, 15, 20, 37, 80, 100, 29]: + mp.dps = prec + assert pi == mpf(tpi) + assert e == mpf(te) + assert degree == mpf(tdegree) + assert euler == mpf(teuler) + assert ln2 == mpf(tln2) + assert ln10 == mpf(tln10) + assert catalan == mpf(tcatalan) + assert khinchin == mpf(tkhinchin) + assert glaisher == mpf(tglaisher) + assert phi == mpf(tphi) + if prec < 50: + assert mertens == mpf(tmertens) + assert twinprime == mpf(ttwinprime) + mp.dps = 15 + assert pi >= -1 + assert pi > 2 + assert pi > 3 + assert pi < 4 + +def test_exact_sqrts(): + for i in range(20000): + assert sqrt(mpf(i*i)) == i + random.seed(1) + for prec in [100, 300, 1000, 10000]: + mp.dps = prec + for i in range(20): + A = random.randint(10**(prec//2-2), 10**(prec//2-1)) + assert sqrt(mpf(A*A)) == A + mp.dps = 15 + for i in range(100): + for a in [1, 8, 25, 112307]: + assert sqrt(mpf((a*a, 2*i))) == mpf((a, i)) + assert sqrt(mpf((a*a, -2*i))) == mpf((a, -i)) + +def test_sqrt_rounding(): + for i in [2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15]: + i = from_int(i) + for dps in [7, 15, 83, 106, 2000]: + mp.dps = dps + a = mpf_pow_int(mpf_sqrt(i, mp.prec, round_down), 2, mp.prec, round_down) + b = mpf_pow_int(mpf_sqrt(i, mp.prec, round_up), 2, mp.prec, round_up) + assert mpf_lt(a, i) + assert mpf_gt(b, i) + random.seed(1234) + prec = 100 + for rnd in [round_down, round_nearest, round_ceiling]: + for i in range(100): + a = mpf_rand(prec) + b = mpf_mul(a, a) + assert mpf_sqrt(b, prec, rnd) == a + # Test some extreme cases + mp.dps = 100 + a = mpf(9) + 1e-90 + b = mpf(9) - 1e-90 + mp.dps = 15 + assert sqrt(a, rounding='d') == 3 + assert sqrt(a, rounding='n') == 3 + assert sqrt(a, rounding='u') > 3 + assert sqrt(b, rounding='d') < 3 + assert sqrt(b, rounding='n') == 3 + assert sqrt(b, rounding='u') == 3 + # A worst case, from the MPFR test suite + assert sqrt(mpf('7.0503726185518891')) == mpf('2.655253776675949') + +def test_float_sqrt(): + mp.dps = 15 + # These should round identically + for x in [0, 1e-7, 0.1, 0.5, 1, 2, 3, 4, 5, 0.333, 76.19]: + assert sqrt(mpf(x)) == float(x)**0.5 + assert sqrt(-1) == 1j + assert sqrt(-2).ae(cmath.sqrt(-2)) + assert sqrt(-3).ae(cmath.sqrt(-3)) + assert sqrt(-100).ae(cmath.sqrt(-100)) + assert sqrt(1j).ae(cmath.sqrt(1j)) + assert sqrt(-1j).ae(cmath.sqrt(-1j)) + assert sqrt(math.pi + math.e*1j).ae(cmath.sqrt(math.pi + math.e*1j)) + assert sqrt(math.pi - math.e*1j).ae(cmath.sqrt(math.pi - math.e*1j)) + +def test_hypot(): + assert hypot(0, 0) == 0 + assert hypot(0, 0.33) == mpf(0.33) + assert hypot(0.33, 0) == mpf(0.33) + assert hypot(-0.33, 0) == mpf(0.33) + assert hypot(3, 4) == mpf(5) 
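The rounding checks in test_sqrt_rounding above hinge on one invariant: a square root rounded down must square to strictly below the input, and one rounded up to strictly above it. A minimal self-contained sketch of that invariant, assuming only the `rounding` keyword ('d'/'n'/'u') already used in the tests above:

from mpmath import mp, mpf, sqrt

mp.dps = 15
x = mpf(2)                   # sqrt(2) is irrational, so directed rounding must move the result
lo = sqrt(x, rounding='d')   # rounded down: strictly below the true root
hi = sqrt(x, rounding='u')   # rounded up: strictly above the true root
assert lo < hi
assert lo*lo < x < hi*hi     # squaring preserves the bracketing

The test above makes the same comparison via mpf_pow_int under directed rounding, which removes any doubt about how the squaring step itself is rounded.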
+ +def test_exact_cbrt(): + for i in range(0, 20000, 200): + assert cbrt(mpf(i*i*i)) == i + random.seed(1) + for prec in [100, 300, 1000, 10000]: + mp.dps = prec + A = random.randint(10**(prec//2-2), 10**(prec//2-1)) + assert cbrt(mpf(A*A*A)) == A + mp.dps = 15 + +def test_exp(): + assert exp(0) == 1 + assert exp(10000).ae(mpf('8.8068182256629215873e4342')) + assert exp(-10000).ae(mpf('1.1354838653147360985e-4343')) + a = exp(mpf((1, 8198646019315405, -53, 53))) + assert(a.bc == bitcount(a.man)) + mp.prec = 67 + a = exp(mpf((1, 1781864658064754565, -60, 61))) + assert(a.bc == bitcount(a.man)) + mp.prec = 53 + assert exp(ln2 * 10).ae(1024) + assert exp(2+2j).ae(cmath.exp(2+2j)) + +def test_issue_73(): + mp.dps = 512 + a = exp(-1) + b = exp(1) + mp.dps = 15 + assert (+a).ae(0.36787944117144233) + assert (+b).ae(2.7182818284590451) + +def test_log(): + mp.dps = 15 + assert log(1) == 0 + for x in [0.5, 1.5, 2.0, 3.0, 100, 10**50, 1e-50]: + assert log(x).ae(math.log(x)) + assert log(x, x) == 1 + assert log(1024, 2) == 10 + assert log(10**1234, 10) == 1234 + assert log(2+2j).ae(cmath.log(2+2j)) + # Accuracy near 1 + assert (log(0.6+0.8j).real*10**17).ae(2.2204460492503131) + assert (log(0.6-0.8j).real*10**17).ae(2.2204460492503131) + assert (log(0.8-0.6j).real*10**17).ae(2.2204460492503131) + assert (log(1+1e-8j).real*10**16).ae(0.5) + assert (log(1-1e-8j).real*10**16).ae(0.5) + assert (log(-1+1e-8j).real*10**16).ae(0.5) + assert (log(-1-1e-8j).real*10**16).ae(0.5) + assert (log(1j+1e-8).real*10**16).ae(0.5) + assert (log(1j-1e-8).real*10**16).ae(0.5) + assert (log(-1j+1e-8).real*10**16).ae(0.5) + assert (log(-1j-1e-8).real*10**16).ae(0.5) + assert (log(1+1e-40j).real*10**80).ae(0.5) + assert (log(1j+1e-40).real*10**80).ae(0.5) + # Huge + assert log(ldexp(1.234,10**20)).ae(log(2)*1e20) + assert log(ldexp(1.234,10**200)).ae(log(2)*1e200) + # Some special values + assert log(mpc(0,0)) == mpc(-inf,0) + assert isnan(log(mpc(nan,0)).real) + assert isnan(log(mpc(nan,0)).imag) + assert isnan(log(mpc(0,nan)).real) + assert isnan(log(mpc(0,nan)).imag) + assert isnan(log(mpc(nan,1)).real) + assert isnan(log(mpc(nan,1)).imag) + assert isnan(log(mpc(1,nan)).real) + assert isnan(log(mpc(1,nan)).imag) + +def test_trig_hyperb_basic(): + for x in (list(range(100)) + list(range(-100,0))): + t = x / 4.1 + assert cos(mpf(t)).ae(math.cos(t)) + assert sin(mpf(t)).ae(math.sin(t)) + assert tan(mpf(t)).ae(math.tan(t)) + assert cosh(mpf(t)).ae(math.cosh(t)) + assert sinh(mpf(t)).ae(math.sinh(t)) + assert tanh(mpf(t)).ae(math.tanh(t)) + assert sin(1+1j).ae(cmath.sin(1+1j)) + assert sin(-4-3.6j).ae(cmath.sin(-4-3.6j)) + assert cos(1+1j).ae(cmath.cos(1+1j)) + assert cos(-4-3.6j).ae(cmath.cos(-4-3.6j)) + +def test_degrees(): + assert cos(0*degree) == 1 + assert cos(90*degree).ae(0) + assert cos(180*degree).ae(-1) + assert cos(270*degree).ae(0) + assert cos(360*degree).ae(1) + assert sin(0*degree) == 0 + assert sin(90*degree).ae(1) + assert sin(180*degree).ae(0) + assert sin(270*degree).ae(-1) + assert sin(360*degree).ae(0) + +def random_complexes(N): + random.seed(1) + a = [] + for i in range(N): + x1 = random.uniform(-10, 10) + y1 = random.uniform(-10, 10) + x2 = random.uniform(-10, 10) + y2 = random.uniform(-10, 10) + z1 = complex(x1, y1) + z2 = complex(x2, y2) + a.append((z1, z2)) + return a + +def test_complex_powers(): + for dps in [15, 30, 100]: + # Check accuracy for complex square root + mp.dps = dps + a = mpc(1j)**0.5 + assert a.real == a.imag == mpf(2)**0.5 / 2 + mp.dps = 15 + random.seed(1) + for (z1, z2) in 
random_complexes(100): + assert (mpc(z1)**mpc(z2)).ae(z1**z2, 1e-12) + assert (e**(-pi*1j)).ae(-1) + mp.dps = 50 + assert (e**(-pi*1j)).ae(-1) + mp.dps = 15 + +def test_complex_sqrt_accuracy(): + def test_mpc_sqrt(lst): + for a, b in lst: + z = mpc(a + j*b) + assert mpc_ae(sqrt(z*z), z) + z = mpc(-a + j*b) + assert mpc_ae(sqrt(z*z), -z) + z = mpc(a - j*b) + assert mpc_ae(sqrt(z*z), z) + z = mpc(-a - j*b) + assert mpc_ae(sqrt(z*z), -z) + random.seed(2) + N = 10 + mp.dps = 30 + dps = mp.dps + test_mpc_sqrt([(random.uniform(0, 10),random.uniform(0, 10)) for i in range(N)]) + test_mpc_sqrt([(i + 0.1, (i + 0.2)*10**i) for i in range(N)]) + mp.dps = 15 + +def test_atan(): + mp.dps = 15 + assert atan(-2.3).ae(math.atan(-2.3)) + assert atan(1e-50) == 1e-50 + assert atan(1e50).ae(pi/2) + assert atan(-1e-50) == -1e-50 + assert atan(-1e50).ae(-pi/2) + assert atan(10**1000).ae(pi/2) + for dps in [25, 70, 100, 300, 1000]: + mp.dps = dps + assert (4*atan(1)).ae(pi) + mp.dps = 15 + pi2 = pi/2 + assert atan(mpc(inf,-1)).ae(pi2) + assert atan(mpc(inf,0)).ae(pi2) + assert atan(mpc(inf,1)).ae(pi2) + assert atan(mpc(1,inf)).ae(pi2) + assert atan(mpc(0,inf)).ae(pi2) + assert atan(mpc(-1,inf)).ae(-pi2) + assert atan(mpc(-inf,1)).ae(-pi2) + assert atan(mpc(-inf,0)).ae(-pi2) + assert atan(mpc(-inf,-1)).ae(-pi2) + assert atan(mpc(-1,-inf)).ae(-pi2) + assert atan(mpc(0,-inf)).ae(-pi2) + assert atan(mpc(1,-inf)).ae(pi2) + +def test_atan2(): + mp.dps = 15 + assert atan2(1,1).ae(pi/4) + assert atan2(1,-1).ae(3*pi/4) + assert atan2(-1,-1).ae(-3*pi/4) + assert atan2(-1,1).ae(-pi/4) + assert atan2(-1,0).ae(-pi/2) + assert atan2(1,0).ae(pi/2) + assert atan2(0,0) == 0 + assert atan2(inf,0).ae(pi/2) + assert atan2(-inf,0).ae(-pi/2) + assert isnan(atan2(inf,inf)) + assert isnan(atan2(-inf,inf)) + assert isnan(atan2(inf,-inf)) + assert isnan(atan2(3,nan)) + assert isnan(atan2(nan,3)) + assert isnan(atan2(0,nan)) + assert isnan(atan2(nan,0)) + assert atan2(0,inf) == 0 + assert atan2(0,-inf).ae(pi) + assert atan2(10,inf) == 0 + assert atan2(-10,inf) == 0 + assert atan2(-10,-inf).ae(-pi) + assert atan2(10,-inf).ae(pi) + assert atan2(inf,10).ae(pi/2) + assert atan2(inf,-10).ae(pi/2) + assert atan2(-inf,10).ae(-pi/2) + assert atan2(-inf,-10).ae(-pi/2) + +def test_areal_inverses(): + assert asin(mpf(0)) == 0 + assert asinh(mpf(0)) == 0 + assert acosh(mpf(1)) == 0 + assert isinstance(asin(mpf(0.5)), mpf) + assert isinstance(asin(mpf(2.0)), mpc) + assert isinstance(acos(mpf(0.5)), mpf) + assert isinstance(acos(mpf(2.0)), mpc) + assert isinstance(atanh(mpf(0.1)), mpf) + assert isinstance(atanh(mpf(1.1)), mpc) + + random.seed(1) + for i in range(50): + x = random.uniform(0, 1) + assert asin(mpf(x)).ae(math.asin(x)) + assert acos(mpf(x)).ae(math.acos(x)) + + x = random.uniform(-10, 10) + assert asinh(mpf(x)).ae(cmath.asinh(x).real) + assert isinstance(asinh(mpf(x)), mpf) + x = random.uniform(1, 10) + assert acosh(mpf(x)).ae(cmath.acosh(x).real) + assert isinstance(acosh(mpf(x)), mpf) + x = random.uniform(-10, 0.999) + assert isinstance(acosh(mpf(x)), mpc) + + x = random.uniform(-1, 1) + assert atanh(mpf(x)).ae(cmath.atanh(x).real) + assert isinstance(atanh(mpf(x)), mpf) + + dps = mp.dps + mp.dps = 300 + assert isinstance(asin(0.5), mpf) + mp.dps = 1000 + assert asin(1).ae(pi/2) + assert asin(-1).ae(-pi/2) + mp.dps = dps + +def test_invhyperb_inaccuracy(): + mp.dps = 15 + assert (asinh(1e-5)*10**5).ae(0.99999999998333333) + assert (asinh(1e-10)*10**10).ae(1) + assert (asinh(1e-50)*10**50).ae(1) + assert 
(asinh(-1e-5)*10**5).ae(-0.99999999998333333) + assert (asinh(-1e-10)*10**10).ae(-1) + assert (asinh(-1e-50)*10**50).ae(-1) + assert asinh(10**20).ae(46.744849040440862) + assert asinh(-10**20).ae(-46.744849040440862) + assert (tanh(1e-10)*10**10).ae(1) + assert (tanh(-1e-10)*10**10).ae(-1) + assert (atanh(1e-10)*10**10).ae(1) + assert (atanh(-1e-10)*10**10).ae(-1) + +def test_complex_functions(): + for x in (list(range(10)) + list(range(-10,0))): + for y in (list(range(10)) + list(range(-10,0))): + z = complex(x, y)/4.3 + 0.01j + assert exp(mpc(z)).ae(cmath.exp(z)) + assert log(mpc(z)).ae(cmath.log(z)) + assert cos(mpc(z)).ae(cmath.cos(z)) + assert sin(mpc(z)).ae(cmath.sin(z)) + assert tan(mpc(z)).ae(cmath.tan(z)) + assert sinh(mpc(z)).ae(cmath.sinh(z)) + assert cosh(mpc(z)).ae(cmath.cosh(z)) + assert tanh(mpc(z)).ae(cmath.tanh(z)) + +def test_complex_inverse_functions(): + mp.dps = 15 + iv.dps = 15 + for (z1, z2) in random_complexes(30): + # apparently cmath uses a different branch, so we + # can't use it for comparison + assert sinh(asinh(z1)).ae(z1) + # + assert acosh(z1).ae(cmath.acosh(z1)) + assert atanh(z1).ae(cmath.atanh(z1)) + assert atan(z1).ae(cmath.atan(z1)) + # the reason we set a big eps here is that the cmath + # functions are inaccurate + assert asin(z1).ae(cmath.asin(z1), rel_eps=1e-12) + assert acos(z1).ae(cmath.acos(z1), rel_eps=1e-12) + one = mpf(1) + for i in range(-9, 10, 3): + for k in range(-9, 10, 3): + a = 0.9*j*10**k + 0.8*one*10**i + b = cos(acos(a)) + assert b.ae(a) + b = sin(asin(a)) + assert b.ae(a) + one = mpf(1) + err = 2*10**-15 + for i in range(-9, 9, 3): + for k in range(-9, 9, 3): + a = -0.9*10**k + j*0.8*one*10**i + b = cosh(acosh(a)) + assert b.ae(a, err) + b = sinh(asinh(a)) + assert b.ae(a, err) + +def test_reciprocal_functions(): + assert sec(3).ae(-1.01010866590799375) + assert csc(3).ae(7.08616739573718592) + assert cot(3).ae(-7.01525255143453347) + assert sech(3).ae(0.0993279274194332078) + assert csch(3).ae(0.0998215696688227329) + assert coth(3).ae(1.00496982331368917) + assert asec(3).ae(1.23095941734077468) + assert acsc(3).ae(0.339836909454121937) + assert acot(3).ae(0.321750554396642193) + assert asech(0.5).ae(1.31695789692481671) + assert acsch(3).ae(0.327450150237258443) + assert acoth(3).ae(0.346573590279972655) + assert acot(0).ae(1.5707963267948966192) + assert acoth(0).ae(1.5707963267948966192j) + +def test_ldexp(): + mp.dps = 15 + assert ldexp(mpf(2.5), 0) == 2.5 + assert ldexp(mpf(2.5), -1) == 1.25 + assert ldexp(mpf(2.5), 2) == 10 + assert ldexp(mpf('inf'), 3) == mpf('inf') + +def test_frexp(): + mp.dps = 15 + assert frexp(0) == (0.0, 0) + assert frexp(9) == (0.5625, 4) + assert frexp(1) == (0.5, 1) + assert frexp(0.2) == (0.8, -2) + assert frexp(1000) == (0.9765625, 10) + +def test_aliases(): + assert ln(7) == log(7) + assert log10(3.75) == log(3.75,10) + assert degrees(5.6) == 5.6 / degree + assert radians(5.6) == 5.6 * degree + assert power(-1,0.5) == j + assert fmod(25,7) == 4.0 and isinstance(fmod(25,7), mpf) + +def test_arg_sign(): + assert arg(3) == 0 + assert arg(-3).ae(pi) + assert arg(j).ae(pi/2) + assert arg(-j).ae(-pi/2) + assert arg(0) == 0 + assert isnan(atan2(3,nan)) + assert isnan(atan2(nan,3)) + assert isnan(atan2(0,nan)) + assert isnan(atan2(nan,0)) + assert isnan(atan2(nan,nan)) + assert arg(inf) == 0 + assert arg(-inf).ae(pi) + assert isnan(arg(nan)) + #assert arg(inf*j).ae(pi/2) + assert sign(0) == 0 + assert sign(3) == 1 + assert sign(-3) == -1 + assert sign(inf) == 1 + assert sign(-inf) == -1 + assert 
isnan(sign(nan)) + assert sign(j) == j + assert sign(-3*j) == -j + assert sign(1+j).ae((1+j)/sqrt(2)) + +def test_misc_bugs(): + # test that this doesn't raise an exception + mp.dps = 1000 + log(1302) + mp.dps = 15 + +def test_arange(): + assert arange(10) == [mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0'), + mpf('4.0'), mpf('5.0'), mpf('6.0'), mpf('7.0'), + mpf('8.0'), mpf('9.0')] + assert arange(-5, 5) == [mpf('-5.0'), mpf('-4.0'), mpf('-3.0'), + mpf('-2.0'), mpf('-1.0'), mpf('0.0'), + mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')] + assert arange(0, 1, 0.1) == [mpf('0.0'), mpf('0.10000000000000001'), + mpf('0.20000000000000001'), + mpf('0.30000000000000004'), + mpf('0.40000000000000002'), + mpf('0.5'), mpf('0.60000000000000009'), + mpf('0.70000000000000007'), + mpf('0.80000000000000004'), + mpf('0.90000000000000002')] + assert arange(17, -9, -3) == [mpf('17.0'), mpf('14.0'), mpf('11.0'), + mpf('8.0'), mpf('5.0'), mpf('2.0'), + mpf('-1.0'), mpf('-4.0'), mpf('-7.0')] + assert arange(0.2, 0.1, -0.1) == [mpf('0.20000000000000001')] + assert arange(0) == [] + assert arange(1000, -1) == [] + assert arange(-1.23, 3.21, -0.0000001) == [] + +def test_linspace(): + assert linspace(2, 9, 7) == [mpf('2.0'), mpf('3.166666666666667'), + mpf('4.3333333333333339'), mpf('5.5'), mpf('6.666666666666667'), + mpf('7.8333333333333339'), mpf('9.0')] + assert linspace(2, 9, 7, endpoint=0) == [mpf('2.0'), mpf('3.0'), mpf('4.0'), + mpf('5.0'), mpf('6.0'), mpf('7.0'), mpf('8.0')] + assert linspace(2, 7, 1) == [mpf(2)] + +def test_float_cbrt(): + mp.dps = 30 + for a in arange(0,10,0.1): + assert cbrt(a*a*a).ae(a, eps) + assert cbrt(-1).ae(0.5 + j*sqrt(3)/2) + one_third = mpf(1)/3 + for a in arange(0,10,2.7) + [0.1 + 10**5]: + a = mpc(a + 1.1j) + r1 = cbrt(a) + mp.dps += 10 + r2 = pow(a, one_third) + mp.dps -= 10 + assert r1.ae(r2, eps) + mp.dps = 100 + for n in range(100, 301, 100): + w = 10**n + j*10**-3 + z = w*w*w + r = cbrt(z) + assert mpc_ae(r, w, eps) + mp.dps = 15 + +def test_root(): + mp.dps = 30 + random.seed(1) + a = random.randint(0, 10000) + p = a*a*a + r = nthroot(mpf(p), 3) + assert r == a + for n in range(4, 10): + p = p*a + assert nthroot(mpf(p), n) == a + mp.dps = 40 + for n in range(10, 5000, 100): + for a in [random.random()*10000, random.random()*10**100]: + r = nthroot(a, n) + r1 = pow(a, mpf(1)/n) + assert r.ae(r1) + r = nthroot(a, -n) + r1 = pow(a, -mpf(1)/n) + assert r.ae(r1) + # XXX: this is broken right now + # tests for nthroot rounding + for rnd in ['nearest', 'up', 'down']: + mp.rounding = rnd + for n in [-5, -3, 3, 5]: + prec = 50 + for i in range(10): + mp.prec = prec + a = rand() + mp.prec = 2*prec + b = a**n + mp.prec = prec + r = nthroot(b, n) + assert r == a + mp.dps = 30 + for n in range(3, 21): + a = (random.random() + j*random.random()) + assert nthroot(a, n).ae(pow(a, mpf(1)/n)) + assert mpc_ae(nthroot(a, n), pow(a, mpf(1)/n)) + a = (random.random()*10**100 + j*random.random()) + r = nthroot(a, n) + mp.dps += 4 + r1 = pow(a, mpf(1)/n) + mp.dps -= 4 + assert r.ae(r1) + assert mpc_ae(r, r1, eps) + r = nthroot(a, -n) + mp.dps += 4 + r1 = pow(a, -mpf(1)/n) + mp.dps -= 4 + assert r.ae(r1) + assert mpc_ae(r, r1, eps) + mp.dps = 15 + assert nthroot(4, 1) == 4 + assert nthroot(4, 0) == 1 + assert nthroot(4, -1) == 0.25 + assert nthroot(inf, 1) == inf + assert nthroot(inf, 2) == inf + assert nthroot(inf, 3) == inf + assert nthroot(inf, -1) == 0 + assert nthroot(inf, -2) == 0 + assert nthroot(inf, -3) == 0 + assert nthroot(j, 1) == j + assert nthroot(j, 0) == 1 + assert nthroot(j, -1) 
== -j + assert isnan(nthroot(nan, 1)) + assert isnan(nthroot(nan, 0)) + assert isnan(nthroot(nan, -1)) + assert isnan(nthroot(inf, 0)) + assert root(2,3) == nthroot(2,3) + assert root(16,4,0) == 2 + assert root(16,4,1) == 2j + assert root(16,4,2) == -2 + assert root(16,4,3) == -2j + assert root(16,4,4) == 2 + assert root(-125,3,1) == -5 + +def test_issue_136(): + for dps in [20, 80]: + mp.dps = dps + r = nthroot(mpf('-1e-20'), 4) + assert r.ae(mpf(10)**(-5) * (1 + j) * mpf(2)**(-0.5)) + mp.dps = 80 + assert nthroot('-1e-3', 4).ae(mpf(10)**(-3./4) * (1 + j)/sqrt(2)) + assert nthroot('-1e-6', 4).ae((1 + j)/(10 * sqrt(20))) + # Check that this doesn't take eternity to compute + mp.dps = 20 + assert nthroot('-1e100000000', 4).ae((1+j)*mpf('1e25000000')/sqrt(2)) + mp.dps = 15 + +def test_mpcfun_real_imag(): + mp.dps = 15 + x = mpf(0.3) + y = mpf(0.4) + assert exp(mpc(x,0)) == exp(x) + assert exp(mpc(0,y)) == mpc(cos(y),sin(y)) + assert cos(mpc(x,0)) == cos(x) + assert sin(mpc(x,0)) == sin(x) + assert cos(mpc(0,y)) == cosh(y) + assert sin(mpc(0,y)) == mpc(0,sinh(y)) + assert cospi(mpc(x,0)) == cospi(x) + assert sinpi(mpc(x,0)) == sinpi(x) + assert cospi(mpc(0,y)).ae(cosh(pi*y)) + assert sinpi(mpc(0,y)).ae(mpc(0,sinh(pi*y))) + c, s = cospi_sinpi(mpc(x,0)) + assert c == cospi(x) + assert s == sinpi(x) + c, s = cospi_sinpi(mpc(0,y)) + assert c.ae(cosh(pi*y)) + assert s.ae(mpc(0,sinh(pi*y))) + c, s = cos_sin(mpc(x,0)) + assert c == cos(x) + assert s == sin(x) + c, s = cos_sin(mpc(0,y)) + assert c == cosh(y) + assert s == mpc(0,sinh(y)) + +def test_perturbation_rounding(): + mp.dps = 100 + a = pi/10**50 + b = -pi/10**50 + c = 1 + a + d = 1 + b + mp.dps = 15 + assert exp(a) == 1 + assert exp(a, rounding='c') > 1 + assert exp(b, rounding='c') == 1 + assert exp(a, rounding='f') == 1 + assert exp(b, rounding='f') < 1 + assert cos(a) == 1 + assert cos(a, rounding='c') == 1 + assert cos(b, rounding='c') == 1 + assert cos(a, rounding='f') < 1 + assert cos(b, rounding='f') < 1 + for f in [sin, atan, asinh, tanh]: + assert f(a) == +a + assert f(a, rounding='c') > a + assert f(a, rounding='f') < a + assert f(b) == +b + assert f(b, rounding='c') > b + assert f(b, rounding='f') < b + for f in [asin, tan, sinh, atanh]: + assert f(a) == +a + assert f(b) == +b + assert f(a, rounding='c') > a + assert f(b, rounding='c') > b + assert f(a, rounding='f') < a + assert f(b, rounding='f') < b + assert ln(c) == +a + assert ln(d) == +b + assert ln(c, rounding='c') > a + assert ln(c, rounding='f') < a + assert ln(d, rounding='c') > b + assert ln(d, rounding='f') < b + assert cosh(a) == 1 + assert cosh(b) == 1 + assert cosh(a, rounding='c') > 1 + assert cosh(b, rounding='c') > 1 + assert cosh(a, rounding='f') == 1 + assert cosh(b, rounding='f') == 1 + +def test_integer_parts(): + assert floor(3.2) == 3 + assert ceil(3.2) == 4 + assert floor(3.2+5j) == 3+5j + assert ceil(3.2+5j) == 4+5j + +def test_complex_parts(): + assert fabs('3') == 3 + assert fabs(3+4j) == 5 + assert re(3) == 3 + assert re(1+4j) == 1 + assert im(3) == 0 + assert im(1+4j) == 4 + assert conj(3) == 3 + assert conj(3+4j) == 3-4j + assert mpf(3).conjugate() == 3 + +def test_cospi_sinpi(): + assert sinpi(0) == 0 + assert sinpi(0.5) == 1 + assert sinpi(1) == 0 + assert sinpi(1.5) == -1 + assert sinpi(2) == 0 + assert sinpi(2.5) == 1 + assert sinpi(-0.5) == -1 + assert cospi(0) == 1 + assert cospi(0.5) == 0 + assert cospi(1) == -1 + assert cospi(1.5) == 0 + assert cospi(2) == 1 + assert cospi(2.5) == 0 + assert cospi(-0.5) == 0 + assert 
cospi(100000000000.25).ae(sqrt(2)/2) + a = cospi(2+3j) + assert a.real.ae(cos((2+3j)*pi).real) + assert a.imag == 0 + b = sinpi(2+3j) + assert b.imag.ae(sin((2+3j)*pi).imag) + assert b.real == 0 + mp.dps = 35 + x1 = mpf(10000) - mpf('1e-15') + x2 = mpf(10000) + mpf('1e-15') + x3 = mpf(10000.5) - mpf('1e-15') + x4 = mpf(10000.5) + mpf('1e-15') + x5 = mpf(10001) - mpf('1e-15') + x6 = mpf(10001) + mpf('1e-15') + x7 = mpf(10001.5) - mpf('1e-15') + x8 = mpf(10001.5) + mpf('1e-15') + mp.dps = 15 + M = 10**15 + assert (sinpi(x1)*M).ae(-pi) + assert (sinpi(x2)*M).ae(pi) + assert (cospi(x3)*M).ae(pi) + assert (cospi(x4)*M).ae(-pi) + assert (sinpi(x5)*M).ae(pi) + assert (sinpi(x6)*M).ae(-pi) + assert (cospi(x7)*M).ae(-pi) + assert (cospi(x8)*M).ae(pi) + assert 0.999 < cospi(x1, rounding='d') < 1 + assert 0.999 < cospi(x2, rounding='d') < 1 + assert 0.999 < sinpi(x3, rounding='d') < 1 + assert 0.999 < sinpi(x4, rounding='d') < 1 + assert -1 < cospi(x5, rounding='d') < -0.999 + assert -1 < cospi(x6, rounding='d') < -0.999 + assert -1 < sinpi(x7, rounding='d') < -0.999 + assert -1 < sinpi(x8, rounding='d') < -0.999 + assert (sinpi(1e-15)*M).ae(pi) + assert (sinpi(-1e-15)*M).ae(-pi) + assert cospi(1e-15) == 1 + assert cospi(1e-15, rounding='d') < 1 + +def test_expj(): + assert expj(0) == 1 + assert expj(1).ae(exp(j)) + assert expj(j).ae(exp(-1)) + assert expj(1+j).ae(exp(j*(1+j))) + assert expjpi(0) == 1 + assert expjpi(1).ae(exp(j*pi)) + assert expjpi(j).ae(exp(-pi)) + assert expjpi(1+j).ae(exp(j*pi*(1+j))) + assert expjpi(-10**15 * j).ae('2.22579818340535731e+1364376353841841') + +def test_sinc(): + assert sinc(0) == sincpi(0) == 1 + assert sinc(inf) == sincpi(inf) == 0 + assert sinc(-inf) == sincpi(-inf) == 0 + assert sinc(2).ae(0.45464871341284084770) + assert sinc(2+3j).ae(0.4463290318402435457-2.7539470277436474940j) + assert sincpi(2) == 0 + assert sincpi(1.5).ae(-0.212206590789193781) + +def test_fibonacci(): + mp.dps = 15 + assert [fibonacci(n) for n in range(-5, 10)] == \ + [5, -3, 2, -1, 1, 0, 1, 1, 2, 3, 5, 8, 13, 21, 34] + assert fib(2.5).ae(1.4893065462657091) + assert fib(3+4j).ae(-5248.51130728372 - 14195.962288353j) + assert fib(1000).ae(4.3466557686937455e+208) + assert str(fib(10**100)) == '6.24499112864607e+2089876402499787337692720892375554168224592399182109535392875613974104853496745963277658556235103534' + mp.dps = 2100 + a = fib(10000) + assert a % 10**10 == 9947366875 + mp.dps = 15 + assert fibonacci(inf) == inf + assert fib(3+0j) == 2 + +def test_call_with_dps(): + mp.dps = 15 + assert abs(exp(1, dps=30)-e(dps=35)) < 1e-29 + +def test_tanh(): + mp.dps = 15 + assert tanh(0) == 0 + assert tanh(inf) == 1 + assert tanh(-inf) == -1 + assert isnan(tanh(nan)) + assert tanh(mpc('inf', '0')) == 1 + +def test_atanh(): + mp.dps = 15 + assert atanh(0) == 0 + assert atanh(0.5).ae(0.54930614433405484570) + assert atanh(-0.5).ae(-0.54930614433405484570) + assert atanh(1) == inf + assert atanh(-1) == -inf + assert isnan(atanh(nan)) + assert isinstance(atanh(1), mpf) + assert isinstance(atanh(-1), mpf) + # Limits at infinity + jpi2 = j*pi/2 + assert atanh(inf).ae(-jpi2) + assert atanh(-inf).ae(jpi2) + assert atanh(mpc(inf,-1)).ae(-jpi2) + assert atanh(mpc(inf,0)).ae(-jpi2) + assert atanh(mpc(inf,1)).ae(jpi2) + assert atanh(mpc(1,inf)).ae(jpi2) + assert atanh(mpc(0,inf)).ae(jpi2) + assert atanh(mpc(-1,inf)).ae(jpi2) + assert atanh(mpc(-inf,1)).ae(jpi2) + assert atanh(mpc(-inf,0)).ae(jpi2) + assert atanh(mpc(-inf,-1)).ae(-jpi2) + assert atanh(mpc(-1,-inf)).ae(-jpi2) + assert 
atanh(mpc(0,-inf)).ae(-jpi2) + assert atanh(mpc(1,-inf)).ae(-jpi2) + +def test_expm1(): + mp.dps = 15 + assert expm1(0) == 0 + assert expm1(3).ae(exp(3)-1) + assert expm1(inf) == inf + assert expm1(1e-50).ae(1e-50) + assert (expm1(1e-10)*1e10).ae(1.00000000005) + +def test_log1p(): + mp.dps = 15 + assert log1p(0) == 0 + assert log1p(3).ae(log(1+3)) + assert log1p(inf) == inf + assert log1p(1e-50).ae(1e-50) + assert (log1p(1e-10)*1e10).ae(0.99999999995) + +def test_powm1(): + mp.dps = 15 + assert powm1(2,3) == 7 + assert powm1(-1,2) == 0 + assert powm1(-1,0) == 0 + assert powm1(-2,0) == 0 + assert powm1(3+4j,0) == 0 + assert powm1(0,1) == -1 + assert powm1(0,0) == 0 + assert powm1(1,0) == 0 + assert powm1(1,2) == 0 + assert powm1(1,3+4j) == 0 + assert powm1(1,5) == 0 + assert powm1(j,4) == 0 + assert powm1(-j,4) == 0 + assert (powm1(2,1e-100)*1e100).ae(ln2) + assert powm1(2,'1e-100000000000') != 0 + assert (powm1(fadd(1,1e-100,exact=True), 5)*1e100).ae(5) + +def test_unitroots(): + assert unitroots(1) == [1] + assert unitroots(2) == [1, -1] + a, b, c = unitroots(3) + assert a == 1 + assert b.ae(-0.5 + 0.86602540378443864676j) + assert c.ae(-0.5 - 0.86602540378443864676j) + assert unitroots(1, primitive=True) == [1] + assert unitroots(2, primitive=True) == [-1] + assert unitroots(3, primitive=True) == unitroots(3)[1:] + assert unitroots(4, primitive=True) == [j, -j] + assert len(unitroots(17, primitive=True)) == 16 + assert len(unitroots(16, primitive=True)) == 8 + +def test_cyclotomic(): + mp.dps = 15 + assert [cyclotomic(n,1) for n in range(31)] == [1,0,2,3,2,5,1,7,2,3,1,11,1,13,1,1,2,17,1,19,1,1,1,23,1,5,1,3,1,29,1] + assert [cyclotomic(n,-1) for n in range(31)] == [1,-2,0,1,2,1,3,1,2,1,5,1,1,1,7,1,2,1,3,1,1,1,11,1,1,1,13,1,1,1,1] + assert [cyclotomic(n,j) for n in range(21)] == [1,-1+j,1+j,j,0,1,-j,j,2,-j,1,j,3,1,-j,1,2,1,j,j,5] + assert [cyclotomic(n,-j) for n in range(21)] == [1,-1-j,1-j,-j,0,1,j,-j,2,j,1,-j,3,1,j,1,2,1,-j,-j,5] + assert cyclotomic(1624,j) == 1 + assert cyclotomic(33600,j) == 1 + u = sqrt(j, prec=500) + assert cyclotomic(8, u).ae(0) + assert cyclotomic(30, u).ae(5.8284271247461900976) + assert cyclotomic(2040, u).ae(1) + assert cyclotomic(0,2.5) == 1 + assert cyclotomic(1,2.5) == 2.5-1 + assert cyclotomic(2,2.5) == 2.5+1 + assert cyclotomic(3,2.5) == 2.5**2 + 2.5 + 1 + assert cyclotomic(7,2.5) == 406.234375 diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_functions2.py b/phivenv/Lib/site-packages/mpmath/tests/test_functions2.py new file mode 100644 index 0000000000000000000000000000000000000000..2b2d57fcec9be0db4d921b013f24fd6a5e0e9930 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_functions2.py @@ -0,0 +1,2384 @@ +import math +import pytest +from mpmath import * + +def test_bessel(): + mp.dps = 15 + assert j0(1).ae(0.765197686557966551) + assert j0(pi).ae(-0.304242177644093864) + assert j0(1000).ae(0.0247866861524201746) + assert j0(-25).ae(0.0962667832759581162) + assert j1(1).ae(0.440050585744933516) + assert j1(pi).ae(0.284615343179752757) + assert j1(1000).ae(0.00472831190708952392) + assert j1(-25).ae(0.125350249580289905) + assert besselj(5,1).ae(0.000249757730211234431) + assert besselj(5+0j,1).ae(0.000249757730211234431) + assert besselj(5,pi).ae(0.0521411843671184747) + assert besselj(5,1000).ae(0.00502540694523318607) + assert besselj(5,-25).ae(0.0660079953984229934) + assert besselj(-3,2).ae(-0.128943249474402051) + assert besselj(-4,2).ae(0.0339957198075684341) + assert besselj(3,3+2j).ae(0.424718794929639595942 + 
0.625665327745785804812j) + assert besselj(0.25,4).ae(-0.374760630804249715) + assert besselj(1+2j,3+4j).ae(0.319247428741872131 - 0.669557748880365678j) + assert (besselj(3, 10**10) * 10**5).ae(0.76765081748139204023) + assert bessely(-0.5, 0) == 0 + assert bessely(0.5, 0) == -inf + assert bessely(1.5, 0) == -inf + assert bessely(0,0) == -inf + assert bessely(-0.4, 0) == -inf + assert bessely(-0.6, 0) == inf + assert bessely(-1, 0) == inf + assert bessely(-1.4, 0) == inf + assert bessely(-1.6, 0) == -inf + assert bessely(-1, 0) == inf + assert bessely(-2, 0) == -inf + assert bessely(-3, 0) == inf + assert bessely(0.5, 0) == -inf + assert bessely(1, 0) == -inf + assert bessely(1.5, 0) == -inf + assert bessely(2, 0) == -inf + assert bessely(2.5, 0) == -inf + assert bessely(3, 0) == -inf + assert bessely(0,0.5).ae(-0.44451873350670655715) + assert bessely(1,0.5).ae(-1.4714723926702430692) + assert bessely(-1,0.5).ae(1.4714723926702430692) + assert bessely(3.5,0.5).ae(-138.86400867242488443) + assert bessely(0,3+4j).ae(4.6047596915010138655-8.8110771408232264208j) + assert bessely(0,j).ae(-0.26803248203398854876+1.26606587775200833560j) + assert (bessely(3, 10**10) * 10**5).ae(0.21755917537013204058) + assert besseli(0,0) == 1 + assert besseli(1,0) == 0 + assert besseli(2,0) == 0 + assert besseli(-1,0) == 0 + assert besseli(-2,0) == 0 + assert besseli(0,0.5).ae(1.0634833707413235193) + assert besseli(1,0.5).ae(0.25789430539089631636) + assert besseli(-1,0.5).ae(0.25789430539089631636) + assert besseli(3.5,0.5).ae(0.00068103597085793815863) + assert besseli(0,3+4j).ae(-3.3924877882755196097-1.3239458916287264815j) + assert besseli(0,j).ae(besselj(0,1)) + assert (besseli(3, 10**10) * mpf(10)**(-4342944813)).ae(4.2996028505491271875) + assert besselk(0,0) == inf + assert besselk(1,0) == inf + assert besselk(2,0) == inf + assert besselk(-1,0) == inf + assert besselk(-2,0) == inf + assert besselk(0,0.5).ae(0.92441907122766586178) + assert besselk(1,0.5).ae(1.6564411200033008937) + assert besselk(-1,0.5).ae(1.6564411200033008937) + assert besselk(3.5,0.5).ae(207.48418747548460607) + assert besselk(0,3+4j).ae(-0.007239051213570155013+0.026510418350267677215j) + assert besselk(0,j).ae(-0.13863371520405399968-1.20196971531720649914j) + assert (besselk(3, 10**10) * mpf(10)**4342944824).ae(1.1628981033356187851) + # test for issue 331, bug reported by Michael Hartmann + for n in range(10,100,10): + mp.dps = n + assert besseli(91.5,24.7708).ae("4.00830632138673963619656140653537080438462342928377020695738635559218797348548092636896796324190271316137982810144874264e-41") + +def test_bessel_zeros(): + mp.dps = 15 + assert besseljzero(0,1).ae(2.40482555769577276869) + assert besseljzero(2,1).ae(5.1356223018406825563) + assert besseljzero(1,50).ae(157.86265540193029781) + assert besseljzero(10,1).ae(14.475500686554541220) + assert besseljzero(0.5,3).ae(9.4247779607693797153) + assert besseljzero(2,1,1).ae(3.0542369282271403228) + assert besselyzero(0,1).ae(0.89357696627916752158) + assert besselyzero(2,1).ae(3.3842417671495934727) + assert besselyzero(1,50).ae(156.29183520147840108) + assert besselyzero(10,1).ae(12.128927704415439387) + assert besselyzero(0.5,3).ae(7.8539816339744830962) + assert besselyzero(2,1,1).ae(5.0025829314460639452) + +def test_hankel(): + mp.dps = 15 + assert hankel1(0,0.5).ae(0.93846980724081290423-0.44451873350670655715j) + assert hankel1(1,0.5).ae(0.2422684576748738864-1.4714723926702430692j) + assert hankel1(-1,0.5).ae(-0.2422684576748738864+1.4714723926702430692j) + assert 
hankel1(1.5,0.5).ae(0.0917016996256513026-2.5214655504213378514j) + assert hankel1(1.5,3+4j).ae(0.0066806866476728165382-0.0036684231610839127106j) + assert hankel2(0,0.5).ae(0.93846980724081290423+0.44451873350670655715j) + assert hankel2(1,0.5).ae(0.2422684576748738864+1.4714723926702430692j) + assert hankel2(-1,0.5).ae(-0.2422684576748738864-1.4714723926702430692j) + assert hankel2(1.5,0.5).ae(0.0917016996256513026+2.5214655504213378514j) + assert hankel2(1.5,3+4j).ae(14.783528526098567526-7.397390270853446512j) + +def test_struve(): + mp.dps = 15 + assert struveh(2,3).ae(0.74238666967748318564) + assert struveh(-2.5,3).ae(0.41271003220971599344) + assert struvel(2,3).ae(1.7476573277362782744) + assert struvel(-2.5,3).ae(1.5153394466819651377) + +def test_whittaker(): + mp.dps = 15 + assert whitm(2,3,4).ae(49.753745589025246591) + assert whitw(2,3,4).ae(14.111656223052932215) + +def test_kelvin(): + mp.dps = 15 + assert ber(2,3).ae(0.80836846563726819091) + assert ber(3,4).ae(-0.28262680167242600233) + assert ber(-3,2).ae(-0.085611448496796363669) + assert bei(2,3).ae(-0.89102236377977331571) + assert bei(-3,2).ae(-0.14420994155731828415) + assert ker(2,3).ae(0.12839126695733458928) + assert ker(-3,2).ae(-0.29802153400559142783) + assert ker(0.5,3).ae(-0.085662378535217097524) + assert kei(2,3).ae(0.036804426134164634000) + assert kei(-3,2).ae(0.88682069845786731114) + assert kei(0.5,3).ae(0.013633041571314302948) + +def test_hyper_misc(): + mp.dps = 15 + assert hyp0f1(1,0) == 1 + assert hyp1f1(1,2,0) == 1 + assert hyp1f2(1,2,3,0) == 1 + assert hyp2f1(1,2,3,0) == 1 + assert hyp2f2(1,2,3,4,0) == 1 + assert hyp2f3(1,2,3,4,5,0) == 1 + # Degenerate case: 0F0 + assert hyper([],[],0) == 1 + assert hyper([],[],-2).ae(exp(-2)) + # Degenerate case: 1F0 + assert hyper([2],[],1.5) == 4 + # + assert hyp2f1((1,3),(2,3),(5,6),mpf(27)/32).ae(1.6) + assert hyp2f1((1,4),(1,2),(3,4),mpf(80)/81).ae(1.8) + assert hyp2f1((2,3),(1,1),(3,2),(2+j)/3).ae(1.327531603558679093+0.439585080092769253j) + mp.dps = 25 + v = mpc('1.2282306665029814734863026', '-0.1225033830118305184672133') + assert hyper([(3,4),2+j,1],[1,5,j/3],mpf(1)/5+j/8).ae(v) + mp.dps = 15 + +def test_elliptic_integrals(): + mp.dps = 15 + assert ellipk(0).ae(pi/2) + assert ellipk(0.5).ae(gamma(0.25)**2/(4*sqrt(pi))) + assert ellipk(1) == inf + assert ellipk(1+0j) == inf + assert ellipk(-1).ae('1.3110287771460599052') + assert ellipk(-2).ae('1.1714200841467698589') + assert isinstance(ellipk(-2), mpf) + assert isinstance(ellipe(-2), mpf) + assert ellipk(-50).ae('0.47103424540873331679') + mp.dps = 30 + n1 = +fraction(99999,100000) + n2 = +fraction(100001,100000) + mp.dps = 15 + assert ellipk(n1).ae('7.1427724505817781901') + assert ellipk(n2).ae(mpc('7.1427417367963090109', '-1.5707923998261688019')) + assert ellipe(n1).ae('1.0000332138990829170') + v = ellipe(n2) + assert v.real.ae('0.999966786328145474069137') + assert (v.imag*10**6).ae('7.853952181727432') + assert ellipk(2).ae(mpc('1.3110287771460599052', '-1.3110287771460599052')) + assert ellipk(50).ae(mpc('0.22326753950210985451', '-0.47434723226254522087')) + assert ellipk(3+4j).ae(mpc('0.91119556380496500866', '0.63133428324134524388')) + assert ellipk(3-4j).ae(mpc('0.91119556380496500866', '-0.63133428324134524388')) + assert ellipk(-3+4j).ae(mpc('0.95357894880405122483', '0.23093044503746114444')) + assert ellipk(-3-4j).ae(mpc('0.95357894880405122483', '-0.23093044503746114444')) + assert isnan(ellipk(nan)) + assert isnan(ellipe(nan)) + assert ellipk(inf) == 0 + assert 
isinstance(ellipk(inf), mpc) + assert ellipk(-inf) == 0 + assert ellipk(1+0j) == inf + assert ellipe(0).ae(pi/2) + assert ellipe(0.5).ae(pi**(mpf(3)/2)/gamma(0.25)**2 +gamma(0.25)**2/(8*sqrt(pi))) + assert ellipe(1) == 1 + assert ellipe(1+0j) == 1 + assert ellipe(inf) == mpc(0,inf) + assert ellipe(-inf) == inf + assert ellipe(3+4j).ae(1.4995535209333469543-1.5778790079127582745j) + assert ellipe(3-4j).ae(1.4995535209333469543+1.5778790079127582745j) + assert ellipe(-3+4j).ae(2.5804237855343377803-0.8306096791000413778j) + assert ellipe(-3-4j).ae(2.5804237855343377803+0.8306096791000413778j) + assert ellipe(2).ae(0.59907011736779610372+0.59907011736779610372j) + assert ellipe('1e-1000000000').ae(pi/2) + assert ellipk('1e-1000000000').ae(pi/2) + assert ellipe(-pi).ae(2.4535865983838923) + mp.dps = 50 + assert ellipk(1/pi).ae('1.724756270009501831744438120951614673874904182624739673') + assert ellipe(1/pi).ae('1.437129808135123030101542922290970050337425479058225712') + assert ellipk(-10*pi).ae('0.5519067523886233967683646782286965823151896970015484512') + assert ellipe(-10*pi).ae('5.926192483740483797854383268707108012328213431657645509') + v = ellipk(pi) + assert v.real.ae('0.973089521698042334840454592642137667227167622330325225') + assert v.imag.ae('-1.156151296372835303836814390793087600271609993858798016') + v = ellipe(pi) + assert v.real.ae('0.4632848917264710404078033487934663562998345622611263332') + assert v.imag.ae('1.0637961621753130852473300451583414489944099504180510966') + mp.dps = 15 + +def test_exp_integrals(): + mp.dps = 15 + x = +e + z = e + sqrt(3)*j + assert ei(x).ae(8.21168165538361560) + assert li(x).ae(1.89511781635593676) + assert si(x).ae(1.82104026914756705) + assert ci(x).ae(0.213958001340379779) + assert shi(x).ae(4.11520706247846193) + assert chi(x).ae(4.09647459290515367) + assert fresnels(x).ae(0.437189718149787643) + assert fresnelc(x).ae(0.401777759590243012) + assert airyai(x).ae(0.0108502401568586681) + assert airybi(x).ae(8.98245748585468627) + assert ei(z).ae(3.72597969491314951 + 7.34213212314224421j) + assert li(z).ae(2.28662658112562502 + 1.50427225297269364j) + assert si(z).ae(2.48122029237669054 + 0.12684703275254834j) + assert ci(z).ae(0.169255590269456633 - 0.892020751420780353j) + assert shi(z).ae(1.85810366559344468 + 3.66435842914920263j) + assert chi(z).ae(1.86787602931970484 + 3.67777369399304159j) + assert fresnels(z/3).ae(0.034534397197008182 + 0.754859844188218737j) + assert fresnelc(z/3).ae(1.261581645990027372 + 0.417949198775061893j) + assert airyai(z).ae(-0.0162552579839056062 - 0.0018045715700210556j) + assert airybi(z).ae(-4.98856113282883371 + 2.08558537872180623j) + assert li(0) == 0.0 + assert li(1) == -inf + assert li(inf) == inf + assert isinstance(li(0.7), mpf) + assert si(inf).ae(pi/2) + assert si(-inf).ae(-pi/2) + assert ci(inf) == 0 + assert ci(0) == -inf + assert isinstance(ei(-0.7), mpf) + assert airyai(inf) == 0 + assert airybi(inf) == inf + assert airyai(-inf) == 0 + assert airybi(-inf) == 0 + assert fresnels(inf) == 0.5 + assert fresnelc(inf) == 0.5 + assert fresnels(-inf) == -0.5 + assert fresnelc(-inf) == -0.5 + assert shi(0) == 0 + assert shi(inf) == inf + assert shi(-inf) == -inf + assert chi(0) == -inf + assert chi(inf) == inf + +def test_ei(): + mp.dps = 15 + assert ei(0) == -inf + assert ei(inf) == inf + assert ei(-inf) == -0.0 + assert ei(20+70j).ae(6.1041351911152984397e6 - 2.7324109310519928872e6j) + # tests for the asymptotic expansion + # values checked with Mathematica ExpIntegralEi + mp.dps = 50 + r = 
ei(20000) + s = '3.8781962825045010930273870085501819470698476975019e+8681' + assert str(r) == s + r = ei(-200) + s = '-6.8852261063076355977108174824557929738368086933303e-90' + assert str(r) == s + r =ei(20000 + 10*j) + sre = '-3.255138234032069402493850638874410725961401274106e+8681' + sim = '-2.1081929993474403520785942429469187647767369645423e+8681' + assert str(r.real) == sre and str(r.imag) == sim + mp.dps = 15 + # More asymptotic expansions + assert chi(-10**6+100j).ae('1.3077239389562548386e+434288 + 7.6808956999707408158e+434287j') + assert shi(-10**6+100j).ae('-1.3077239389562548386e+434288 - 7.6808956999707408158e+434287j') + mp.dps = 15 + assert ei(10j).ae(-0.0454564330044553726+3.2291439210137706686j) + assert ei(100j).ae(-0.0051488251426104921+3.1330217936839529126j) + u = ei(fmul(10**20, j, exact=True)) + assert u.real.ae(-6.4525128526578084421345e-21, abs_eps=0, rel_eps=8*eps) + assert u.imag.ae(pi) + assert ei(-10j).ae(-0.0454564330044553726-3.2291439210137706686j) + assert ei(-100j).ae(-0.0051488251426104921-3.1330217936839529126j) + u = ei(fmul(-10**20, j, exact=True)) + assert u.real.ae(-6.4525128526578084421345e-21, abs_eps=0, rel_eps=8*eps) + assert u.imag.ae(-pi) + assert ei(10+10j).ae(-1576.1504265768517448+436.9192317011328140j) + u = ei(-10+10j) + assert u.real.ae(7.6698978415553488362543e-7, abs_eps=0, rel_eps=8*eps) + assert u.imag.ae(3.141595611735621062025) + +def test_e1(): + mp.dps = 15 + assert e1(0) == inf + assert e1(inf) == 0 + assert e1(-inf) == mpc(-inf, -pi) + assert e1(10j).ae(0.045456433004455372635 + 0.087551267423977430100j) + assert e1(100j).ae(0.0051488251426104921444 - 0.0085708599058403258790j) + assert e1(fmul(10**20, j, exact=True)).ae(6.4525128526578084421e-21 - 7.6397040444172830039e-21j, abs_eps=0, rel_eps=8*eps) + assert e1(-10j).ae(0.045456433004455372635 - 0.087551267423977430100j) + assert e1(-100j).ae(0.0051488251426104921444 + 0.0085708599058403258790j) + assert e1(fmul(-10**20, j, exact=True)).ae(6.4525128526578084421e-21 + 7.6397040444172830039e-21j, abs_eps=0, rel_eps=8*eps) + +def test_expint(): + mp.dps = 15 + assert expint(0,0) == inf + assert expint(0,1).ae(1/e) + assert expint(0,1.5).ae(2/exp(1.5)/3) + assert expint(1,1).ae(-ei(-1)) + assert expint(2,0).ae(1) + assert expint(3,0).ae(1/2.) + assert expint(4,0).ae(1/3.) + assert expint(-2, 0.5).ae(26/sqrt(e)) + assert expint(-1,-1) == 0 + assert expint(-2,-1).ae(-e) + assert expint(5.5, 0).ae(2/9.) 
+ assert expint(2.00000001,0).ae(100000000./100000001) + assert expint(2+3j,4-j).ae(0.0023461179581675065414+0.0020395540604713669262j) + assert expint('1.01', '1e-1000').ae(99.9999999899412802) + assert expint('1.000000000001', 3.5).ae(0.00697013985754701819446) + assert expint(2,3).ae(3*ei(-3)+exp(-3)) + assert (expint(10,20)*10**10).ae(0.694439055541231353) + assert expint(3,inf) == 0 + assert expint(3.2,inf) == 0 + assert expint(3.2+2j,inf) == 0 + assert expint(1,3j).ae(-0.11962978600800032763 + 0.27785620120457163717j) + assert expint(1,3).ae(0.013048381094197037413) + assert expint(1,-3).ae(-ei(3)-pi*j) + #assert expint(3) == expint(1,3) + assert expint(1,-20).ae(-25615652.66405658882 - 3.1415926535897932385j) + assert expint(1000000,0).ae(1./999999) + assert expint(0,2+3j).ae(-0.025019798357114678171 + 0.027980439405104419040j) + assert expint(-1,2+3j).ae(-0.022411973626262070419 + 0.038058922011377716932j) + assert expint(-1.5,0) == inf + +def test_trig_integrals(): + mp.dps = 30 + assert si(mpf(1)/1000000).ae('0.000000999999999999944444444444446111') + assert ci(mpf(1)/1000000).ae('-13.2382948930629912435014366276') + assert si(10**10).ae('1.5707963267075846569685111517747537') + assert ci(10**10).ae('-4.87506025174822653785729773959e-11') + assert si(10**100).ae(pi/2) + assert (ci(10**100)*10**100).ae('-0.372376123661276688262086695553') + assert si(-3) == -si(3) + assert ci(-3).ae(ci(3) + pi*j) + # Test complex structure + mp.dps = 15 + assert mp.ci(50).ae(-0.0056283863241163054402) + assert mp.ci(50+2j).ae(-0.018378282946133067149+0.070352808023688336193j) + assert mp.ci(20j).ae(1.28078263320282943611e7+1.5707963267949j) + assert mp.ci(-2+20j).ae(-4.050116856873293505e6+1.207476188206989909e7j) + assert mp.ci(-50+2j).ae(-0.0183782829461330671+3.0712398455661049023j) + assert mp.ci(-50).ae(-0.0056283863241163054+3.1415926535897932385j) + assert mp.ci(-50-2j).ae(-0.0183782829461330671-3.0712398455661049023j) + assert mp.ci(-2-20j).ae(-4.050116856873293505e6-1.207476188206989909e7j) + assert mp.ci(-20j).ae(1.28078263320282943611e7-1.5707963267949j) + assert mp.ci(50-2j).ae(-0.018378282946133067149-0.070352808023688336193j) + assert mp.si(50).ae(1.5516170724859358947) + assert mp.si(50+2j).ae(1.497884414277228461-0.017515007378437448j) + assert mp.si(20j).ae(1.2807826332028294459e7j) + assert mp.si(-2+20j).ae(-1.20747603112735722103e7-4.050116856873293554e6j) + assert mp.si(-50+2j).ae(-1.497884414277228461-0.017515007378437448j) + assert mp.si(-50).ae(-1.5516170724859358947) + assert mp.si(-50-2j).ae(-1.497884414277228461+0.017515007378437448j) + assert mp.si(-2-20j).ae(-1.20747603112735722103e7+4.050116856873293554e6j) + assert mp.si(-20j).ae(-1.2807826332028294459e7j) + assert mp.si(50-2j).ae(1.497884414277228461+0.017515007378437448j) + assert mp.chi(50j).ae(-0.0056283863241163054+1.5707963267948966192j) + assert mp.chi(-2+50j).ae(-0.0183782829461330671+1.6411491348185849554j) + assert mp.chi(-20).ae(1.28078263320282943611e7+3.1415926535898j) + assert mp.chi(-20-2j).ae(-4.050116856873293505e6+1.20747571696809187053e7j) + assert mp.chi(-2-50j).ae(-0.0183782829461330671-1.6411491348185849554j) + assert mp.chi(-50j).ae(-0.0056283863241163054-1.5707963267948966192j) + assert mp.chi(2-50j).ae(-0.0183782829461330671-1.500443518771208283j) + assert mp.chi(20-2j).ae(-4.050116856873293505e6-1.20747603112735722951e7j) + assert mp.chi(20).ae(1.2807826332028294361e7) + assert mp.chi(2+50j).ae(-0.0183782829461330671+1.500443518771208283j) + assert mp.shi(50j).ae(1.5516170724859358947j) + 
assert mp.shi(-2+50j).ae(0.017515007378437448+1.497884414277228461j) + assert mp.shi(-20).ae(-1.2807826332028294459e7) + assert mp.shi(-20-2j).ae(4.050116856873293554e6-1.20747603112735722103e7j) + assert mp.shi(-2-50j).ae(0.017515007378437448-1.497884414277228461j) + assert mp.shi(-50j).ae(-1.5516170724859358947j) + assert mp.shi(2-50j).ae(-0.017515007378437448-1.497884414277228461j) + assert mp.shi(20-2j).ae(-4.050116856873293554e6-1.20747603112735722103e7j) + assert mp.shi(20).ae(1.2807826332028294459e7) + assert mp.shi(2+50j).ae(-0.017515007378437448+1.497884414277228461j) + def ae(x,y,tol=1e-12): + return abs(x-y) <= abs(y)*tol + assert fp.ci(fp.inf) == 0 + assert ae(fp.ci(fp.ninf), fp.pi*1j) + assert ae(fp.si(fp.inf), fp.pi/2) + assert ae(fp.si(fp.ninf), -fp.pi/2) + assert fp.si(0) == 0 + assert ae(fp.ci(50), -0.0056283863241163054402) + assert ae(fp.ci(50+2j), -0.018378282946133067149+0.070352808023688336193j) + assert ae(fp.ci(20j), 1.28078263320282943611e7+1.5707963267949j) + assert ae(fp.ci(-2+20j), -4.050116856873293505e6+1.207476188206989909e7j) + assert ae(fp.ci(-50+2j), -0.0183782829461330671+3.0712398455661049023j) + assert ae(fp.ci(-50), -0.0056283863241163054+3.1415926535897932385j) + assert ae(fp.ci(-50-2j), -0.0183782829461330671-3.0712398455661049023j) + assert ae(fp.ci(-2-20j), -4.050116856873293505e6-1.207476188206989909e7j) + assert ae(fp.ci(-20j), 1.28078263320282943611e7-1.5707963267949j) + assert ae(fp.ci(50-2j), -0.018378282946133067149-0.070352808023688336193j) + assert ae(fp.si(50), 1.5516170724859358947) + assert ae(fp.si(50+2j), 1.497884414277228461-0.017515007378437448j) + assert ae(fp.si(20j), 1.2807826332028294459e7j) + assert ae(fp.si(-2+20j), -1.20747603112735722103e7-4.050116856873293554e6j) + assert ae(fp.si(-50+2j), -1.497884414277228461-0.017515007378437448j) + assert ae(fp.si(-50), -1.5516170724859358947) + assert ae(fp.si(-50-2j), -1.497884414277228461+0.017515007378437448j) + assert ae(fp.si(-2-20j), -1.20747603112735722103e7+4.050116856873293554e6j) + assert ae(fp.si(-20j), -1.2807826332028294459e7j) + assert ae(fp.si(50-2j), 1.497884414277228461+0.017515007378437448j) + assert ae(fp.chi(50j), -0.0056283863241163054+1.5707963267948966192j) + assert ae(fp.chi(-2+50j), -0.0183782829461330671+1.6411491348185849554j) + assert ae(fp.chi(-20), 1.28078263320282943611e7+3.1415926535898j) + assert ae(fp.chi(-20-2j), -4.050116856873293505e6+1.20747571696809187053e7j) + assert ae(fp.chi(-2-50j), -0.0183782829461330671-1.6411491348185849554j) + assert ae(fp.chi(-50j), -0.0056283863241163054-1.5707963267948966192j) + assert ae(fp.chi(2-50j), -0.0183782829461330671-1.500443518771208283j) + assert ae(fp.chi(20-2j), -4.050116856873293505e6-1.20747603112735722951e7j) + assert ae(fp.chi(20), 1.2807826332028294361e7) + assert ae(fp.chi(2+50j), -0.0183782829461330671+1.500443518771208283j) + assert ae(fp.shi(50j), 1.5516170724859358947j) + assert ae(fp.shi(-2+50j), 0.017515007378437448+1.497884414277228461j) + assert ae(fp.shi(-20), -1.2807826332028294459e7) + assert ae(fp.shi(-20-2j), 4.050116856873293554e6-1.20747603112735722103e7j) + assert ae(fp.shi(-2-50j), 0.017515007378437448-1.497884414277228461j) + assert ae(fp.shi(-50j), -1.5516170724859358947j) + assert ae(fp.shi(2-50j), -0.017515007378437448-1.497884414277228461j) + assert ae(fp.shi(20-2j), -4.050116856873293554e6-1.20747603112735722103e7j) + assert ae(fp.shi(20), 1.2807826332028294459e7) + assert ae(fp.shi(2+50j), -0.017515007378437448+1.497884414277228461j) + +def test_airy(): + mp.dps = 15 + assert 
(airyai(10)*10**10).ae(1.1047532552898687) + assert (airybi(10)/10**9).ae(0.45564115354822515) + assert (airyai(1000)*10**9158).ae(9.306933063179556004) + assert (airybi(1000)/10**9154).ae(5.4077118391949465477) + assert airyai(-1000).ae(0.055971895773019918842) + assert airybi(-1000).ae(-0.083264574117080633012) + assert (airyai(100+100j)*10**188).ae(2.9099582462207032076 + 2.353013591706178756j) + assert (airybi(100+100j)/10**185).ae(1.7086751714463652039 - 3.1416590020830804578j) + +def test_hyper_0f1(): + mp.dps = 15 + v = 8.63911136507950465 + assert hyper([],[(1,3)],1.5).ae(v) + assert hyper([],[1/3.],1.5).ae(v) + assert hyp0f1(1/3.,1.5).ae(v) + assert hyp0f1((1,3),1.5).ae(v) + # Asymptotic expansion + assert hyp0f1(3,1e9).ae('4.9679055380347771271e+27455') + assert hyp0f1(3,1e9j).ae('-2.1222788784457702157e+19410 + 5.0840597555401854116e+19410j') + +def test_hyper_1f1(): + mp.dps = 15 + v = 1.2917526488617656673 + assert hyper([(1,2)],[(3,2)],0.7).ae(v) + assert hyper([(1,2)],[(3,2)],0.7+0j).ae(v) + assert hyper([0.5],[(3,2)],0.7).ae(v) + assert hyper([0.5],[1.5],0.7).ae(v) + assert hyper([0.5],[(3,2)],0.7+0j).ae(v) + assert hyper([0.5],[1.5],0.7+0j).ae(v) + assert hyper([(1,2)],[1.5+0j],0.7).ae(v) + assert hyper([0.5+0j],[1.5],0.7).ae(v) + assert hyper([0.5+0j],[1.5+0j],0.7+0j).ae(v) + assert hyp1f1(0.5,1.5,0.7).ae(v) + assert hyp1f1((1,2),1.5,0.7).ae(v) + # Asymptotic expansion + assert hyp1f1(2,3,1e10).ae('2.1555012157015796988e+4342944809') + assert (hyp1f1(2,3,1e10j)*10**10).ae(-0.97501205020039745852 - 1.7462392454512132074j) + # Shouldn't use asymptotic expansion + assert hyp1f1(-2, 1, 10000).ae(49980001) + # Bug + assert hyp1f1(1j,fraction(1,3),0.415-69.739j).ae(25.857588206024346592 + 15.738060264515292063j) + +def test_hyper_2f1(): + mp.dps = 15 + v = 1.0652207633823291032 + assert hyper([(1,2), (3,4)], [2], 0.3).ae(v) + assert hyper([(1,2), 0.75], [2], 0.3).ae(v) + assert hyper([0.5, 0.75], [2.0], 0.3).ae(v) + assert hyper([0.5, 0.75], [2.0], 0.3+0j).ae(v) + assert hyper([0.5+0j, (3,4)], [2.0], 0.3+0j).ae(v) + assert hyper([0.5+0j, (3,4)], [2.0], 0.3).ae(v) + assert hyper([0.5, (3,4)], [2.0+0j], 0.3).ae(v) + assert hyper([0.5+0j, 0.75+0j], [2.0+0j], 0.3+0j).ae(v) + v = 1.09234681096223231717 + 0.18104859169479360380j + assert hyper([(1,2),0.75+j], [2], 0.5).ae(v) + assert hyper([0.5,0.75+j], [2.0], 0.5).ae(v) + assert hyper([0.5,0.75+j], [2.0], 0.5+0j).ae(v) + assert hyper([0.5,0.75+j], [2.0+0j], 0.5+0j).ae(v) + v = 0.9625 - 0.125j + assert hyper([(3,2),-1],[4], 0.1+j/3).ae(v) + assert hyper([1.5,-1.0],[4], 0.1+j/3).ae(v) + assert hyper([1.5,-1.0],[4+0j], 0.1+j/3).ae(v) + assert hyper([1.5+0j,-1.0+0j],[4+0j], 0.1+j/3).ae(v) + v = 1.02111069501693445001 - 0.50402252613466859521j + assert hyper([(2,10),(3,10)],[(4,10)],1.5).ae(v) + assert hyper([0.2,(3,10)],[0.4+0j],1.5).ae(v) + assert hyper([0.2,(3,10)],[0.4+0j],1.5+0j).ae(v) + v = 0.76922501362865848528 + 0.32640579593235886194j + assert hyper([(2,10),(3,10)],[(4,10)],4+2j).ae(v) + assert hyper([0.2,(3,10)],[0.4+0j],4+2j).ae(v) + assert hyper([0.2,(3,10)],[(4,10)],4+2j).ae(v) + +def test_hyper_2f1_hard(): + mp.dps = 15 + # Singular cases + assert hyp2f1(2,-1,-1,3).ae(7) + assert hyp2f1(2,-1,-1,3,eliminate_all=True).ae(0.25) + assert hyp2f1(2,-2,-2,3).ae(34) + assert hyp2f1(2,-2,-2,3,eliminate_all=True).ae(0.25) + assert hyp2f1(2,-2,-3,3) == 14 + assert hyp2f1(2,-3,-2,3) == inf + assert hyp2f1(2,-1.5,-1.5,3) == 0.25 + assert hyp2f1(1,2,3,0) == 1 + assert hyp2f1(0,1,0,0) == 1 + assert hyp2f1(0,0,0,0) == 1 + assert 
isnan(hyp2f1(1,1,0,0)) + assert hyp2f1(2,-1,-5, 0.25+0.25j).ae(1.1+0.1j) + assert hyp2f1(2,-5,-5, 0.25+0.25j, eliminate=False).ae(163./128 + 125./128*j) + assert hyp2f1(0.7235, -1, -5, 0.3).ae(1.04341) + assert hyp2f1(0.7235, -5, -5, 0.3, eliminate=False).ae(1.2939225017815903812) + assert hyp2f1(-1,-2,4,1) == 1.5 + assert hyp2f1(1,2,-3,1) == inf + assert hyp2f1(-2,-2,1,1) == 6 + assert hyp2f1(1,-2,-4,1).ae(5./3) + assert hyp2f1(0,-6,-4,1) == 1 + assert hyp2f1(0,-3,-4,1) == 1 + assert hyp2f1(0,0,0,1) == 1 + assert hyp2f1(1,0,0,1,eliminate=False) == 1 + assert hyp2f1(1,1,0,1) == inf + assert hyp2f1(1,-6,-4,1) == inf + assert hyp2f1(-7.2,-0.5,-4.5,1) == 0 + assert hyp2f1(-7.2,-1,-2,1).ae(-2.6) + assert hyp2f1(1,-0.5,-4.5, 1) == inf + assert hyp2f1(1,0.5,-4.5, 1) == -inf + # Check evaluation on / close to unit circle + z = exp(j*pi/3) + w = (nthroot(2,3)+1)*exp(j*pi/12)/nthroot(3,4)**3 + assert hyp2f1('1/2','1/6','1/3', z).ae(w) + assert hyp2f1('1/2','1/6','1/3', z.conjugate()).ae(w.conjugate()) + assert hyp2f1(0.25, (1,3), 2, '0.999').ae(1.06826449496030635) + assert hyp2f1(0.25, (1,3), 2, '1.001').ae(1.06867299254830309446-0.00001446586793975874j) + assert hyp2f1(0.25, (1,3), 2, -1).ae(0.96656584492524351673) + assert hyp2f1(0.25, (1,3), 2, j).ae(0.99041766248982072266+0.03777135604180735522j) + assert hyp2f1(2,3,5,'0.99').ae(27.699347904322690602) + assert hyp2f1((3,2),-0.5,3,'0.99').ae(0.68403036843911661388) + assert hyp2f1(2,3,5,1j).ae(0.37290667145974386127+0.59210004902748285917j) + assert fsum([hyp2f1((7,10),(2,3),(-1,2), 0.95*exp(j*k)) for k in range(1,15)]).ae(52.851400204289452922+6.244285013912953225j) + assert fsum([hyp2f1((7,10),(2,3),(-1,2), 1.05*exp(j*k)) for k in range(1,15)]).ae(54.506013786220655330-3.000118813413217097j) + assert fsum([hyp2f1((7,10),(2,3),(-1,2), exp(j*k)) for k in range(1,15)]).ae(55.792077935955314887+1.731986485778500241j) + assert hyp2f1(2,2.5,-3.25,0.999).ae(218373932801217082543180041.33) + # Branches + assert hyp2f1(1,1,2,1.01).ae(4.5595744415723676911-3.1104877758314784539j) + assert hyp2f1(1,1,2,1.01+0.1j).ae(2.4149427480552782484+1.4148224796836938829j) + assert hyp2f1(1,1,2,3+4j).ae(0.14576709331407297807+0.48379185417980360773j) + assert hyp2f1(1,1,2,4).ae(-0.27465307216702742285 - 0.78539816339744830962j) + assert hyp2f1(1,1,2,-4).ae(0.40235947810852509365) + # Other: + # Cancellation with a large parameter involved (bug reported on sage-devel) + assert hyp2f1(112, (51,10), (-9,10), -0.99999).ae(-1.6241361047970862961e-24, abs_eps=0, rel_eps=eps*16) + +def test_hyper_3f2_etc(): + assert hyper([1,2,3],[1.5,8],-1).ae(0.67108992351533333030) + assert hyper([1,2,3,4],[5,6,7], -1).ae(0.90232988035425506008) + assert hyper([1,2,3],[1.25,5], 1).ae(28.924181329701905701) + assert hyper([1,2,3,4],[5,6,7],5).ae(1.5192307344006649499-1.1529845225075537461j) + assert hyper([1,2,3,4,5],[6,7,8,9],-1).ae(0.96288759462882357253) + assert hyper([1,2,3,4,5],[6,7,8,9],1).ae(1.0428697385885855841) + assert hyper([1,2,3,4,5],[6,7,8,9],5).ae(1.33980653631074769423-0.07143405251029226699j) + assert hyper([1,2.79,3.08,4.37],[5.2,6.1,7.3],5).ae(1.0996321464692607231-1.7748052293979985001j) + assert hyper([1,1,1],[1,2],1) == inf + assert hyper([1,1,1],[2,(101,100)],1).ae(100.01621213528313220) + # slow -- covered by doctests + #assert hyper([1,1,1],[2,3],0.9999).ae(1.2897972005319693905) + +def test_hyper_u(): + mp.dps = 15 + assert hyperu(2,-3,0).ae(0.05) + assert hyperu(2,-3.5,0).ae(4./99) + assert hyperu(2,0,0) == 0.5 + assert hyperu(-5,1,0) == -120 + assert 
hyperu(-5,2,0) == inf + assert hyperu(-5,-2,0) == 0 + assert hyperu(7,7,3).ae(0.00014681269365593503986) #exp(3)*gammainc(-6,3) + assert hyperu(2,-3,4).ae(0.011836478100271995559) + assert hyperu(3,4,5).ae(1./125) + assert hyperu(2,3,0.0625) == 256 + assert hyperu(-1,2,0.25+0.5j) == -1.75+0.5j + assert hyperu(0.5,1.5,7.25).ae(2/sqrt(29)) + assert hyperu(2,6,pi).ae(0.55804439825913399130) + assert (hyperu((3,2),8,100+201j)*10**4).ae(-0.3797318333856738798 - 2.9974928453561707782j) + assert (hyperu((5,2),(-1,2),-5000)*10**10).ae(-5.6681877926881664678j) + # XXX: fails because of undetected cancellation in low level series code + # Alternatively: could use asymptotic series here, if convergence test + # tweaked back to recognize this one + #assert (hyperu((5,2),(-1,2),-500)*10**7).ae(-1.82526906001593252847j) + +def test_hyper_2f0(): + mp.dps = 15 + assert hyper([1,2],[],3) == hyp2f0(1,2,3) + assert hyp2f0(2,3,7).ae(0.0116108068639728714668 - 0.0073727413865865802130j) + assert hyp2f0(2,3,0) == 1 + assert hyp2f0(0,0,0) == 1 + assert hyp2f0(-1,-1,1).ae(2) + assert hyp2f0(-4,1,1.5).ae(62.5) + assert hyp2f0(-4,1,50).ae(147029801) + assert hyp2f0(-4,1,0.0001).ae(0.99960011997600240000) + assert hyp2f0(0.5,0.25,0.001).ae(1.0001251174078538115) + assert hyp2f0(0.5,0.25,3+4j).ae(0.85548875824755163518 + 0.21636041283392292973j) + # Important: cancellation check + assert hyp2f0((1,6),(5,6),-0.02371708245126284498).ae(0.996785723120804309) + # Should be exact; polynomial case + assert hyp2f0(-2,1,0.5+0.5j,zeroprec=200) == 0 + assert hyp2f0(1,-2,0.5+0.5j,zeroprec=200) == 0 + # There used to be a bug in thresholds that made one of the following hang + for d in [15, 50, 80]: + mp.dps = d + assert hyp2f0(1.5, 0.5, 0.009).ae('1.006867007239309717945323585695344927904000945829843527398772456281301440034218290443367270629519483 + 1.238277162240704919639384945859073461954721356062919829456053965502443570466701567100438048602352623e-46j') + +def test_hyper_1f2(): + mp.dps = 15 + assert hyper([1],[2,3],4) == hyp1f2(1,2,3,4) + a1,b1,b2 = (1,10),(2,3),1./16 + assert hyp1f2(a1,b1,b2,10).ae(298.7482725554557568) + assert hyp1f2(a1,b1,b2,100).ae(224128961.48602947604) + assert hyp1f2(a1,b1,b2,1000).ae(1.1669528298622675109e+27) + assert hyp1f2(a1,b1,b2,10000).ae(2.4780514622487212192e+86) + assert hyp1f2(a1,b1,b2,100000).ae(1.3885391458871523997e+274) + assert hyp1f2(a1,b1,b2,1000000).ae('9.8851796978960318255e+867') + assert hyp1f2(a1,b1,b2,10**7).ae('1.1505659189516303646e+2746') + assert hyp1f2(a1,b1,b2,10**8).ae('1.4672005404314334081e+8685') + assert hyp1f2(a1,b1,b2,10**20).ae('3.6888217332150976493e+8685889636') + assert hyp1f2(a1,b1,b2,10*j).ae(-16.163252524618572878 - 44.321567896480184312j) + assert hyp1f2(a1,b1,b2,100*j).ae(61938.155294517848171 + 637349.45215942348739j) + assert hyp1f2(a1,b1,b2,1000*j).ae(8455057657257695958.7 + 6261969266997571510.6j) + assert hyp1f2(a1,b1,b2,10000*j).ae(-8.9771211184008593089e+60 + 4.6550528111731631456e+59j) + assert hyp1f2(a1,b1,b2,100000*j).ae(2.6398091437239324225e+193 + 4.1658080666870618332e+193j) + assert hyp1f2(a1,b1,b2,1000000*j).ae('3.5999042951925965458e+613 + 1.5026014707128947992e+613j') + assert hyp1f2(a1,b1,b2,10**7*j).ae('-8.3208715051623234801e+1939 - 3.6752883490851869429e+1941j') + assert hyp1f2(a1,b1,b2,10**8*j).ae('2.0724195707891484454e+6140 - 1.3276619482724266387e+6141j') + assert hyp1f2(a1,b1,b2,10**20*j).ae('-1.1734497974795488504e+6141851462 + 1.1498106965385471542e+6141851462j') + +def test_hyper_2f3(): + mp.dps = 15 + assert 
hyper([1,2],[3,4,5],6) == hyp2f3(1,2,3,4,5,6) + a1,a2,b1,b2,b3 = (1,10),(2,3),(3,10), 2, 1./16 + # Check asymptotic expansion + assert hyp2f3(a1,a2,b1,b2,b3,10).ae(128.98207160698659976) + assert hyp2f3(a1,a2,b1,b2,b3,1000).ae(6.6309632883131273141e25) + assert hyp2f3(a1,a2,b1,b2,b3,10000).ae(4.6863639362713340539e84) + assert hyp2f3(a1,a2,b1,b2,b3,100000).ae(8.6632451236103084119e271) + assert hyp2f3(a1,a2,b1,b2,b3,10**6).ae('2.0291718386574980641e865') + assert hyp2f3(a1,a2,b1,b2,b3,10**7).ae('7.7639836665710030977e2742') + assert hyp2f3(a1,a2,b1,b2,b3,10**8).ae('3.2537462584071268759e8681') + assert hyp2f3(a1,a2,b1,b2,b3,10**20).ae('1.2966030542911614163e+8685889627') + assert hyp2f3(a1,a2,b1,b2,b3,10*j).ae(-18.551602185587547854 - 13.348031097874113552j) + assert hyp2f3(a1,a2,b1,b2,b3,100*j).ae(78634.359124504488695 + 74459.535945281973996j) + assert hyp2f3(a1,a2,b1,b2,b3,1000*j).ae(597682550276527901.59 - 65136194809352613.078j) + assert hyp2f3(a1,a2,b1,b2,b3,10000*j).ae(-1.1779696326238582496e+59 + 1.2297607505213133872e+59j) + assert hyp2f3(a1,a2,b1,b2,b3,100000*j).ae(2.9844228969804380301e+191 + 7.5587163231490273296e+190j) + assert hyp2f3(a1,a2,b1,b2,b3,1000000*j).ae('7.4859161049322370311e+610 - 2.8467477015940090189e+610j') + assert hyp2f3(a1,a2,b1,b2,b3,10**7*j).ae('-1.7477645579418800826e+1938 - 1.7606522995808116405e+1938j') + assert hyp2f3(a1,a2,b1,b2,b3,10**8*j).ae('-1.6932731942958401784e+6137 - 2.4521909113114629368e+6137j') + assert hyp2f3(a1,a2,b1,b2,b3,10**20*j).ae('-2.0988815677627225449e+6141851451 + 5.7708223542739208681e+6141851452j') + +def test_hyper_2f2(): + mp.dps = 15 + assert hyper([1,2],[3,4],5) == hyp2f2(1,2,3,4,5) + a1,a2,b1,b2 = (3,10),4,(1,2),1./16 + assert hyp2f2(a1,a2,b1,b2,10).ae(448225936.3377556696) + assert hyp2f2(a1,a2,b1,b2,10000).ae('1.2012553712966636711e+4358') + assert hyp2f2(a1,a2,b1,b2,-20000).ae(-0.04182343755661214626) + assert hyp2f2(a1,a2,b1,b2,10**20).ae('1.1148680024303263661e+43429448190325182840') + +def test_orthpoly(): + mp.dps = 15 + assert jacobi(-4,2,3,0.7).ae(22800./4913) + assert jacobi(3,2,4,5.5) == 4133.125 + assert jacobi(1.5,5/6.,4,0).ae(-1.0851951434075508417) + assert jacobi(-2, 1, 2, 4).ae(-0.16) + assert jacobi(2, -1, 2.5, 4).ae(34.59375) + #assert jacobi(2, -1, 2, 4) == 28.5 + assert legendre(5, 7) == 129367 + assert legendre(0.5,0).ae(0.53935260118837935667) + assert legendre(-1,-1) == 1 + assert legendre(0,-1) == 1 + assert legendre(0, 1) == 1 + assert legendre(1, -1) == -1 + assert legendre(7, 1) == 1 + assert legendre(7, -1) == -1 + assert legendre(8,1.5).ae(15457523./32768) + assert legendre(j,-j).ae(2.4448182735671431011 + 0.6928881737669934843j) + assert chebyu(5,1) == 6 + assert chebyt(3,2) == 26 + assert legendre(3.5,-1) == inf + assert legendre(4.5,-1) == -inf + assert legendre(3.5+1j,-1) == mpc(inf,inf) + assert legendre(4.5+1j,-1) == mpc(-inf,-inf) + assert laguerre(4, -2, 3).ae(-1.125) + assert laguerre(3, 1+j, 0.5).ae(0.2291666666666666667 + 2.5416666666666666667j) + +def test_hermite(): + mp.dps = 15 + assert hermite(-2, 0).ae(0.5) + assert hermite(-1, 0).ae(0.88622692545275801365) + assert hermite(0, 0).ae(1) + assert hermite(1, 0) == 0 + assert hermite(2, 0).ae(-2) + assert hermite(0, 2).ae(1) + assert hermite(1, 2).ae(4) + assert hermite(1, -2).ae(-4) + assert hermite(2, -2).ae(14) + assert hermite(0.5, 0).ae(0.69136733903629335053) + assert hermite(9, 0) == 0 + assert hermite(4,4).ae(3340) + assert hermite(3,4).ae(464) + assert hermite(-4,4).ae(0.00018623860287512396181) + assert 
hermite(-3,4).ae(0.0016540169879668766270) + assert hermite(9, 2.5j).ae(13638725j) + assert hermite(9, -2.5j).ae(-13638725j) + assert hermite(9, 100).ae(511078883759363024000) + assert hermite(9, -100).ae(-511078883759363024000) + assert hermite(9, 100j).ae(512922083920643024000j) + assert hermite(9, -100j).ae(-512922083920643024000j) + assert hermite(-9.5, 2.5j).ae(-2.9004951258126778174e-6 + 1.7601372934039951100e-6j) + assert hermite(-9.5, -2.5j).ae(-2.9004951258126778174e-6 - 1.7601372934039951100e-6j) + assert hermite(-9.5, 100).ae(1.3776300722767084162e-22, abs_eps=0, rel_eps=eps) + assert hermite(-9.5, -100).ae('1.3106082028470671626e4355') + assert hermite(-9.5, 100j).ae(-9.7900218581864768430e-23 - 9.7900218581864768430e-23j, abs_eps=0, rel_eps=eps) + assert hermite(-9.5, -100j).ae(-9.7900218581864768430e-23 + 9.7900218581864768430e-23j, abs_eps=0, rel_eps=eps) + assert hermite(2+3j, -1-j).ae(851.3677063883687676 - 1496.4373467871007997j) + +def test_gegenbauer(): + mp.dps = 15 + assert gegenbauer(1,2,3).ae(12) + assert gegenbauer(2,3,4).ae(381) + assert gegenbauer(0,0,0) == 0 + assert gegenbauer(2,-1,3) == 0 + assert gegenbauer(-7, 0.5, 3).ae(8989) + assert gegenbauer(1, -0.5, 3).ae(-3) + assert gegenbauer(1, -1.5, 3).ae(-9) + assert gegenbauer(1, -0.5, 3).ae(-3) + assert gegenbauer(-0.5, -0.5, 3).ae(-2.6383553159023906245) + assert gegenbauer(2+3j, 1-j, 3+4j).ae(14.880536623203696780 + 20.022029711598032898j) + #assert gegenbauer(-2, -0.5, 3).ae(-12) + +def test_legenp(): + mp.dps = 15 + assert legenp(2,0,4) == legendre(2,4) + assert legenp(-2, -1, 0.5).ae(0.43301270189221932338) + assert legenp(-2, -1, 0.5, type=3).ae(0.43301270189221932338j) + assert legenp(-2, 1, 0.5).ae(-0.86602540378443864676) + assert legenp(2+j, 3+4j, -j).ae(134742.98773236786148 + 429782.72924463851745j) + assert legenp(2+j, 3+4j, -j, type=3).ae(802.59463394152268507 - 251.62481308942906447j) + assert legenp(2,4,3).ae(0) + assert legenp(2,4,3,type=3).ae(0) + assert legenp(2,1,0.5).ae(-1.2990381056766579701) + assert legenp(2,1,0.5,type=3).ae(1.2990381056766579701j) + assert legenp(3,2,3).ae(-360) + assert legenp(3,3,3).ae(240j*2**0.5) + assert legenp(3,4,3).ae(0) + assert legenp(0,0.5,2).ae(0.52503756790433198939 - 0.52503756790433198939j) + assert legenp(-1,-0.5,2).ae(0.60626116232846498110 + 0.60626116232846498110j) + assert legenp(-2,0.5,2).ae(1.5751127037129959682 - 1.5751127037129959682j) + assert legenp(-2,0.5,-0.5).ae(-0.85738275810499171286) + +def test_legenq(): + mp.dps = 15 + f = legenq + # Evaluation at poles + assert isnan(f(3,2,1)) + assert isnan(f(3,2,-1)) + assert isnan(f(3,2,1,type=3)) + assert isnan(f(3,2,-1,type=3)) + # Evaluation at 0 + assert f(0,1,0,type=2).ae(-1) + assert f(-2,2,0,type=2,zeroprec=200).ae(0) + assert f(1.5,3,0,type=2).ae(-2.2239343475841951023) + assert f(0,1,0,type=3).ae(j) + assert f(-2,2,0,type=3,zeroprec=200).ae(0) + assert f(1.5,3,0,type=3).ae(2.2239343475841951022*(1-1j)) + # Standard case, degree 0 + assert f(0,0,-1.5).ae(-0.8047189562170501873 + 1.5707963267948966192j) + assert f(0,0,-0.5).ae(-0.54930614433405484570) + assert f(0,0,0,zeroprec=200).ae(0) + assert f(0,0,0.5).ae(0.54930614433405484570) + assert f(0,0,1.5).ae(0.8047189562170501873 - 1.5707963267948966192j) + assert f(0,0,-1.5,type=3).ae(-0.80471895621705018730) + assert f(0,0,-0.5,type=3).ae(-0.5493061443340548457 - 1.5707963267948966192j) + assert f(0,0,0,type=3).ae(-1.5707963267948966192j) + assert f(0,0,0.5,type=3).ae(0.5493061443340548457 - 1.5707963267948966192j) + assert 
f(0,0,1.5,type=3).ae(0.80471895621705018730) + # Standard case, degree 1 + assert f(1,0,-1.5).ae(0.2070784343255752810 - 2.3561944901923449288j) + assert f(1,0,-0.5).ae(-0.72534692783297257715) + assert f(1,0,0).ae(-1) + assert f(1,0,0.5).ae(-0.72534692783297257715) + assert f(1,0,1.5).ae(0.2070784343255752810 - 2.3561944901923449288j) + # Standard case, degree 2 + assert f(2,0,-1.5).ae(-0.0635669991240192885 + 4.5160394395353277803j) + assert f(2,0,-0.5).ae(0.81866326804175685571) + assert f(2,0,0,zeroprec=200).ae(0) + assert f(2,0,0.5).ae(-0.81866326804175685571) + assert f(2,0,1.5).ae(0.0635669991240192885 - 4.5160394395353277803j) + # Misc orders and degrees + assert f(2,3,1.5,type=2).ae(-5.7243340223994616228j) + assert f(2,3,1.5,type=3).ae(-5.7243340223994616228) + assert f(2,3,0.5,type=2).ae(-12.316805742712016310) + assert f(2,3,0.5,type=3).ae(-12.316805742712016310j) + assert f(2,3,-1.5,type=2).ae(-5.7243340223994616228j) + assert f(2,3,-1.5,type=3).ae(5.7243340223994616228) + assert f(2,3,-0.5,type=2).ae(-12.316805742712016310) + assert f(2,3,-0.5,type=3).ae(-12.316805742712016310j) + assert f(2+3j, 3+4j, 0.5, type=3).ae(0.0016119404873235186807 - 0.0005885900510718119836j) + assert f(2+3j, 3+4j, -1.5, type=3).ae(0.008451400254138808670 + 0.020645193304593235298j) + assert f(-2.5,1,-1.5).ae(3.9553395527435335749j) + assert f(-2.5,1,-0.5).ae(1.9290561746445456908) + assert f(-2.5,1,0).ae(1.2708196271909686299) + assert f(-2.5,1,0.5).ae(-0.31584812990742202869) + assert f(-2.5,1,1.5).ae(-3.9553395527435335742 + 0.2993235655044701706j) + assert f(-2.5,1,-1.5,type=3).ae(0.29932356550447017254j) + assert f(-2.5,1,-0.5,type=3).ae(-0.3158481299074220287 - 1.9290561746445456908j) + assert f(-2.5,1,0,type=3).ae(1.2708196271909686292 - 1.2708196271909686299j) + assert f(-2.5,1,0.5,type=3).ae(1.9290561746445456907 + 0.3158481299074220287j) + assert f(-2.5,1,1.5,type=3).ae(-0.29932356550447017254) + +def test_agm(): + mp.dps = 15 + assert agm(0,0) == 0 + assert agm(0,1) == 0 + assert agm(1,1) == 1 + assert agm(7,7) == 7 + assert agm(j,j) == j + assert (1/agm(1,sqrt(2))).ae(0.834626841674073186) + assert agm(1,2).ae(1.4567910310469068692) + assert agm(1,3).ae(1.8636167832448965424) + assert agm(1,j).ae(0.599070117367796104+0.599070117367796104j) + assert agm(2) == agm(1,2) + assert agm(-3,4).ae(0.63468509766550907+1.3443087080896272j) + +def test_gammainc(): + mp.dps = 15 + assert gammainc(2,5).ae(6*exp(-5)) + assert gammainc(2,0,5).ae(1-6*exp(-5)) + assert gammainc(2,3,5).ae(-6*exp(-5)+4*exp(-3)) + assert gammainc(-2.5,-0.5).ae(-0.9453087204829418812-5.3164237738936178621j) + assert gammainc(0,2,4).ae(0.045121158298212213088) + assert gammainc(0,3).ae(0.013048381094197037413) + assert gammainc(0,2+j,1-j).ae(0.00910653685850304839-0.22378752918074432574j) + assert gammainc(0,1-j).ae(0.00028162445198141833+0.17932453503935894015j) + assert gammainc(3,4,5,True).ae(0.11345128607046320253) + assert gammainc(3.5,0,inf).ae(gamma(3.5)) + assert gammainc(-150.5,500).ae('6.9825435345798951153e-627') + assert gammainc(-150.5,800).ae('4.6885137549474089431e-788') + assert gammainc(-3.5, -20.5).ae(0.27008820585226911 - 1310.31447140574997636j) + assert gammainc(-3.5, -200.5).ae(0.27008820585226911 - 5.3264597096208368435e76j) # XXX real part + assert gammainc(0,0,2) == inf + assert gammainc(1,b=1).ae(0.6321205588285576784) + assert gammainc(3,2,2) == 0 + assert gammainc(2,3+j,3-j).ae(-0.28135485191849314194j) + assert gammainc(4+0j,1).ae(5.8860710587430771455) + # GH issue #301 + assert 
gammainc(-1,-1).ae(-0.8231640121031084799 + 3.1415926535897932385j) + assert gammainc(-2,-1).ae(1.7707229202810768576 - 1.5707963267948966192j) + assert gammainc(-3,-1).ae(-1.4963349162467073643 + 0.5235987755982988731j) + assert gammainc(-4,-1).ae(1.05365418617643814992 - 0.13089969389957471827j) + # Regularized upper gamma + assert isnan(gammainc(0, 0, regularized=True)) + assert gammainc(-1, 0, regularized=True) == inf + assert gammainc(1, 0, regularized=True) == 1 + assert gammainc(0, 5, regularized=True) == 0 + assert gammainc(0, 2+3j, regularized=True) == 0 + assert gammainc(0, 5000, regularized=True) == 0 + assert gammainc(0, 10**30, regularized=True) == 0 + assert gammainc(-1, 5, regularized=True) == 0 + assert gammainc(-1, 5000, regularized=True) == 0 + assert gammainc(-1, 10**30, regularized=True) == 0 + assert gammainc(-1, -5, regularized=True) == 0 + assert gammainc(-1, -5000, regularized=True) == 0 + assert gammainc(-1, -10**30, regularized=True) == 0 + assert gammainc(-1, 3+4j, regularized=True) == 0 + assert gammainc(1, 5, regularized=True).ae(exp(-5)) + assert gammainc(1, 5000, regularized=True).ae(exp(-5000)) + assert gammainc(1, 10**30, regularized=True).ae(exp(-10**30)) + assert gammainc(1, 3+4j, regularized=True).ae(exp(-3-4j)) + assert gammainc(-1000000,2).ae('1.3669297209397347754e-301037', abs_eps=0, rel_eps=8*eps) + assert gammainc(-1000000,2,regularized=True) == 0 + assert gammainc(-1000000,3+4j).ae('-1.322575609404222361e-698979 - 4.9274570591854533273e-698978j', abs_eps=0, rel_eps=8*eps) + assert gammainc(-1000000,3+4j,regularized=True) == 0 + assert gammainc(2+3j, 4+5j, regularized=True).ae(0.085422013530993285774-0.052595379150390078503j) + assert gammainc(1000j, 1000j, regularized=True).ae(0.49702647628921131761 + 0.00297355675013575341j) + # Generalized + assert gammainc(3,4,2) == -gammainc(3,2,4) + assert gammainc(4, 2, 3).ae(1.2593494302978947396) + assert gammainc(4, 2, 3, regularized=True).ae(0.20989157171631578993) + assert gammainc(0, 2, 3).ae(0.035852129613864082155) + assert gammainc(0, 2, 3, regularized=True) == 0 + assert gammainc(-1, 2, 3).ae(0.015219822548487616132) + assert gammainc(-1, 2, 3, regularized=True) == 0 + assert gammainc(0, 2, 3).ae(0.035852129613864082155) + assert gammainc(0, 2, 3, regularized=True) == 0 + # Should use upper gammas + assert gammainc(5, 10000, 12000).ae('1.1359381951461801687e-4327', abs_eps=0, rel_eps=8*eps) + # Should use lower gammas + assert gammainc(10000, 2, 3).ae('8.1244514125995785934e4765') + # GH issue 306 + assert gammainc(3,-1-1j) == 0 + assert gammainc(3,-1+1j) == 0 + assert gammainc(2,-1) == 0 + assert gammainc(2,-1+0j) == 0 + assert gammainc(2+0j,-1) == 0 + +def test_gammainc_expint_n(): + # These tests are intended to check all cases of the low-level code + # for upper gamma and expint with small integer index. 
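+    # (Illustrative aside, not from the upstream comments: the two families
+    # are tied together by E_n(z) = z**(n-1) * gammainc(1-n, z), e.g.
+    # expint(3, 3.5) == 3.5**2 * gammainc(-2, 3.5), which is why a single
+    # low-level routine can serve both.)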
+ # Need to cover positive/negative arguments; small/large/huge arguments + # for both positive and negative indices, as well as indices 0 and 1 + # which may be special-cased + mp.dps = 15 + assert expint(-3,3.5).ae(0.021456366563296693987) + assert expint(-2,3.5).ae(0.014966633183073309405) + assert expint(-1,3.5).ae(0.011092916359219041088) + assert expint(0,3.5).ae(0.0086278238349481430685) + assert expint(1,3.5).ae(0.0069701398575483929193) + assert expint(2,3.5).ae(0.0058018939208991255223) + assert expint(3,3.5).ae(0.0049453773495857807058) + assert expint(-3,-3.5).ae(-4.6618170604073311319) + assert expint(-2,-3.5).ae(-5.5996974157555515963) + assert expint(-1,-3.5).ae(-6.7582555017739415818) + assert expint(0,-3.5).ae(-9.4615577024835182145) + assert expint(1,-3.5).ae(-13.925353995152335292 - 3.1415926535897932385j) + assert expint(2,-3.5).ae(-15.62328702434085977 - 10.995574287564276335j) + assert expint(3,-3.5).ae(-10.783026313250347722 - 19.242255003237483586j) + assert expint(-3,350).ae(2.8614825451252838069e-155, abs_eps=0, rel_eps=8*eps) + assert expint(-2,350).ae(2.8532837224504675901e-155, abs_eps=0, rel_eps=8*eps) + assert expint(-1,350).ae(2.8451316155828634555e-155, abs_eps=0, rel_eps=8*eps) + assert expint(0,350).ae(2.8370258275042797989e-155, abs_eps=0, rel_eps=8*eps) + assert expint(1,350).ae(2.8289659656701459404e-155, abs_eps=0, rel_eps=8*eps) + assert expint(2,350).ae(2.8209516419468505006e-155, abs_eps=0, rel_eps=8*eps) + assert expint(3,350).ae(2.8129824725501272171e-155, abs_eps=0, rel_eps=8*eps) + assert expint(-3,-350).ae(-2.8528796154044839443e+149) + assert expint(-2,-350).ae(-2.8610072121701264351e+149) + assert expint(-1,-350).ae(-2.8691813842677537647e+149) + assert expint(0,-350).ae(-2.8774025343659421709e+149) + u = expint(1,-350) + assert u.ae(-2.8856710698020863568e+149) + assert u.imag.ae(-3.1415926535897932385) + u = expint(2,-350) + assert u.ae(-2.8939874026504650534e+149) + assert u.imag.ae(-1099.5574287564276335) + u = expint(3,-350) + assert u.ae(-2.9023519497915044349e+149) + assert u.imag.ae(-192422.55003237483586) + assert expint(-3,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(-2,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(-1,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(0,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(1,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(2,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(3,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(-3,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871') + assert expint(-2,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871') + assert expint(-1,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871') + assert expint(0,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871') + u = expint(1,-350000000000000000000000) + assert u.ae('-3.7805306852415755699e+152003068666138139677871') + assert u.imag.ae(-3.1415926535897932385) + u = 
expint(2,-350000000000000000000000) + assert u.imag.ae(-1.0995574287564276335e+24) + assert u.ae('-3.7805306852415755699e+152003068666138139677871') + u = expint(3,-350000000000000000000000) + assert u.imag.ae(-1.9242255003237483586e+47) + assert u.ae('-3.7805306852415755699e+152003068666138139677871') + # Small case; no branch cut + assert gammainc(-3,3.5).ae(0.00010020262545203707109) + assert gammainc(-2,3.5).ae(0.00040370427343557393517) + assert gammainc(-1,3.5).ae(0.0016576839773997501492) + assert gammainc(0,3.5).ae(0.0069701398575483929193) + assert gammainc(1,3.5).ae(0.03019738342231850074) + assert gammainc(2,3.5).ae(0.13588822540043325333) + assert gammainc(3,3.5).ae(0.64169439772426814072) + # Small case; with branch cut + assert gammainc(-3,-3.5).ae(0.03595832954467563286 + 0.52359877559829887308j) + assert gammainc(-2,-3.5).ae(-0.88024704597962022221 - 1.5707963267948966192j) + assert gammainc(-1,-3.5).ae(4.4637962926688170771 + 3.1415926535897932385j) + assert gammainc(0,-3.5).ae(-13.925353995152335292 - 3.1415926535897932385j) + assert gammainc(1,-3.5).ae(33.115451958692313751) + assert gammainc(2,-3.5).ae(-82.788629896730784377) + assert gammainc(3,-3.5).ae(240.08702670051927469) + # Asymptotic case; no branch cut + assert gammainc(-3,350).ae(6.5424095113340358813e-163, abs_eps=0, rel_eps=8*eps) + assert gammainc(-2,350).ae(2.296312222489899769e-160, abs_eps=0, rel_eps=8*eps) + assert gammainc(-1,350).ae(8.059861834133858573e-158, abs_eps=0, rel_eps=8*eps) + assert gammainc(0,350).ae(2.8289659656701459404e-155, abs_eps=0, rel_eps=8*eps) + assert gammainc(1,350).ae(9.9295903962649792963e-153, abs_eps=0, rel_eps=8*eps) + assert gammainc(2,350).ae(3.485286229089007733e-150, abs_eps=0, rel_eps=8*eps) + assert gammainc(3,350).ae(1.2233453960006379793e-147, abs_eps=0, rel_eps=8*eps) + # Asymptotic case; branch cut + u = gammainc(-3,-350) + assert u.ae(6.7889565783842895085e+141) + assert u.imag.ae(0.52359877559829887308) + u = gammainc(-2,-350) + assert u.ae(-2.3692668977889832121e+144) + assert u.imag.ae(-1.5707963267948966192) + u = gammainc(-1,-350) + assert u.ae(8.2685354361441858669e+146) + assert u.imag.ae(3.1415926535897932385) + u = gammainc(0,-350) + assert u.ae(-2.8856710698020863568e+149) + assert u.imag.ae(-3.1415926535897932385) + u = gammainc(1,-350) + assert u.ae(1.0070908870280797598e+152) + assert u.imag == 0 + u = gammainc(2,-350) + assert u.ae(-3.5147471957279983618e+154) + assert u.imag == 0 + u = gammainc(3,-350) + assert u.ae(1.2266568422179417091e+157) + assert u.imag == 0 + # Extreme asymptotic case + assert gammainc(-3,350000000000000000000000).ae('5.0362468738874738859e-152003068666138139677990', abs_eps=0, rel_eps=8*eps) + assert gammainc(-2,350000000000000000000000).ae('1.7626864058606158601e-152003068666138139677966', abs_eps=0, rel_eps=8*eps) + assert gammainc(-1,350000000000000000000000).ae('6.1694024205121555102e-152003068666138139677943', abs_eps=0, rel_eps=8*eps) + assert gammainc(0,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert gammainc(1,350000000000000000000000).ae('7.5575179651273905e-152003068666138139677896', abs_eps=0, rel_eps=8*eps) + assert gammainc(2,350000000000000000000000).ae('2.645131287794586675e-152003068666138139677872', abs_eps=0, rel_eps=8*eps) + assert gammainc(3,350000000000000000000000).ae('9.2579595072810533625e-152003068666138139677849', abs_eps=0, rel_eps=8*eps) + u = gammainc(-3,-350000000000000000000000) + assert 
u.ae('8.8175642804468234866e+152003068666138139677800') + assert u.imag.ae(0.52359877559829887308) + u = gammainc(-2,-350000000000000000000000) + assert u.ae('-3.0861474981563882203e+152003068666138139677824') + assert u.imag.ae(-1.5707963267948966192) + u = gammainc(-1,-350000000000000000000000) + assert u.ae('1.0801516243547358771e+152003068666138139677848') + assert u.imag.ae(3.1415926535897932385) + u = gammainc(0,-350000000000000000000000) + assert u.ae('-3.7805306852415755699e+152003068666138139677871') + assert u.imag.ae(-3.1415926535897932385) + assert gammainc(1,-350000000000000000000000).ae('1.3231857398345514495e+152003068666138139677895') + assert gammainc(2,-350000000000000000000000).ae('-4.6311500894209300731e+152003068666138139677918') + assert gammainc(3,-350000000000000000000000).ae('1.6209025312973255256e+152003068666138139677942') + +def test_incomplete_beta(): + mp.dps = 15 + assert betainc(-2,-3,0.5,0.75).ae(63.4305673311255413583969) + assert betainc(4.5,0.5+2j,2.5,6).ae(0.2628801146130621387903065 + 0.5162565234467020592855378j) + assert betainc(4,5,0,6).ae(90747.77142857142857142857) + +def test_erf(): + mp.dps = 15 + assert erf(0) == 0 + assert erf(1).ae(0.84270079294971486934) + assert erf(3+4j).ae(-120.186991395079444098 - 27.750337293623902498j) + assert erf(-4-3j).ae(-0.99991066178539168236 + 0.00004972026054496604j) + assert erf(pi).ae(0.99999112385363235839) + assert erf(1j).ae(1.6504257587975428760j) + assert erf(-1j).ae(-1.6504257587975428760j) + assert isinstance(erf(1), mpf) + assert isinstance(erf(-1), mpf) + assert isinstance(erf(0), mpf) + assert isinstance(erf(0j), mpc) + assert erf(inf) == 1 + assert erf(-inf) == -1 + assert erfi(0) == 0 + assert erfi(1/pi).ae(0.371682698493894314) + assert erfi(inf) == inf + assert erfi(-inf) == -inf + assert erf(1+0j) == erf(1) + assert erfc(1+0j) == erfc(1) + assert erf(0.2+0.5j).ae(1 - erfc(0.2+0.5j)) + assert erfc(0) == 1 + assert erfc(1).ae(1-erf(1)) + assert erfc(-1).ae(1-erf(-1)) + assert erfc(1/pi).ae(1-erf(1/pi)) + assert erfc(-10) == 2 + assert erfc(-1000000) == 2 + assert erfc(-inf) == 2 + assert erfc(inf) == 0 + assert isnan(erfc(nan)) + assert (erfc(10**4)*mpf(10)**43429453).ae('3.63998738656420') + assert erf(8+9j).ae(-1072004.2525062051158 + 364149.91954310255423j) + assert erfc(8+9j).ae(1072005.2525062051158 - 364149.91954310255423j) + assert erfc(-8-9j).ae(-1072003.2525062051158 + 364149.91954310255423j) + mp.dps = 50 + # This one does not use the asymptotic series + assert (erfc(10)*10**45).ae('2.0884875837625447570007862949577886115608181193212') + # This one does + assert (erfc(50)*10**1088).ae('2.0709207788416560484484478751657887929322509209954') + mp.dps = 15 + assert str(erfc(10**50)) == '3.66744826532555e-4342944819032518276511289189166050822943970058036665661144537831658646492088707747292249493384317534' + assert erfinv(0) == 0 + assert erfinv(0.5).ae(0.47693627620446987338) + assert erfinv(-0.5).ae(-0.47693627620446987338) + assert erfinv(1) == inf + assert erfinv(-1) == -inf + assert erf(erfinv(0.95)).ae(0.95) + assert erf(erfinv(0.999999999995)).ae(0.999999999995) + assert erf(erfinv(-0.999999999995)).ae(-0.999999999995) + mp.dps = 50 + assert erf(erfinv('0.99999999999999999999999999999995')).ae('0.99999999999999999999999999999995') + assert erf(erfinv('0.999999999999999999999999999999995')).ae('0.999999999999999999999999999999995') + assert erf(erfinv('-0.999999999999999999999999999999995')).ae('-0.999999999999999999999999999999995') + mp.dps = 15 + # Complex asymptotic expansions + v = 
erfc(50j) + assert v.real == 1 + assert v.imag.ae('-6.1481820666053078736e+1083') + assert erfc(-100+5j).ae(2) + assert (erfc(100+5j)*10**4335).ae(2.3973567853824133572 - 3.9339259530609420597j) + assert erfc(100+100j).ae(0.00065234366376857698698 - 0.0039357263629214118437j) + +def test_pdf(): + mp.dps = 15 + assert npdf(-inf) == 0 + assert npdf(inf) == 0 + assert npdf(5,0,2).ae(npdf(5+4,4,2)) + assert quadts(lambda x: npdf(x,-0.5,0.8), [-inf, inf]) == 1 + assert ncdf(0) == 0.5 + assert ncdf(3,3) == 0.5 + assert ncdf(-inf) == 0 + assert ncdf(inf) == 1 + assert ncdf(10) == 1 + # Verify that this is computed accurately + assert (ncdf(-10)*10**24).ae(7.619853024160526) + +def test_lambertw(): + mp.dps = 15 + assert lambertw(0) == 0 + assert lambertw(0+0j) == 0 + assert lambertw(inf) == inf + assert isnan(lambertw(nan)) + assert lambertw(inf,1).real == inf + assert lambertw(inf,1).imag.ae(2*pi) + assert lambertw(-inf,1).real == inf + assert lambertw(-inf,1).imag.ae(3*pi) + assert lambertw(0,-1) == -inf + assert lambertw(0,1) == -inf + assert lambertw(0,3) == -inf + assert lambertw(e).ae(1) + assert lambertw(1).ae(0.567143290409783873) + assert lambertw(-pi/2).ae(j*pi/2) + assert lambertw(-log(2)/2).ae(-log(2)) + assert lambertw(0.25).ae(0.203888354702240164) + assert lambertw(-0.25).ae(-0.357402956181388903) + assert lambertw(-1./10000,0).ae(-0.000100010001500266719) + assert lambertw(-0.25,-1).ae(-2.15329236411034965) + assert lambertw(0.25,-1).ae(-3.00899800997004620-4.07652978899159763j) + assert lambertw(-0.25,-1).ae(-2.15329236411034965) + assert lambertw(0.25,1).ae(-3.00899800997004620+4.07652978899159763j) + assert lambertw(-0.25,1).ae(-3.48973228422959210+7.41405453009603664j) + assert lambertw(-4).ae(0.67881197132094523+1.91195078174339937j) + assert lambertw(-4,1).ae(-0.66743107129800988+7.76827456802783084j) + assert lambertw(-4,-1).ae(0.67881197132094523-1.91195078174339937j) + assert lambertw(1000).ae(5.24960285240159623) + assert lambertw(1000,1).ae(4.91492239981054535+5.44652615979447070j) + assert lambertw(1000,-1).ae(4.91492239981054535-5.44652615979447070j) + assert lambertw(1000,5).ae(3.5010625305312892+29.9614548941181328j) + assert lambertw(3+4j).ae(1.281561806123775878+0.533095222020971071j) + assert lambertw(-0.4+0.4j).ae(-0.10396515323290657+0.61899273315171632j) + assert lambertw(3+4j,1).ae(-0.11691092896595324+5.61888039871282334j) + assert lambertw(3+4j,-1).ae(0.25856740686699742-3.85211668616143559j) + assert lambertw(-0.5,-1).ae(-0.794023632344689368-0.770111750510379110j) + assert lambertw(-1./10000,1).ae(-11.82350837248724344+6.80546081842002101j) + assert lambertw(-1./10000,-1).ae(-11.6671145325663544) + assert lambertw(-1./10000,-2).ae(-11.82350837248724344-6.80546081842002101j) + assert lambertw(-1./100000,4).ae(-14.9186890769540539+26.1856750178782046j) + assert lambertw(-1./100000,5).ae(-15.0931437726379218666+32.5525721210262290086j) + assert lambertw((2+j)/10).ae(0.173704503762911669+0.071781336752835511j) + assert lambertw((2+j)/10,1).ae(-3.21746028349820063+4.56175438896292539j) + assert lambertw((2+j)/10,-1).ae(-3.03781405002993088-3.53946629633505737j) + assert lambertw((2+j)/10,4).ae(-4.6878509692773249+23.8313630697683291j) + assert lambertw(-(2+j)/10).ae(-0.226933772515757933-0.164986470020154580j) + assert lambertw(-(2+j)/10,1).ae(-2.43569517046110001+0.76974067544756289j) + assert lambertw(-(2+j)/10,-1).ae(-3.54858738151989450-6.91627921869943589j) + assert lambertw(-(2+j)/10,4).ae(-4.5500846928118151+20.6672982215434637j) + mp.dps = 50 + assert 
lambertw(pi).ae('1.073658194796149172092178407024821347547745350410314531') + mp.dps = 15 + # Former bug in generated branch + assert lambertw(-0.5+0.002j).ae(-0.78917138132659918344 + 0.76743539379990327749j) + assert lambertw(-0.5-0.002j).ae(-0.78917138132659918344 - 0.76743539379990327749j) + assert lambertw(-0.448+0.4j).ae(-0.11855133765652382241 + 0.66570534313583423116j) + assert lambertw(-0.448-0.4j).ae(-0.11855133765652382241 - 0.66570534313583423116j) + assert lambertw(-0.65475+0.0001j).ae(-0.61053421111385310898+1.0396534993944097723803j) + # Huge branch index + w = lambertw(1,10**20) + assert w.real.ae(-47.889578926290259164) + assert w.imag.ae(6.2831853071795864769e+20) + +def test_lambertw_hard(): + def check(x,y): + y = convert(y) + type_ok = True + if isinstance(y, mpf): + type_ok = isinstance(x, mpf) + real_ok = abs(x.real-y.real) <= abs(y.real)*8*eps + imag_ok = abs(x.imag-y.imag) <= abs(y.imag)*8*eps + #print x, y, abs(x.real-y.real), abs(x.imag-y.imag) + return real_ok and imag_ok + # Evaluation near 0 + mp.dps = 15 + assert check(lambertw(1e-10), 9.999999999000000000e-11) + assert check(lambertw(-1e-10), -1.000000000100000000e-10) + assert check(lambertw(1e-10j), 9.999999999999999999733e-21 + 9.99999999999999999985e-11j) + assert check(lambertw(-1e-10j), 9.999999999999999999733e-21 - 9.99999999999999999985e-11j) + assert check(lambertw(1e-10,1), -26.303186778379041559 + 3.265093911703828397j) + assert check(lambertw(-1e-10,1), -26.326236166739163892 + 6.526183280686333315j) + assert check(lambertw(1e-10j,1), -26.312931726911421551 + 4.896366881798013421j) + assert check(lambertw(-1e-10j,1), -26.297238779529035066 + 1.632807161345576513j) + assert check(lambertw(1e-10,-1), -26.303186778379041559 - 3.265093911703828397j) + assert check(lambertw(-1e-10,-1), -26.295238819246925694) + assert check(lambertw(1e-10j,-1), -26.297238779529035028 - 1.6328071613455765135j) + assert check(lambertw(-1e-10j,-1), -26.312931726911421551 - 4.896366881798013421j) + # Test evaluation very close to the branch point -1/e + # on the -1, 0, and 1 branches + add = lambda x, y: fadd(x,y,exact=True) + sub = lambda x, y: fsub(x,y,exact=True) + addj = lambda x, y: fadd(x,fmul(y,1j,exact=True),exact=True) + subj = lambda x, y: fadd(x,fmul(y,-1j,exact=True),exact=True) + mp.dps = 1500 + a = -1/e + 10*eps + d3 = mpf('1e-3') + d10 = mpf('1e-10') + d20 = mpf('1e-20') + d40 = mpf('1e-40') + d80 = mpf('1e-80') + d300 = mpf('1e-300') + d1000 = mpf('1e-1000') + mp.dps = 15 + # ---- Branch 0 ---- + # -1/e + eps + assert check(lambertw(add(a,d3)), -0.92802015005456704876) + assert check(lambertw(add(a,d10)), -0.99997668374140088071) + assert check(lambertw(add(a,d20)), -0.99999999976683560186) + assert lambertw(add(a,d40)) == -1 + assert lambertw(add(a,d80)) == -1 + assert lambertw(add(a,d300)) == -1 + assert lambertw(add(a,d1000)) == -1 + # -1/e - eps + assert check(lambertw(sub(a,d3)), -0.99819016149860989001+0.07367191188934638577j) + assert check(lambertw(sub(a,d10)), -0.9999999998187812114595992+0.0000233164398140346109194j) + assert check(lambertw(sub(a,d20)), -0.99999999999999999998187+2.331643981597124203344e-10j) + assert check(lambertw(sub(a,d40)), -1.0+2.33164398159712420336e-20j) + assert check(lambertw(sub(a,d80)), -1.0+2.33164398159712420336e-40j) + assert check(lambertw(sub(a,d300)), -1.0+2.33164398159712420336e-150j) + assert check(lambertw(sub(a,d1000)), mpc(-1,'2.33164398159712420336e-500')) + # -1/e + eps*j + assert check(lambertw(addj(a,d3)), 
-0.94790387486938526634+0.05036819639190132490j) + assert check(lambertw(addj(a,d10)), -0.9999835127872943680999899+0.0000164870314895821225256j) + assert check(lambertw(addj(a,d20)), -0.999999999835127872929987+1.64872127051890935830e-10j) + assert check(lambertw(addj(a,d40)), -0.9999999999999999999835+1.6487212707001281468305e-20j) + assert check(lambertw(addj(a,d80)), -1.0 + 1.64872127070012814684865e-40j) + assert check(lambertw(addj(a,d300)), -1.0 + 1.64872127070012814684865e-150j) + assert check(lambertw(addj(a,d1000)), mpc(-1.0,'1.64872127070012814684865e-500')) + # -1/e - eps*j + assert check(lambertw(subj(a,d3)), -0.94790387486938526634-0.05036819639190132490j) + assert check(lambertw(subj(a,d10)), -0.9999835127872943680999899-0.0000164870314895821225256j) + assert check(lambertw(subj(a,d20)), -0.999999999835127872929987-1.64872127051890935830e-10j) + assert check(lambertw(subj(a,d40)), -0.9999999999999999999835-1.6487212707001281468305e-20j) + assert check(lambertw(subj(a,d80)), -1.0 - 1.64872127070012814684865e-40j) + assert check(lambertw(subj(a,d300)), -1.0 - 1.64872127070012814684865e-150j) + assert check(lambertw(subj(a,d1000)), mpc(-1.0,'-1.64872127070012814684865e-500')) + # ---- Branch 1 ---- + assert check(lambertw(addj(a,d3),1), -3.088501303219933378005990 + 7.458676867597474813950098j) + assert check(lambertw(addj(a,d80),1), -3.088843015613043855957087 + 7.461489285654254556906117j) + assert check(lambertw(addj(a,d300),1), -3.088843015613043855957087 + 7.461489285654254556906117j) + assert check(lambertw(addj(a,d1000),1), -3.088843015613043855957087 + 7.461489285654254556906117j) + assert check(lambertw(subj(a,d3),1), -1.0520914180450129534365906 + 0.0539925638125450525673175j) + assert check(lambertw(subj(a,d10),1), -1.0000164872127056318529390 + 0.000016487393927159250398333077j) + assert check(lambertw(subj(a,d20),1), -1.0000000001648721270700128 + 1.64872127088134693542628e-10j) + assert check(lambertw(subj(a,d40),1), -1.000000000000000000016487 + 1.64872127070012814686677e-20j) + assert check(lambertw(subj(a,d80),1), -1.0 + 1.64872127070012814684865e-40j) + assert check(lambertw(subj(a,d300),1), -1.0 + 1.64872127070012814684865e-150j) + assert check(lambertw(subj(a,d1000),1), mpc(-1.0, '1.64872127070012814684865e-500')) + # ---- Branch -1 ---- + # -1/e + eps + assert check(lambertw(add(a,d3),-1), -1.075608941186624989414945) + assert check(lambertw(add(a,d10),-1), -1.000023316621036696460620) + assert check(lambertw(add(a,d20),-1), -1.000000000233164398177834) + assert lambertw(add(a,d40),-1) == -1 + assert lambertw(add(a,d80),-1) == -1 + assert lambertw(add(a,d300),-1) == -1 + assert lambertw(add(a,d1000),-1) == -1 + # -1/e - eps + assert check(lambertw(sub(a,d3),-1), -0.99819016149860989001-0.07367191188934638577j) + assert check(lambertw(sub(a,d10),-1), -0.9999999998187812114595992-0.0000233164398140346109194j) + assert check(lambertw(sub(a,d20),-1), -0.99999999999999999998187-2.331643981597124203344e-10j) + assert check(lambertw(sub(a,d40),-1), -1.0-2.33164398159712420336e-20j) + assert check(lambertw(sub(a,d80),-1), -1.0-2.33164398159712420336e-40j) + assert check(lambertw(sub(a,d300),-1), -1.0-2.33164398159712420336e-150j) + assert check(lambertw(sub(a,d1000),-1), mpc(-1,'-2.33164398159712420336e-500')) + # -1/e + eps*j + assert check(lambertw(addj(a,d3),-1), -1.0520914180450129534365906 - 0.0539925638125450525673175j) + assert check(lambertw(addj(a,d10),-1), -1.0000164872127056318529390 - 0.0000164873939271592503983j) + assert 
check(lambertw(addj(a,d20),-1), -1.0000000001648721270700 - 1.64872127088134693542628e-10j) + assert check(lambertw(addj(a,d40),-1), -1.00000000000000000001648 - 1.6487212707001281468667726e-20j) + assert check(lambertw(addj(a,d80),-1), -1.0 - 1.64872127070012814684865e-40j) + assert check(lambertw(addj(a,d300),-1), -1.0 - 1.64872127070012814684865e-150j) + assert check(lambertw(addj(a,d1000),-1), mpc(-1.0,'-1.64872127070012814684865e-500')) + # -1/e - eps*j + assert check(lambertw(subj(a,d3),-1), -3.088501303219933378005990-7.458676867597474813950098j) + assert check(lambertw(subj(a,d10),-1), -3.088843015579260686911033-7.461489285372968780020716j) + assert check(lambertw(subj(a,d20),-1), -3.088843015613043855953708-7.461489285654254556877988j) + assert check(lambertw(subj(a,d40),-1), -3.088843015613043855957087-7.461489285654254556906117j) + assert check(lambertw(subj(a,d80),-1), -3.088843015613043855957087 - 7.461489285654254556906117j) + assert check(lambertw(subj(a,d300),-1), -3.088843015613043855957087 - 7.461489285654254556906117j) + assert check(lambertw(subj(a,d1000),-1), -3.088843015613043855957087 - 7.461489285654254556906117j) + # One more case, testing higher precision + mp.dps = 500 + x = -1/e + mpf('1e-13') + ans = "-0.99999926266961377166355784455394913638782494543377383"\ + "744978844374498153493943725364881490261187530235150668593869563"\ + "168276697689459394902153960200361935311512317183678882" + mp.dps = 15 + assert lambertw(x).ae(ans) + mp.dps = 50 + assert lambertw(x).ae(ans) + mp.dps = 150 + assert lambertw(x).ae(ans) + +def test_meijerg(): + mp.dps = 15 + assert meijerg([[2,3],[1]],[[0.5,2],[3,4]], 2.5).ae(4.2181028074787439386) + assert meijerg([[],[1+j]],[[1],[1]], 3+4j).ae(271.46290321152464592 - 703.03330399954820169j) + assert meijerg([[0.25],[1]],[[0.5],[2]],0) == 0 + assert meijerg([[0],[]],[[0,0,'1/3','2/3'], []], '2/27').ae(2.2019391389653314120) + # Verify 1/z series being used + assert meijerg([[-3],[-0.5]], [[-1],[-2.5]], -0.5).ae(-1.338096165935754898687431) + assert meijerg([[1-(-1)],[1-(-2.5)]], [[1-(-3)],[1-(-0.5)]], -2.0).ae(-1.338096165935754898687431) + assert meijerg([[-3],[-0.5]], [[-1],[-2.5]], -1).ae(-(pi+4)/(4*pi)) + a = 2.5 + b = 1.25 + for z in [mpf(0.25), mpf(2)]: + x1 = hyp1f1(a,b,z) + x2 = gamma(b)/gamma(a)*meijerg([[1-a],[]],[[0],[1-b]],-z) + x3 = gamma(b)/gamma(a)*meijerg([[1-0],[1-(1-b)]],[[1-(1-a)],[]],-1/z) + assert x1.ae(x2) + assert x1.ae(x3) + +def test_appellf1(): + mp.dps = 15 + assert appellf1(2,-2,1,1,2,3).ae(-1.75) + assert appellf1(2,1,-2,1,2,3).ae(-8) + assert appellf1(2,1,-2,1,0.5,0.25).ae(1.5) + assert appellf1(-2,1,3,2,3,3).ae(19) + assert appellf1(1,2,3,4,0.5,0.125).ae( 1.53843285792549786518) + +def test_coulomb(): + # Note: most tests are doctests + # Test for a bug: + mp.dps = 15 + assert coulombg(mpc(-5,0),2,3).ae(20.087729487721430394) + +def test_hyper_param_accuracy(): + mp.dps = 15 + As = [n+1e-10 for n in range(-5,-1)] + Bs = [n+1e-10 for n in range(-12,-5)] + assert hyper(As,Bs,10).ae(-381757055858.652671927) + assert legenp(0.5, 100, 0.25).ae(-2.4124576567211311755e+144) + assert (hyp1f1(1000,1,-100)*10**24).ae(5.2589445437370169113) + assert (hyp2f1(10, -900, 10.5, 0.99)*10**24).ae(1.9185370579660768203) + assert (hyp2f1(1000,1.5,-3.5,-1.5)*10**385).ae(-2.7367529051334000764) + assert hyp2f1(-5, 10, 3, 0.5, zeroprec=500) == 0 + assert (hyp1f1(-10000, 1000, 100)*10**424).ae(-3.1046080515824859974) + assert (hyp2f1(1000,1.5,-3.5,-0.75,maxterms=100000)*10**231).ae(-4.0534790813913998643) + assert legenp(2, 3, 
0.25) == 0
+    pytest.raises(ValueError, lambda: hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3]))
+    assert hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3], infprec=200) == inf
+    assert meijerg([[],[]],[[0,0,0,0],[]],0.1).ae(1.5680822343832351418)
+    assert (besselk(400,400)*10**94).ae(1.4387057277018550583)
+    mp.dps = 5
+    assert (hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
+    assert (hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
+    mp.dps = 15
+    assert (hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
+    assert (hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
+    assert hyp0f1(fadd(-20,'1e-100',exact=True), 0.25).ae(1.85014429040102783e+49)
+    assert hyp0f1((-20*10**100+1, 10**100), 0.25).ae(1.85014429040102783e+49)
+
+def test_hypercomb_zero_pow():
+    # check that 0^0 = 1
+    assert hypercomb(lambda a: (([0],[a],[],[],[],[],0),), [0]) == 1
+    assert meijerg([[-1.5],[]],[[0],[-0.75]],0).ae(1.4464090846320771425)
+
+def test_spherharm():
+    mp.dps = 15
+    t = 0.5; r = 0.25
+    assert spherharm(0,0,t,r).ae(0.28209479177387814347)
+    assert spherharm(1,-1,t,r).ae(0.16048941205971996369 - 0.04097967481096344271j)
+    assert spherharm(1,0,t,r).ae(0.42878904414183579379)
+    assert spherharm(1,1,t,r).ae(-0.16048941205971996369 - 0.04097967481096344271j)
+    assert spherharm(2,-2,t,r).ae(0.077915886919031181734 - 0.042565643022253962264j)
+    assert spherharm(2,-1,t,r).ae(0.31493387233497459884 - 0.08041582001959297689j)
+    assert spherharm(2,0,t,r).ae(0.41330596756220761898)
+    assert spherharm(2,1,t,r).ae(-0.31493387233497459884 - 0.08041582001959297689j)
+    assert spherharm(2,2,t,r).ae(0.077915886919031181734 + 0.042565643022253962264j)
+    assert spherharm(3,-3,t,r).ae(0.033640236589690881646 - 0.031339125318637082197j)
+    assert spherharm(3,-2,t,r).ae(0.18091018743101461963 - 0.09883168583167010241j)
+    assert spherharm(3,-1,t,r).ae(0.42796713930907320351 - 0.10927795157064962317j)
+    assert spherharm(3,0,t,r).ae(0.27861659336351639787)
+    assert spherharm(3,1,t,r).ae(-0.42796713930907320351 - 0.10927795157064962317j)
+    assert spherharm(3,2,t,r).ae(0.18091018743101461963 + 0.09883168583167010241j)
+    assert spherharm(3,3,t,r).ae(-0.033640236589690881646 - 0.031339125318637082197j)
+    assert spherharm(0,-1,t,r) == 0
+    assert spherharm(0,-2,t,r) == 0
+    assert spherharm(0,1,t,r) == 0
+    assert spherharm(0,2,t,r) == 0
+    assert spherharm(1,2,t,r) == 0
+    assert spherharm(1,3,t,r) == 0
+    assert spherharm(1,-2,t,r) == 0
+    assert spherharm(1,-3,t,r) == 0
+    assert spherharm(2,3,t,r) == 0
+    assert spherharm(2,4,t,r) == 0
+    assert spherharm(2,-3,t,r) == 0
+    assert spherharm(2,-4,t,r) == 0
+    assert spherharm(3,4.5,0.5,0.25).ae(-22.831053442240790148 + 10.910526059510013757j)
+    assert spherharm(2+3j, 1-j, 1+j, 3+4j).ae(-2.6582752037810116935 - 1.0909214905642160211j)
+    assert spherharm(-6,2.5,t,r).ae(0.39383644983851448178 + 0.28414687085358299021j)
+    assert spherharm(-3.5, 3, 0.5, 0.25).ae(0.014516852987544698924 - 0.015582769591477628495j)
+    assert spherharm(-3, 3, 0.5, 0.25) == 0
+    assert spherharm(-6, 3, 0.5, 0.25).ae(-0.16544349818782275459 - 0.15412657723253924562j)
+    assert spherharm(-6, 1.5, 0.5, 0.25).ae(0.032208193499767402477 + 0.012678000924063664921j)
+    assert spherharm(3,0,0,1).ae(0.74635266518023078283)
+    assert spherharm(3,-2,0,1) == 0
+    assert spherharm(3,-2,1,1).ae(-0.16270707338254028971 - 0.35552144137546777097j)
+
+def test_qfunctions():
+    mp.dps = 15
+    assert qp(2,3,100).ae('2.7291482267247332183e2391')
+
+def test_issue_239():
+    mp.prec = 150
+    x = ldexp(2476979795053773,-52)
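+    # (Illustrative aside, not from the upstream suite: mp.prec sets the
+    # working precision in bits while mp.dps counts decimal digits; setting
+    # either adjusts the other, so mp.prec = 150 corresponds to roughly 45
+    # significant digits for the betainc check below.)
+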
assert betainc(206, 385, 0, 0.55, 1).ae('0.99999999999999999999996570910644857895771110649954') + mp.dps = 15 + pytest.raises(ValueError, lambda: hyp2f1(-5,5,0.5,0.5)) + +# Extra stress testing for Bessel functions +# Reference zeros generated with the aid of scipy.special +# jn_zero, jnp_zero, yn_zero, ynp_zero + +V = 15 +M = 15 + +jn_small_zeros = \ +[[2.4048255576957728, + 5.5200781102863106, + 8.6537279129110122, + 11.791534439014282, + 14.930917708487786, + 18.071063967910923, + 21.211636629879259, + 24.352471530749303, + 27.493479132040255, + 30.634606468431975, + 33.775820213573569, + 36.917098353664044, + 40.058425764628239, + 43.19979171317673, + 46.341188371661814], + [3.8317059702075123, + 7.0155866698156188, + 10.173468135062722, + 13.323691936314223, + 16.470630050877633, + 19.615858510468242, + 22.760084380592772, + 25.903672087618383, + 29.046828534916855, + 32.189679910974404, + 35.332307550083865, + 38.474766234771615, + 41.617094212814451, + 44.759318997652822, + 47.901460887185447], + [5.1356223018406826, + 8.4172441403998649, + 11.619841172149059, + 14.795951782351261, + 17.959819494987826, + 21.116997053021846, + 24.270112313573103, + 27.420573549984557, + 30.569204495516397, + 33.7165195092227, + 36.86285651128381, + 40.008446733478192, + 43.153453778371463, + 46.297996677236919, + 49.442164110416873], + [6.3801618959239835, + 9.7610231299816697, + 13.015200721698434, + 16.223466160318768, + 19.409415226435012, + 22.582729593104442, + 25.748166699294978, + 28.908350780921758, + 32.064852407097709, + 35.218670738610115, + 38.370472434756944, + 41.520719670406776, + 44.669743116617253, + 47.817785691533302, + 50.965029906205183], + [7.5883424345038044, + 11.064709488501185, + 14.37253667161759, + 17.615966049804833, + 20.826932956962388, + 24.01901952477111, + 27.199087765981251, + 30.371007667117247, + 33.537137711819223, + 36.699001128744649, + 39.857627302180889, + 43.01373772335443, + 46.167853512924375, + 49.320360686390272, + 52.471551398458023], + [8.771483815959954, + 12.338604197466944, + 15.700174079711671, + 18.980133875179921, + 22.217799896561268, + 25.430341154222704, + 28.626618307291138, + 31.811716724047763, + 34.988781294559295, + 38.159868561967132, + 41.326383254047406, + 44.489319123219673, + 47.649399806697054, + 50.80716520300633, + 53.963026558378149], + [9.9361095242176849, + 13.589290170541217, + 17.003819667816014, + 20.320789213566506, + 23.58608443558139, + 26.820151983411405, + 30.033722386570469, + 33.233041762847123, + 36.422019668258457, + 39.603239416075404, + 42.778481613199507, + 45.949015998042603, + 49.11577372476426, + 52.279453903601052, + 55.440592068853149], + [11.086370019245084, + 14.821268727013171, + 18.287582832481726, + 21.641541019848401, + 24.934927887673022, + 28.191188459483199, + 31.42279419226558, + 34.637089352069324, + 37.838717382853611, + 41.030773691585537, + 44.21540850526126, + 47.394165755570512, + 50.568184679795566, + 53.738325371963291, + 56.905249991978781], + [12.225092264004655, + 16.037774190887709, + 19.554536430997055, + 22.94517313187462, + 26.266814641176644, + 29.54565967099855, + 32.795800037341462, + 36.025615063869571, + 39.240447995178135, + 42.443887743273558, + 45.638444182199141, + 48.825930381553857, + 52.007691456686903, + 55.184747939289049, + 58.357889025269694], + [13.354300477435331, + 17.241220382489128, + 20.807047789264107, + 24.233885257750552, + 27.583748963573006, + 30.885378967696675, + 34.154377923855096, + 37.400099977156589, + 40.628553718964528, + 43.843801420337347, + 
47.048700737654032, + 50.245326955305383, + 53.435227157042058, + 56.619580266508436, + 59.799301630960228], + [14.475500686554541, + 18.433463666966583, + 22.046985364697802, + 25.509450554182826, + 28.887375063530457, + 32.211856199712731, + 35.499909205373851, + 38.761807017881651, + 42.004190236671805, + 45.231574103535045, + 48.447151387269394, + 51.653251668165858, + 54.851619075963349, + 58.043587928232478, + 61.230197977292681], + [15.589847884455485, + 19.61596690396692, + 23.275853726263409, + 26.773322545509539, + 30.17906117878486, + 33.526364075588624, + 36.833571341894905, + 40.111823270954241, + 43.368360947521711, + 46.608132676274944, + 49.834653510396724, + 53.050498959135054, + 56.257604715114484, + 59.457456908388002, + 62.651217388202912], + [16.698249933848246, + 20.789906360078443, + 24.494885043881354, + 28.026709949973129, + 31.45996003531804, + 34.829986990290238, + 38.156377504681354, + 41.451092307939681, + 44.721943543191147, + 47.974293531269048, + 51.211967004101068, + 54.437776928325074, + 57.653844811906946, + 60.8618046824805, + 64.062937824850136], + [17.801435153282442, + 21.95624406783631, + 25.705103053924724, + 29.270630441874802, + 32.731053310978403, + 36.123657666448762, + 39.469206825243883, + 42.780439265447158, + 46.06571091157561, + 49.330780096443524, + 52.579769064383396, + 55.815719876305778, + 59.040934037249271, + 62.257189393731728, + 65.465883797232125], + [18.899997953174024, + 23.115778347252756, + 26.907368976182104, + 30.505950163896036, + 33.993184984781542, + 37.408185128639695, + 40.772827853501868, + 44.100590565798301, + 47.400347780543231, + 50.678236946479898, + 53.93866620912693, + 57.184898598119301, + 60.419409852130297, + 63.644117508962281, + 66.860533012260103]] + +jnp_small_zeros = \ +[[0.0, + 3.8317059702075123, + 7.0155866698156188, + 10.173468135062722, + 13.323691936314223, + 16.470630050877633, + 19.615858510468242, + 22.760084380592772, + 25.903672087618383, + 29.046828534916855, + 32.189679910974404, + 35.332307550083865, + 38.474766234771615, + 41.617094212814451, + 44.759318997652822], + [1.8411837813406593, + 5.3314427735250326, + 8.5363163663462858, + 11.706004902592064, + 14.863588633909033, + 18.015527862681804, + 21.16436985918879, + 24.311326857210776, + 27.457050571059246, + 30.601922972669094, + 33.746182898667383, + 36.889987409236811, + 40.033444053350675, + 43.176628965448822, + 46.319597561173912], + [3.0542369282271403, + 6.7061331941584591, + 9.9694678230875958, + 13.170370856016123, + 16.347522318321783, + 19.512912782488205, + 22.671581772477426, + 25.826037141785263, + 28.977672772993679, + 32.127327020443474, + 35.275535050674691, + 38.422654817555906, + 41.568934936074314, + 44.714553532819734, + 47.859641607992093], + [4.2011889412105285, + 8.0152365983759522, + 11.345924310743006, + 14.585848286167028, + 17.78874786606647, + 20.9724769365377, + 24.144897432909265, + 27.310057930204349, + 30.470268806290424, + 33.626949182796679, + 36.781020675464386, + 39.933108623659488, + 43.083652662375079, + 46.232971081836478, + 49.381300092370349], + [5.3175531260839944, + 9.2823962852416123, + 12.681908442638891, + 15.964107037731551, + 19.196028800048905, + 22.401032267689004, + 25.589759681386733, + 28.767836217666503, + 31.938539340972783, + 35.103916677346764, + 38.265316987088158, + 41.423666498500732, + 44.579623137359257, + 47.733667523865744, + 50.886159153182682], + [6.4156163757002403, + 10.519860873772308, + 13.9871886301403, + 17.312842487884625, + 20.575514521386888, + 
23.803581476593863, + 27.01030789777772, + 30.20284907898166, + 33.385443901010121, + 36.560777686880356, + 39.730640230067416, + 42.896273163494417, + 46.058566273567043, + 49.218174614666636, + 52.375591529563596], + [7.501266144684147, + 11.734935953042708, + 15.268181461097873, + 18.637443009666202, + 21.931715017802236, + 25.183925599499626, + 28.409776362510085, + 31.617875716105035, + 34.81339298429743, + 37.999640897715301, + 41.178849474321413, + 44.352579199070217, + 47.521956905768113, + 50.687817781723741, + 53.85079463676896], + [8.5778364897140741, + 12.932386237089576, + 16.529365884366944, + 19.941853366527342, + 23.268052926457571, + 26.545032061823576, + 29.790748583196614, + 33.015178641375142, + 36.224380548787162, + 39.422274578939259, + 42.611522172286684, + 45.793999658055002, + 48.971070951900596, + 52.143752969301988, + 55.312820330403446], + [9.6474216519972168, + 14.115518907894618, + 17.774012366915256, + 21.229062622853124, + 24.587197486317681, + 27.889269427955092, + 31.155326556188325, + 34.39662855427218, + 37.620078044197086, + 40.830178681822041, + 44.030010337966153, + 47.221758471887113, + 50.407020967034367, + 53.586995435398319, + 56.762598475105272], + [10.711433970699945, + 15.28673766733295, + 19.004593537946053, + 22.501398726777283, + 25.891277276839136, + 29.218563499936081, + 32.505247352375523, + 35.763792928808799, + 39.001902811514218, + 42.224638430753279, + 45.435483097475542, + 48.636922645305525, + 51.830783925834728, + 55.01844255063594, + 58.200955824859509], + [11.770876674955582, + 16.447852748486498, + 20.223031412681701, + 23.760715860327448, + 27.182021527190532, + 30.534504754007074, + 33.841965775135715, + 37.118000423665604, + 40.371068905333891, + 43.606764901379516, + 46.828959446564562, + 50.040428970943456, + 53.243223214220535, + 56.438892058982552, + 59.628631306921512], + [12.826491228033465, + 17.600266557468326, + 21.430854238060294, + 25.008518704644261, + 28.460857279654847, + 31.838424458616998, + 35.166714427392629, + 38.460388720328256, + 41.728625562624312, + 44.977526250903469, + 48.211333836373288, + 51.433105171422278, + 54.645106240447105, + 57.849056857839799, + 61.046288512821078], + [13.878843069697276, + 18.745090916814406, + 22.629300302835503, + 26.246047773946584, + 29.72897816891134, + 33.131449953571661, + 36.480548302231658, + 39.791940718940855, + 43.075486800191012, + 46.337772104541405, + 49.583396417633095, + 52.815686826850452, + 56.037118687012179, + 59.249577075517968, + 62.454525995970462], + [14.928374492964716, + 19.88322436109951, + 23.81938909003628, + 27.474339750968247, + 30.987394331665278, + 34.414545662167183, + 37.784378506209499, + 41.113512376883377, + 44.412454519229281, + 47.688252845993366, + 50.945849245830813, + 54.188831071035124, + 57.419876154678179, + 60.641030026538746, + 63.853885828967512], + [15.975438807484321, + 21.015404934568315, + 25.001971500138194, + 28.694271223110755, + 32.236969407878118, + 35.688544091185301, + 39.078998185245057, + 42.425854432866141, + 45.740236776624833, + 49.029635055514276, + 52.299319390331728, + 55.553127779547459, + 58.793933759028134, + 62.02393848337554, + 65.244860767043859]] + +yn_small_zeros = \ +[[0.89357696627916752, + 3.9576784193148579, + 7.0860510603017727, + 10.222345043496417, + 13.361097473872763, + 16.500922441528091, + 19.64130970088794, + 22.782028047291559, + 25.922957653180923, + 29.064030252728398, + 32.205204116493281, + 35.346452305214321, + 38.487756653081537, + 41.629104466213808, + 44.770486607221993], + 
[2.197141326031017, + 5.4296810407941351, + 8.5960058683311689, + 11.749154830839881, + 14.897442128336725, + 18.043402276727856, + 21.188068934142213, + 24.331942571356912, + 27.475294980449224, + 30.618286491641115, + 33.761017796109326, + 36.90355531614295, + 40.045944640266876, + 43.188218097393211, + 46.330399250701687], + [3.3842417671495935, + 6.7938075132682675, + 10.023477979360038, + 13.209986710206416, + 16.378966558947457, + 19.539039990286384, + 22.69395593890929, + 25.845613720902269, + 28.995080395650151, + 32.143002257627551, + 35.289793869635804, + 38.435733485446343, + 41.581014867297885, + 44.725777117640461, + 47.870122696676504], + [4.5270246611496439, + 8.0975537628604907, + 11.396466739595867, + 14.623077742393873, + 17.81845523294552, + 20.997284754187761, + 24.166235758581828, + 27.328799850405162, + 30.486989604098659, + 33.642049384702463, + 36.794791029185579, + 39.945767226378749, + 43.095367507846703, + 46.2438744334407, + 49.391498015725107], + [5.6451478942208959, + 9.3616206152445429, + 12.730144474090465, + 15.999627085382479, + 19.22442895931681, + 22.424810599698521, + 25.610267054939328, + 28.785893657666548, + 31.954686680031668, + 35.118529525584828, + 38.278668089521758, + 41.435960629910073, + 44.591018225353424, + 47.744288086361052, + 50.896105199722123], + [6.7471838248710219, + 10.597176726782031, + 14.033804104911233, + 17.347086393228382, + 20.602899017175335, + 23.826536030287532, + 27.030134937138834, + 30.220335654231385, + 33.401105611047908, + 36.574972486670962, + 39.743627733020277, + 42.908248189569535, + 46.069679073215439, + 49.228543693445843, + 52.385312123112282], + [7.8377378223268716, + 11.811037107609447, + 15.313615118517857, + 18.670704965906724, + 21.958290897126571, + 25.206207715021249, + 28.429037095235496, + 31.634879502950644, + 34.828638524084437, + 38.013473399691765, + 41.19151880917741, + 44.364272633271975, + 47.53281875312084, + 50.697961822183806, + 53.860312300118388], + [8.919605734873789, + 13.007711435388313, + 16.573915129085334, + 19.974342312352426, + 23.293972585596648, + 26.5667563757203, + 29.809531451608321, + 33.031769327150685, + 36.239265816598239, + 39.435790312675323, + 42.623910919472727, + 45.805442883111651, + 48.981708325514764, + 52.153694518185572, + 55.322154420959698], + [9.9946283820824834, + 14.190361295800141, + 17.817887841179873, + 21.26093227125945, + 24.612576377421522, + 27.910524883974868, + 31.173701563441602, + 34.412862242025045, + 37.634648706110989, + 40.843415321050884, + 44.04214994542435, + 47.232978012841169, + 50.417456447370186, + 53.596753874948731, + 56.771765754432457], + [11.064090256031013, + 15.361301343575925, + 19.047949646361388, + 22.532765416313869, + 25.91620496332662, + 29.2394205079349, + 32.523270869465881, + 35.779715464475261, + 39.016196664616095, + 42.237627509803703, + 45.4474001519274, + 48.647941127433196, + 51.841036928216499, + 55.028034667184916, + 58.209970905250097], + [12.128927704415439, + 16.522284394784426, + 20.265984501212254, + 23.791669719454272, + 27.206568881574774, + 30.555020011020762, + 33.859683872746356, + 37.133649760307504, + 40.385117593813002, + 43.619533085646856, + 46.840676630553575, + 50.051265851897857, + 53.253310556711732, + 56.448332488918971, + 59.637507005589829], + [13.189846995683845, + 17.674674253171487, + 21.473493977824902, + 25.03913093040942, + 28.485081336558058, + 31.858644293774859, + 35.184165245422787, + 38.475796636190897, + 41.742455848758449, + 44.990096293791186, + 48.222870660068338, + 
51.443777308699826, + 54.655042589416311, + 57.858358441436511, + 61.055036135780528], + [14.247395665073945, + 18.819555894710682, + 22.671697117872794, + 26.276375544903892, + 29.752925495549038, + 33.151412708998983, + 36.497763772987645, + 39.807134090704376, + 43.089121522203808, + 46.350163579538652, + 49.594769786270069, + 52.82620892320143, + 56.046916910756961, + 59.258751140598783, + 62.463155567737854], + [15.30200785858925, + 19.957808654258601, + 23.861599172945054, + 27.504429642227545, + 31.011103429019229, + 34.434283425782942, + 37.801385632318459, + 41.128514139788358, + 44.425913324440663, + 47.700482714581842, + 50.957073905278458, + 54.199216028087261, + 57.429547607017405, + 60.65008661807661, + 63.862406280068586], + [16.354034360047551, + 21.090156519983806, + 25.044040298785627, + 28.724161640881914, + 32.260472459522644, + 35.708083982611664, + 39.095820003878235, + 42.440684315990936, + 45.75353669045622, + 49.041718113283529, + 52.310408280968073, + 55.56338698149062, + 58.803488508906895, + 62.032886550960831, + 65.253280088312461]] + +ynp_small_zeros = \ +[[2.197141326031017, + 5.4296810407941351, + 8.5960058683311689, + 11.749154830839881, + 14.897442128336725, + 18.043402276727856, + 21.188068934142213, + 24.331942571356912, + 27.475294980449224, + 30.618286491641115, + 33.761017796109326, + 36.90355531614295, + 40.045944640266876, + 43.188218097393211, + 46.330399250701687], + [3.6830228565851777, + 6.9414999536541757, + 10.123404655436613, + 13.285758156782854, + 16.440058007293282, + 19.590241756629495, + 22.738034717396327, + 25.884314618788867, + 29.029575819372535, + 32.174118233366201, + 35.318134458192094, + 38.461753870997549, + 41.605066618873108, + 44.74813744908079, + 47.891014070791065], + [5.0025829314460639, + 8.3507247014130795, + 11.574195465217647, + 14.760909306207676, + 17.931285939466855, + 21.092894504412739, + 24.249231678519058, + 27.402145837145258, + 30.552708880564553, + 33.70158627151572, + 36.849213419846257, + 39.995887376143356, + 43.141817835750686, + 46.287157097544201, + 49.432018469138281], + [6.2536332084598136, + 9.6987879841487711, + 12.972409052292216, + 16.19044719506921, + 19.38238844973613, + 22.559791857764261, + 25.728213194724094, + 28.890678419054777, + 32.048984005266337, + 35.204266606440635, + 38.357281675961019, + 41.508551443818436, + 44.658448731963676, + 47.807246956681162, + 50.95515126455207], + [7.4649217367571329, + 11.005169149809189, + 14.3317235192331, + 17.58443601710272, + 20.801062338411128, + 23.997004122902644, + 27.179886689853435, + 30.353960608554323, + 33.521797098666792, + 36.685048382072301, + 39.844826969405863, + 43.001910515625288, + 46.15685955107263, + 49.310088614282257, + 52.461911043685864], + [8.6495562436971983, + 12.280868725807848, + 15.660799304540377, + 18.949739756016503, + 22.192841809428241, + 25.409072788867674, + 28.608039283077593, + 31.795195353138159, + 34.973890634255288, + 38.14630522169358, + 41.313923188794905, + 44.477791768537617, + 47.638672065035628, + 50.797131066967842, + 53.953600129601663], + [9.8147970120105779, + 13.532811875789828, + 16.965526446046053, + 20.291285512443867, + 23.56186260680065, + 26.799499736027237, + 30.015665481543419, + 33.216968050039509, + 36.407516858984748, + 39.590015243560459, + 42.766320595957378, + 45.937754257017323, + 49.105283450953203, + 52.269633324547373, + 55.431358715604255], + [10.965152105242974, + 14.765687379508912, + 18.250123150217555, + 21.612750053384621, + 24.911310600813573, + 28.171051927637585, + 
31.40518108895689, + 34.621401012564177, + 37.824552065973114, + 41.017847386464902, + 44.203512240871601, + 47.3831408366063, + 50.557907466622796, + 53.728697478957026, + 56.896191727313342], + [12.103641941939539, + 15.982840905145284, + 19.517731005559611, + 22.916962141504605, + 26.243700855690533, + 29.525960140695407, + 32.778568197561124, + 36.010261572392516, + 39.226578757802172, + 42.43122493258747, + 45.626783824134354, + 48.815117837929515, + 51.997606404328863, + 55.175294723956816, + 58.348990221754937], + [13.232403808592215, + 17.186756572616758, + 20.770762917490496, + 24.206152448722253, + 27.561059462697153, + 30.866053571250639, + 34.137476603379774, + 37.385039772270268, + 40.614946085165892, + 43.831373184731238, + 47.037251786726299, + 50.234705848765229, + 53.425316228549359, + 56.610286079882087, + 59.790548623216652], + [14.35301374369987, + 18.379337301642568, + 22.011118775283494, + 25.482116178696707, + 28.865046588695164, + 32.192853922166294, + 35.483296655830277, + 38.747005493021857, + 41.990815194320955, + 45.219355876831731, + 48.435892856078888, + 51.642803925173029, + 54.84186659475857, + 58.034439083840155, + 61.221578745109862], + [15.466672066554263, + 19.562077985759503, + 23.240325531101082, + 26.746322986645901, + 30.157042415639891, + 33.507642948240263, + 36.817212798512775, + 40.097251300178642, + 43.355193847719752, + 46.596103410173672, + 49.823567279972794, + 53.040208868780832, + 56.247996968470062, + 59.448441365714251, + 62.642721301357187], + [16.574317035530872, + 20.73617763753932, + 24.459631728238804, + 27.999993668839644, + 31.438208790267783, + 34.811512070805535, + 38.140243708611251, + 41.436725143893739, + 44.708963264433333, + 47.962435051891027, + 51.201037321915983, + 54.427630745992975, + 57.644369734615238, + 60.852911791989989, + 64.054555435720397], + [17.676697936439624, + 21.9026148697762, + 25.670073356263225, + 29.244155124266438, + 32.709534477396028, + 36.105399554497548, + 39.453272918267025, + 42.766255701958017, + 46.052899215578358, + 49.319076602061401, + 52.568982147952547, + 55.805705507386287, + 59.031580956740466, + 62.248409689597653, + 65.457606670836759], + [18.774423978290318, + 23.06220035979272, + 26.872520985976736, + 30.479680663499762, + 33.971869047372436, + 37.390118854896324, + 40.757072537673599, + 44.086572292170345, + 47.387688809191869, + 50.66667461073936, + 53.928009929563275, + 57.175005343085052, + 60.410169281219877, + 63.635442539153021, + 66.85235358587768]] + +@pytest.mark.slow +def test_bessel_zeros_extra(): + mp.dps = 15 + for v in range(V): + for m in range(1,M+1): + print(v, m, "of", V, M) + # Twice to test cache (if used) + assert besseljzero(v,m).ae(jn_small_zeros[v][m-1]) + assert besseljzero(v,m).ae(jn_small_zeros[v][m-1]) + assert besseljzero(v,m,1).ae(jnp_small_zeros[v][m-1]) + assert besseljzero(v,m,1).ae(jnp_small_zeros[v][m-1]) + assert besselyzero(v,m).ae(yn_small_zeros[v][m-1]) + assert besselyzero(v,m).ae(yn_small_zeros[v][m-1]) + assert besselyzero(v,m,1).ae(ynp_small_zeros[v][m-1]) + assert besselyzero(v,m,1).ae(ynp_small_zeros[v][m-1]) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_gammazeta.py b/phivenv/Lib/site-packages/mpmath/tests/test_gammazeta.py new file mode 100644 index 0000000000000000000000000000000000000000..6a18a7964d746561dfd5f81177cd78ccc46d2a5d --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_gammazeta.py @@ -0,0 +1,698 @@ +from mpmath import * +from mpmath.libmp import round_up, from_float, mpf_zeta_int + +def 
test_zeta_int_bug(): + assert mpf_zeta_int(0, 10) == from_float(-0.5) + +def test_bernoulli(): + assert bernfrac(0) == (1,1) + assert bernfrac(1) == (-1,2) + assert bernfrac(2) == (1,6) + assert bernfrac(3) == (0,1) + assert bernfrac(4) == (-1,30) + assert bernfrac(5) == (0,1) + assert bernfrac(6) == (1,42) + assert bernfrac(8) == (-1,30) + assert bernfrac(10) == (5,66) + assert bernfrac(12) == (-691,2730) + assert bernfrac(18) == (43867,798) + p, q = bernfrac(228) + assert p % 10**10 == 164918161 + assert q == 625170 + p, q = bernfrac(1000) + assert p % 10**10 == 7950421099 + assert q == 342999030 + mp.dps = 15 + assert bernoulli(0) == 1 + assert bernoulli(1) == -0.5 + assert bernoulli(2).ae(1./6) + assert bernoulli(3) == 0 + assert bernoulli(4).ae(-1./30) + assert bernoulli(5) == 0 + assert bernoulli(6).ae(1./42) + assert str(bernoulli(10)) == '0.0757575757575758' + assert str(bernoulli(234)) == '7.62772793964344e+267' + assert str(bernoulli(10**5)) == '-5.82229431461335e+376755' + assert str(bernoulli(10**8+2)) == '1.19570355039953e+676752584' + + mp.dps = 50 + assert str(bernoulli(10)) == '0.075757575757575757575757575757575757575757575757576' + assert str(bernoulli(234)) == '7.6277279396434392486994969020496121553385863373331e+267' + assert str(bernoulli(10**5)) == '-5.8222943146133508236497045360612887555320691004308e+376755' + assert str(bernoulli(10**8+2)) == '1.1957035503995297272263047884604346914602088317782e+676752584' + + mp.dps = 1000 + assert bernoulli(10).ae(mpf(5)/66) + + mp.dps = 50000 + assert bernoulli(10).ae(mpf(5)/66) + + mp.dps = 15 + +def test_bernpoly_eulerpoly(): + mp.dps = 15 + assert bernpoly(0,-1).ae(1) + assert bernpoly(0,0).ae(1) + assert bernpoly(0,'1/2').ae(1) + assert bernpoly(0,'3/4').ae(1) + assert bernpoly(0,1).ae(1) + assert bernpoly(0,2).ae(1) + assert bernpoly(1,-1).ae('-3/2') + assert bernpoly(1,0).ae('-1/2') + assert bernpoly(1,'1/2').ae(0) + assert bernpoly(1,'3/4').ae('1/4') + assert bernpoly(1,1).ae('1/2') + assert bernpoly(1,2).ae('3/2') + assert bernpoly(2,-1).ae('13/6') + assert bernpoly(2,0).ae('1/6') + assert bernpoly(2,'1/2').ae('-1/12') + assert bernpoly(2,'3/4').ae('-1/48') + assert bernpoly(2,1).ae('1/6') + assert bernpoly(2,2).ae('13/6') + assert bernpoly(3,-1).ae(-3) + assert bernpoly(3,0).ae(0) + assert bernpoly(3,'1/2').ae(0) + assert bernpoly(3,'3/4').ae('-3/64') + assert bernpoly(3,1).ae(0) + assert bernpoly(3,2).ae(3) + assert bernpoly(4,-1).ae('119/30') + assert bernpoly(4,0).ae('-1/30') + assert bernpoly(4,'1/2').ae('7/240') + assert bernpoly(4,'3/4').ae('7/3840') + assert bernpoly(4,1).ae('-1/30') + assert bernpoly(4,2).ae('119/30') + assert bernpoly(5,-1).ae(-5) + assert bernpoly(5,0).ae(0) + assert bernpoly(5,'1/2').ae(0) + assert bernpoly(5,'3/4').ae('25/1024') + assert bernpoly(5,1).ae(0) + assert bernpoly(5,2).ae(5) + assert bernpoly(10,-1).ae('665/66') + assert bernpoly(10,0).ae('5/66') + assert bernpoly(10,'1/2').ae('-2555/33792') + assert bernpoly(10,'3/4').ae('-2555/34603008') + assert bernpoly(10,1).ae('5/66') + assert bernpoly(10,2).ae('665/66') + assert bernpoly(11,-1).ae(-11) + assert bernpoly(11,0).ae(0) + assert bernpoly(11,'1/2').ae(0) + assert bernpoly(11,'3/4').ae('-555731/4194304') + assert bernpoly(11,1).ae(0) + assert bernpoly(11,2).ae(11) + assert eulerpoly(0,-1).ae(1) + assert eulerpoly(0,0).ae(1) + assert eulerpoly(0,'1/2').ae(1) + assert eulerpoly(0,'3/4').ae(1) + assert eulerpoly(0,1).ae(1) + assert eulerpoly(0,2).ae(1) + assert eulerpoly(1,-1).ae('-3/2') + assert eulerpoly(1,0).ae('-1/2') + assert 
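eulerpoly(0,'1/3').ae(1)
    # --- editor's addition (not upstream code): the sign patterns above follow
    # from the reflection identities B_n(1-x) = (-1)**n * B_n(x) and
    # E_n(1-x) = (-1)**n * E_n(x); a minimal direct check (loop names _n, _x
    # are illustration-only):
    for _n in range(8):
        for _x in [0, 0.25, 0.5, 2]:
            assert bernpoly(_n, 1 - _x).ae((-1)**_n * bernpoly(_n, _x))
            assert eulerpoly(_n, 1 - _x).ae((-1)**_n * eulerpoly(_n, _x))
    assert 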
eulerpoly(1,'1/2').ae(0) + assert eulerpoly(1,'3/4').ae('1/4') + assert eulerpoly(1,1).ae('1/2') + assert eulerpoly(1,2).ae('3/2') + assert eulerpoly(2,-1).ae(2) + assert eulerpoly(2,0).ae(0) + assert eulerpoly(2,'1/2').ae('-1/4') + assert eulerpoly(2,'3/4').ae('-3/16') + assert eulerpoly(2,1).ae(0) + assert eulerpoly(2,2).ae(2) + assert eulerpoly(3,-1).ae('-9/4') + assert eulerpoly(3,0).ae('1/4') + assert eulerpoly(3,'1/2').ae(0) + assert eulerpoly(3,'3/4').ae('-11/64') + assert eulerpoly(3,1).ae('-1/4') + assert eulerpoly(3,2).ae('9/4') + assert eulerpoly(4,-1).ae(2) + assert eulerpoly(4,0).ae(0) + assert eulerpoly(4,'1/2').ae('5/16') + assert eulerpoly(4,'3/4').ae('57/256') + assert eulerpoly(4,1).ae(0) + assert eulerpoly(4,2).ae(2) + assert eulerpoly(5,-1).ae('-3/2') + assert eulerpoly(5,0).ae('-1/2') + assert eulerpoly(5,'1/2').ae(0) + assert eulerpoly(5,'3/4').ae('361/1024') + assert eulerpoly(5,1).ae('1/2') + assert eulerpoly(5,2).ae('3/2') + assert eulerpoly(10,-1).ae(2) + assert eulerpoly(10,0).ae(0) + assert eulerpoly(10,'1/2').ae('-50521/1024') + assert eulerpoly(10,'3/4').ae('-36581523/1048576') + assert eulerpoly(10,1).ae(0) + assert eulerpoly(10,2).ae(2) + assert eulerpoly(11,-1).ae('-699/4') + assert eulerpoly(11,0).ae('691/4') + assert eulerpoly(11,'1/2').ae(0) + assert eulerpoly(11,'3/4').ae('-512343611/4194304') + assert eulerpoly(11,1).ae('-691/4') + assert eulerpoly(11,2).ae('699/4') + # Potential accuracy issues + assert bernpoly(10000,10000).ae('5.8196915936323387117e+39999') + assert bernpoly(200,17.5).ae(3.8048418524583064909e244) + assert eulerpoly(200,17.5).ae(-3.7309911582655785929e275) + +def test_gamma(): + mp.dps = 15 + assert gamma(0.25).ae(3.6256099082219083119) + assert gamma(0.0001).ae(9999.4228832316241908) + assert gamma(300).ae('1.0201917073881354535e612') + assert gamma(-0.5).ae(-3.5449077018110320546) + assert gamma(-7.43).ae(0.00026524416464197007186) + #assert gamma(Rational(1,2)) == gamma(0.5) + #assert gamma(Rational(-7,3)).ae(gamma(mpf(-7)/3)) + assert gamma(1+1j).ae(0.49801566811835604271 - 0.15494982830181068512j) + assert gamma(-1+0.01j).ae(-0.422733904013474115 + 99.985883082635367436j) + assert gamma(20+30j).ae(-1453876687.5534810 + 1163777777.8031573j) + # Should always give exact factorials when they can + # be represented as mpfs under the current working precision + fact = 1 + for i in range(1, 18): + assert gamma(i) == fact + fact *= i + for dps in [170, 600]: + fact = 1 + mp.dps = dps + for i in range(1, 105): + assert gamma(i) == fact + fact *= i + mp.dps = 100 + assert gamma(0.5).ae(sqrt(pi)) + mp.dps = 15 + assert factorial(0) == fac(0) == 1 + assert factorial(3) == 6 + assert isnan(gamma(nan)) + assert gamma(1100).ae('4.8579168073569433667e2866') + assert rgamma(0) == 0 + assert rgamma(-1) == 0 + assert rgamma(2) == 1.0 + assert rgamma(3) == 0.5 + assert loggamma(2+8j).ae(-8.5205176753667636926 + 10.8569497125597429366j) + assert loggamma('1e10000').ae('2.302485092994045684017991e10004') + assert loggamma('1e10000j').ae(mpc('-1.570796326794896619231322e10000','2.302485092994045684017991e10004')) + +def test_fac2(): + mp.dps = 15 + assert [fac2(n) for n in range(10)] == [1,1,2,3,8,15,48,105,384,945] + assert fac2(-5).ae(1./3) + assert fac2(-11).ae(-1./945) + assert fac2(50).ae(5.20469842636666623e32) + assert fac2(0.5+0.75j).ae(0.81546769394688069176-0.34901016085573266889j) + assert fac2(inf) == inf + assert isnan(fac2(-inf)) + +def test_gamma_quotients(): + mp.dps = 15 + h = 1e-8 + ep = 1e-4 + G = gamma + assert 
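G(1) == 1
    # --- editor's note (addition, not upstream): gammaprod(a, b) evaluates
    # prod(gamma(a_i)) / prod(gamma(b_j)) in the limiting sense when arguments
    # hit the poles at 0, -1, -2, ...; the h-shifted quotients below
    # approximate those limits numerically, hence the loose 1e-4 tolerances.
    assert 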
gammaprod([-1],[-3,-4]) == 0 + assert gammaprod([-1,0],[-5]) == inf + assert abs(gammaprod([-1],[-2]) - G(-1+h)/G(-2+h)) < 1e-4 + assert abs(gammaprod([-4,-3],[-2,0]) - G(-4+h)*G(-3+h)/G(-2+h)/G(0+h)) < 1e-4 + assert rf(3,0) == 1 + assert rf(2.5,1) == 2.5 + assert rf(-5,2) == 20 + assert rf(j,j).ae(gamma(2*j)/gamma(j)) + assert rf('-255.5815971722918','-0.5119253100282322').ae('-0.1952720278805729485') # issue 421 + assert ff(-2,0) == 1 + assert ff(-2,1) == -2 + assert ff(4,3) == 24 + assert ff(3,4) == 0 + assert binomial(0,0) == 1 + assert binomial(1,0) == 1 + assert binomial(0,-1) == 0 + assert binomial(3,2) == 3 + assert binomial(5,2) == 10 + assert binomial(5,3) == 10 + assert binomial(5,5) == 1 + assert binomial(-1,0) == 1 + assert binomial(-2,-4) == 3 + assert binomial(4.5, 1.5) == 6.5625 + assert binomial(1100,1) == 1100 + assert binomial(1100,2) == 604450 + assert beta(1,1) == 1 + assert beta(0,0) == inf + assert beta(3,0) == inf + assert beta(-1,-1) == inf + assert beta(1.5,1).ae(2/3.) + assert beta(1.5,2.5).ae(pi/16) + assert (10**15*beta(10,100)).ae(2.3455339739604649879) + assert beta(inf,inf) == 0 + assert isnan(beta(-inf,inf)) + assert isnan(beta(-3,inf)) + assert isnan(beta(0,inf)) + assert beta(inf,0.5) == beta(0.5,inf) == 0 + assert beta(inf,-1.5) == inf + assert beta(inf,-0.5) == -inf + assert beta(1+2j,-1-j/2).ae(1.16396542451069943086+0.08511695947832914640j) + assert beta(-0.5,0.5) == 0 + assert beta(-3,3).ae(-1/3.) + assert beta('-255.5815971722918','-0.5119253100282322').ae('18.157330562703710339') # issue 421 + +def test_zeta(): + mp.dps = 15 + assert zeta(2).ae(pi**2 / 6) + assert zeta(2.0).ae(pi**2 / 6) + assert zeta(mpc(2)).ae(pi**2 / 6) + assert zeta(100).ae(1) + assert zeta(0).ae(-0.5) + assert zeta(0.5).ae(-1.46035450880958681) + assert zeta(-1).ae(-mpf(1)/12) + assert zeta(-2) == 0 + assert zeta(-3).ae(mpf(1)/120) + assert zeta(-4) == 0 + assert zeta(-100) == 0 + assert isnan(zeta(nan)) + assert zeta(1e-30).ae(-0.5) + assert zeta(-1e-30).ae(-0.5) + # Zeros in the critical strip + assert zeta(mpc(0.5, 14.1347251417346937904)).ae(0) + assert zeta(mpc(0.5, 21.0220396387715549926)).ae(0) + assert zeta(mpc(0.5, 25.0108575801456887632)).ae(0) + assert zeta(mpc(1e-30,1e-40)).ae(-0.5) + assert zeta(mpc(-1e-30,1e-40)).ae(-0.5) + mp.dps = 50 + im = '236.5242296658162058024755079556629786895294952121891237' + assert zeta(mpc(0.5, im)).ae(0, 1e-46) + mp.dps = 15 + # Complex reflection formula + assert (zeta(-60+3j) / 10**34).ae(8.6270183987866146+15.337398548226238j) + # issue #358 + assert zeta(0,0.5) == 0 + assert zeta(0,0) == 0.5 + assert zeta(0,0.5,1).ae(-0.34657359027997265) + # see issue #390 + assert zeta(-1.5,0.5j).ae(-0.13671400162512768475 + 0.11411333638426559139j) + +def test_altzeta(): + mp.dps = 15 + assert altzeta(-2) == 0 + assert altzeta(-4) == 0 + assert altzeta(-100) == 0 + assert altzeta(0) == 0.5 + assert altzeta(-1) == 0.25 + assert altzeta(-3) == -0.125 + assert altzeta(-5) == 0.25 + assert altzeta(-21) == 1180529130.25 + assert altzeta(1).ae(log(2)) + assert altzeta(2).ae(pi**2/12) + assert altzeta(10).ae(73*pi**10/6842880) + assert altzeta(50) < 1 + assert altzeta(60, rounding='d') < 1 + assert altzeta(60, rounding='u') == 1 + assert altzeta(10000, rounding='d') < 1 + assert altzeta(10000, rounding='u') == 1 + assert altzeta(3+0j) == altzeta(3) + s = 3+4j + assert altzeta(s).ae((1-2**(1-s))*zeta(s)) + s = -3+4j + assert altzeta(s).ae((1-2**(1-s))*zeta(s)) + assert altzeta(-100.5).ae(4.58595480083585913e+108) + assert 
altzeta(1.3).ae(0.73821404216623045) + assert altzeta(1e-30).ae(0.5) + assert altzeta(-1e-30).ae(0.5) + assert altzeta(mpc(1e-30,1e-40)).ae(0.5) + assert altzeta(mpc(-1e-30,1e-40)).ae(0.5) + +def test_zeta_huge(): + mp.dps = 15 + assert zeta(inf) == 1 + mp.dps = 50 + assert zeta(100).ae('1.0000000000000000000000000000007888609052210118073522') + assert zeta(40*pi).ae('1.0000000000000000000000000000000000000148407238666182') + mp.dps = 10000 + v = zeta(33000) + mp.dps = 15 + assert str(v-1) == '1.02363019598118e-9934' + assert zeta(pi*1000, rounding=round_up) > 1 + assert zeta(3000, rounding=round_up) > 1 + assert zeta(pi*1000) == 1 + assert zeta(3000) == 1 + +def test_zeta_negative(): + mp.dps = 150 + a = -pi*10**40 + mp.dps = 15 + assert str(zeta(a)) == '2.55880492708712e+1233536161668617575553892558646631323374078' + mp.dps = 50 + assert str(zeta(a)) == '2.5588049270871154960875033337384432038436330847333e+1233536161668617575553892558646631323374078' + mp.dps = 15 + +def test_polygamma(): + mp.dps = 15 + psi0 = lambda z: psi(0,z) + psi1 = lambda z: psi(1,z) + assert psi0(3) == psi(0,3) == digamma(3) + #assert psi2(3) == psi(2,3) == tetragamma(3) + #assert psi3(3) == psi(3,3) == pentagamma(3) + assert psi0(pi).ae(0.97721330794200673) + assert psi0(-pi).ae(7.8859523853854902) + assert psi0(-pi+1).ae(7.5676424992016996) + assert psi0(pi+j).ae(1.04224048313859376 + 0.35853686544063749j) + assert psi0(-pi-j).ae(1.3404026194821986 - 2.8824392476809402j) + assert findroot(psi0, 1).ae(1.4616321449683622) + assert psi0(1e-10).ae(-10000000000.57722) + assert psi0(1e-40).ae(-1.000000000000000e+40) + assert psi0(1e-10+1e-10j).ae(-5000000000.577215 + 5000000000.000000j) + assert psi0(1e-40+1e-40j).ae(-5.000000000000000e+39 + 5.000000000000000e+39j) + assert psi0(inf) == inf + assert psi1(inf) == 0 + assert psi(2,inf) == 0 + assert psi1(pi).ae(0.37424376965420049) + assert psi1(-pi).ae(53.030438740085385) + assert psi1(pi+j).ae(0.32935710377142464 - 0.12222163911221135j) + assert psi1(-pi-j).ae(-0.30065008356019703 + 0.01149892486928227j) + assert (10**6*psi(4,1+10*pi*j)).ae(-6.1491803479004446 - 0.3921316371664063j) + assert psi0(1+10*pi*j).ae(3.4473994217222650 + 1.5548808324857071j) + assert isnan(psi0(nan)) + assert isnan(psi0(-inf)) + assert psi0(-100.5).ae(4.615124601338064) + assert psi0(3+0j).ae(psi0(3)) + assert psi0(-100+3j).ae(4.6106071768714086321+3.1117510556817394626j) + assert isnan(psi(2,mpc(0,inf))) + assert isnan(psi(2,mpc(0,nan))) + assert isnan(psi(2,mpc(0,-inf))) + assert isnan(psi(2,mpc(1,inf))) + assert isnan(psi(2,mpc(1,nan))) + assert isnan(psi(2,mpc(1,-inf))) + assert isnan(psi(2,mpc(inf,inf))) + assert isnan(psi(2,mpc(nan,nan))) + assert isnan(psi(2,mpc(-inf,-inf))) + mp.dps = 30 + # issue #534 + assert digamma(-0.75+1j).ae(mpc('0.46317279488182026118963809283042317', '2.4821070143037957102007677817351115')) + mp.dps = 15 + +def test_polygamma_high_prec(): + mp.dps = 100 + assert str(psi(0,pi)) == "0.9772133079420067332920694864061823436408346099943256380095232865318105924777141317302075654362928734" + assert str(psi(10,pi)) == "-12.98876181434889529310283769414222588307175962213707170773803550518307617769657562747174101900659238" + +def test_polygamma_identities(): + mp.dps = 15 + psi0 = lambda z: psi(0,z) + psi1 = lambda z: psi(1,z) + psi2 = lambda z: psi(2,z) + assert psi0(0.5).ae(-euler-2*log(2)) + assert psi0(1).ae(-euler) + assert psi1(0.5).ae(0.5*pi**2) + assert psi1(1).ae(pi**2/6) + assert psi1(0.25).ae(pi**2 + 8*catalan) + assert psi2(1).ae(-2*apery) + mp.dps = 20 + 
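    # --- editor's addition (not upstream code): psi(1, 1/4) = pi**2 + 8*catalan
    # above is the classic quarter-argument value; the reflection formula
    # psi(1, x) + psi(1, 1-x) = pi**2 / sin(pi*x)**2 then gives the companion
    # value psi(1, 3/4) = pi**2 - 8*catalan, checked here at the temporary
    # dps = 20:
    assert psi(1, 0.75).ae(pi**2 - 8*catalan)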
u = -182*apery+4*sqrt(3)*pi**3 + mp.dps = 15 + assert psi(2,5/6.).ae(u) + assert psi(3,0.5).ae(pi**4) + +def test_foxtrot_identity(): + # A test of the complex digamma function. + # See http://mathworld.wolfram.com/FoxTrotSeries.html and + # http://mathworld.wolfram.com/DigammaFunction.html + psi0 = lambda z: psi(0,z) + mp.dps = 50 + a = (-1)**fraction(1,3) + b = (-1)**fraction(2,3) + x = -psi0(0.5*a) - psi0(-0.5*b) + psi0(0.5*(1+a)) + psi0(0.5*(1-b)) + y = 2*pi*sech(0.5*sqrt(3)*pi) + assert x.ae(y) + mp.dps = 15 + +def test_polygamma_high_order(): + mp.dps = 100 + assert str(psi(50, pi)) == "-1344100348958402765749252447726432491812.641985273160531055707095989227897753035823152397679626136483" + assert str(psi(50, pi + 14*e)) == "-0.00000000000000000189793739550804321623512073101895801993019919886375952881053090844591920308111549337295143780341396" + assert str(psi(50, pi + 14*e*j)) == ("(-0.0000000000000000522516941152169248975225472155683565752375889510631513244785" + "9377385233700094871256507814151956624433 - 0.00000000000000001813157041407010184" + "702414110218205348527862196327980417757665282244728963891298080199341480881811613j)") + mp.dps = 15 + assert str(psi(50, pi)) == "-1.34410034895841e+39" + assert str(psi(50, pi + 14*e)) == "-1.89793739550804e-18" + assert str(psi(50, pi + 14*e*j)) == "(-5.2251694115217e-17 - 1.81315704140701e-17j)" + +def test_harmonic(): + mp.dps = 15 + assert harmonic(0) == 0 + assert harmonic(1) == 1 + assert harmonic(2) == 1.5 + assert harmonic(3).ae(1. + 1./2 + 1./3) + assert harmonic(10**10).ae(23.603066594891989701) + assert harmonic(10**1000).ae(2303.162308658947) + assert harmonic(0.5).ae(2-2*log(2)) + assert harmonic(inf) == inf + assert harmonic(2+0j) == 1.5+0j + assert harmonic(1+2j).ae(1.4918071802755104+0.92080728264223022j) + +def test_gamma_huge_1(): + mp.dps = 500 + x = mpf(10**10) / 7 + mp.dps = 15 + assert str(gamma(x)) == "6.26075321389519e+12458010678" + mp.dps = 50 + assert str(gamma(x)) == "6.2607532138951929201303779291707455874010420783933e+12458010678" + mp.dps = 15 + +def test_gamma_huge_2(): + mp.dps = 500 + x = mpf(10**100) / 19 + mp.dps = 15 + assert str(gamma(x)) == (\ + "1.82341134776679e+5172997469323364168990133558175077136829182824042201886051511" + "9656908623426021308685461258226190190661") + mp.dps = 50 + assert str(gamma(x)) == (\ + "1.82341134776678875374414910350027596939980412984e+5172997469323364168990133558" + "1750771368291828240422018860515119656908623426021308685461258226190190661") + +def test_gamma_huge_3(): + mp.dps = 500 + x = 10**80 // 3 + 10**70*j / 7 + mp.dps = 15 + y = gamma(x) + assert str(y.real) == (\ + "-6.82925203918106e+2636286142112569524501781477865238132302397236429627932441916" + "056964386399485392600") + assert str(y.imag) == (\ + "8.54647143678418e+26362861421125695245017814778652381323023972364296279324419160" + "56964386399485392600") + mp.dps = 50 + y = gamma(x) + assert str(y.real) == (\ + "-6.8292520391810548460682736226799637356016538421817e+26362861421125695245017814" + "77865238132302397236429627932441916056964386399485392600") + assert str(y.imag) == (\ + "8.5464714367841748507479306948130687511711420234015e+263628614211256952450178147" + "7865238132302397236429627932441916056964386399485392600") + +def test_gamma_huge_4(): + x = 3200+11500j + mp.dps = 15 + assert str(gamma(x)) == \ + "(8.95783268539713e+5164 - 1.94678798329735e+5164j)" + mp.dps = 50 + assert str(gamma(x)) == (\ + "(8.9578326853971339570292952697675570822206567327092e+5164" + " - 
1.9467879832973509568895402139429643650329524144794e+51" + "64j)") + mp.dps = 15 + +def test_gamma_huge_5(): + mp.dps = 500 + x = 10**60 * j / 3 + mp.dps = 15 + y = gamma(x) + assert str(y.real) == "-3.27753899634941e-227396058973640224580963937571892628368354580620654233316839" + assert str(y.imag) == "-7.1519888950416e-227396058973640224580963937571892628368354580620654233316841" + mp.dps = 50 + y = gamma(x) + assert str(y.real) == (\ + "-3.2775389963494132168950056995974690946983219123935e-22739605897364022458096393" + "7571892628368354580620654233316839") + assert str(y.imag) == (\ + "-7.1519888950415979749736749222530209713136588885897e-22739605897364022458096393" + "7571892628368354580620654233316841") + mp.dps = 15 + +def test_gamma_huge_7(): + mp.dps = 100 + a = 3 + j/mpf(10)**1000 + mp.dps = 15 + y = gamma(a) + assert str(y.real) == "2.0" + # wrong + #assert str(y.imag) == "2.16735365342606e-1000" + assert str(y.imag) == "1.84556867019693e-1000" + mp.dps = 50 + y = gamma(a) + assert str(y.real) == "2.0" + #assert str(y.imag) == "2.1673536534260596065418805612488708028522563689298e-1000" + assert str(y.imag) == "1.8455686701969342787869758198351951379156813281202e-1000" + +def test_stieltjes(): + mp.dps = 15 + assert stieltjes(0).ae(+euler) + mp.dps = 25 + assert stieltjes(1).ae('-0.07281584548367672486058637587') + assert stieltjes(2).ae('-0.009690363192872318484530386035') + assert stieltjes(3).ae('0.002053834420303345866160046543') + assert stieltjes(4).ae('0.002325370065467300057468170178') + mp.dps = 15 + assert stieltjes(1).ae(-0.07281584548367672486058637587) + assert stieltjes(2).ae(-0.009690363192872318484530386035) + assert stieltjes(3).ae(0.002053834420303345866160046543) + assert stieltjes(4).ae(0.0023253700654673000574681701775) + +def test_barnesg(): + mp.dps = 15 + assert barnesg(0) == barnesg(-1) == 0 + assert [superfac(i) for i in range(8)] == [1, 1, 2, 12, 288, 34560, 24883200, 125411328000] + assert str(superfac(1000)) == '3.24570818422368e+1177245' + assert isnan(barnesg(nan)) + assert isnan(superfac(nan)) + assert isnan(hyperfac(nan)) + assert barnesg(inf) == inf + assert superfac(inf) == inf + assert hyperfac(inf) == inf + assert isnan(superfac(-inf)) + assert barnesg(0.7).ae(0.8068722730141471) + assert barnesg(2+3j).ae(-0.17810213864082169+0.04504542715447838j) + assert [hyperfac(n) for n in range(7)] == [1, 1, 4, 108, 27648, 86400000, 4031078400000] + assert [hyperfac(n) for n in range(0,-7,-1)] == [1,1,-1,-4,108,27648,-86400000] + a = barnesg(-3+0j) + assert a == 0 and isinstance(a, mpc) + a = hyperfac(-3+0j) + assert a == -4 and isinstance(a, mpc) + +def test_polylog(): + mp.dps = 15 + zs = [mpmathify(z) for z in [0, 0.5, 0.99, 4, -0.5, -4, 1j, 3+4j]] + for z in zs: assert polylog(1, z).ae(-log(1-z)) + for z in zs: assert polylog(0, z).ae(z/(1-z)) + for z in zs: assert polylog(-1, z).ae(z/(1-z)**2) + for z in zs: assert polylog(-2, z).ae(z*(1+z)/(1-z)**3) + for z in zs: assert polylog(-3, z).ae(z*(1+4*z+z**2)/(1-z)**4) + assert polylog(3, 7).ae(5.3192579921456754382-5.9479244480803301023j) + assert polylog(3, -7).ae(-4.5693548977219423182) + assert polylog(2, 0.9).ae(1.2997147230049587252) + assert polylog(2, -0.9).ae(-0.75216317921726162037) + assert polylog(2, 0.9j).ae(-0.17177943786580149299+0.83598828572550503226j) + assert polylog(2, 1.1).ae(1.9619991013055685931-0.2994257606855892575j) + assert polylog(2, -1.1).ae(-0.89083809026228260587) + assert polylog(2, 1.1*sqrt(j)).ae(0.58841571107611387722+1.09962542118827026011j) + assert polylog(-2, 
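0.5).ae(6)
    # --- editor's note (addition, not upstream): for negative integer orders
    # the polylogarithm is a rational function, obtained from Li_0(z) = z/(1-z)
    # by repeatedly applying z*d/dz; e.g. Li_{-2}(z) = z*(1+z)/(1-z)**3, so
    # Li_{-2}(1/2) = 6 and Li_{-2}(9/10) = 1710, matching the checks here.
    assert polylog(-2, 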
0.9).ae(1710) + assert polylog(-2, -0.9).ae(-90/6859.) + assert polylog(3, 0.9).ae(1.0496589501864398696) + assert polylog(-3, 0.9).ae(48690) + assert polylog(-3, -4).ae(-0.0064) + assert polylog(0.5+j/3, 0.5+j/2).ae(0.31739144796565650535 + 0.99255390416556261437j) + assert polylog(3+4j,1).ae(zeta(3+4j)) + assert polylog(3+4j,-1).ae(-altzeta(3+4j)) + # issue 390 + assert polylog(1.5, -48.910886523731889).ae(-6.272992229311817) + assert polylog(1.5, 200).ae(-8.349608319033686529 - 8.159694826434266042j) + assert polylog(-2+0j, -2).ae(mpf(1)/13.5) + assert polylog(-2+0j, 1.25).ae(-180) + +def test_bell_polyexp(): + mp.dps = 15 + # TODO: more tests for polyexp + assert (polyexp(0,1e-10)*10**10).ae(1.00000000005) + assert (polyexp(1,1e-10)*10**10).ae(1.0000000001) + assert polyexp(5,3j).ae(-607.7044517476176454+519.962786482001476087j) + assert polyexp(-1,3.5).ae(12.09537536175543444) + # bell(0,x) = 1 + assert bell(0,0) == 1 + assert bell(0,1) == 1 + assert bell(0,2) == 1 + assert bell(0,inf) == 1 + assert bell(0,-inf) == 1 + assert isnan(bell(0,nan)) + # bell(1,x) = x + assert bell(1,4) == 4 + assert bell(1,0) == 0 + assert bell(1,inf) == inf + assert bell(1,-inf) == -inf + assert isnan(bell(1,nan)) + # bell(2,x) = x*(1+x) + assert bell(2,-1) == 0 + assert bell(2,0) == 0 + # large orders / arguments + assert bell(10) == 115975 + assert bell(10,1) == 115975 + assert bell(10, -8) == 11054008 + assert bell(5,-50) == -253087550 + assert bell(50,-50).ae('3.4746902914629720259e74') + mp.dps = 80 + assert bell(50,-50) == 347469029146297202586097646631767227177164818163463279814268368579055777450 + assert bell(40,50) == 5575520134721105844739265207408344706846955281965031698187656176321717550 + assert bell(74) == 5006908024247925379707076470957722220463116781409659160159536981161298714301202 + mp.dps = 15 + assert bell(10,20j) == 7504528595600+15649605360020j + # continuity of the generalization + assert bell(0.5,0).ae(sinc(pi*0.5)) + +def test_primezeta(): + mp.dps = 15 + assert primezeta(0.9).ae(1.8388316154446882243 + 3.1415926535897932385j) + assert primezeta(4).ae(0.076993139764246844943) + assert primezeta(1) == inf + assert primezeta(inf) == 0 + assert isnan(primezeta(nan)) + +def test_rs_zeta(): + mp.dps = 15 + assert zeta(0.5+100000j).ae(1.0730320148577531321 + 5.7808485443635039843j) + assert zeta(0.75+100000j).ae(1.837852337251873704 + 1.9988492668661145358j) + assert zeta(0.5+1000000j, derivative=3).ae(1647.7744105852674733 - 1423.1270943036622097j) + assert zeta(1+1000000j, derivative=3).ae(3.4085866124523582894 - 18.179184721525947301j) + assert zeta(1+1000000j, derivative=1).ae(-0.10423479366985452134 - 0.74728992803359056244j) + assert zeta(0.5-1000000j, derivative=1).ae(11.636804066002521459 + 17.127254072212996004j) + # Additional sanity tests using fp arithmetic. 
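    # --- editor's note (addition, not upstream): "rs" refers to the
    # Riemann-Siegel expansion, which mpmath uses for zeta(s) and its
    # derivatives far up the critical strip; the fp checks below use a loose
    # relative tolerance because fp works at fixed double precision.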
+ # Some more high-precision tests are found in the docstrings + def ae(x, y, tol=1e-6): + return abs(x-y) < tol*abs(y) + assert ae(fp.zeta(0.5-100000j), 1.0730320148577531321 - 5.7808485443635039843j) + assert ae(fp.zeta(0.75-100000j), 1.837852337251873704 - 1.9988492668661145358j) + assert ae(fp.zeta(0.5+1e6j), 0.076089069738227100006 + 2.8051021010192989554j) + assert ae(fp.zeta(0.5+1e6j, derivative=1), 11.636804066002521459 - 17.127254072212996004j) + assert ae(fp.zeta(1+1e6j), 0.94738726251047891048 + 0.59421999312091832833j) + assert ae(fp.zeta(1+1e6j, derivative=1), -0.10423479366985452134 - 0.74728992803359056244j) + assert ae(fp.zeta(0.5+100000j, derivative=1), 10.766962036817482375 - 30.92705282105996714j) + assert ae(fp.zeta(0.5+100000j, derivative=2), -119.40515625740538429 + 217.14780631141830251j) + assert ae(fp.zeta(0.5+100000j, derivative=3), 1129.7550282628460881 - 1685.4736895169690346j) + assert ae(fp.zeta(0.5+100000j, derivative=4), -10407.160819314958615 + 13777.786698628045085j) + assert ae(fp.zeta(0.75+100000j, derivative=1), -0.41742276699594321475 - 6.4453816275049955949j) + assert ae(fp.zeta(0.75+100000j, derivative=2), -9.214314279161977266 + 35.07290795337967899j) + assert ae(fp.zeta(0.75+100000j, derivative=3), 110.61331857820103469 - 236.87847130518129926j) + assert ae(fp.zeta(0.75+100000j, derivative=4), -1054.334275898559401 + 1769.9177890161596383j) + +def test_siegelz(): + mp.dps = 15 + assert siegelz(100000).ae(5.87959246868176504171) + assert siegelz(100000, derivative=2).ae(-54.1172711010126452832) + assert siegelz(100000, derivative=3).ae(-278.930831343966552538) + assert siegelz(100000+j,derivative=1).ae(678.214511857070283307-379.742160779916375413j) + + + +def test_zeta_near_1(): + # Test for a former bug in mpf_zeta and mpc_zeta + mp.dps = 15 + s1 = fadd(1, '1e-10', exact=True) + s2 = fadd(1, '-1e-10', exact=True) + s3 = fadd(1, '1e-10j', exact=True) + assert zeta(s1).ae(1.000000000057721566490881444e10) + assert zeta(s2).ae(-9.99999999942278433510574872e9) + z = zeta(s3) + assert z.real.ae(0.57721566490153286060) + assert z.imag.ae(-9.9999999999999999999927184e9) + mp.dps = 30 + s1 = fadd(1, '1e-50', exact=True) + s2 = fadd(1, '-1e-50', exact=True) + s3 = fadd(1, '1e-50j', exact=True) + assert zeta(s1).ae('1e50') + assert zeta(s2).ae('-1e50') + z = zeta(s3) + assert z.real.ae('0.57721566490153286060651209008240243104215933593992') + assert z.imag.ae('-1e50') diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_hp.py b/phivenv/Lib/site-packages/mpmath/tests/test_hp.py new file mode 100644 index 0000000000000000000000000000000000000000..9eba0af798f64ac3f8d464e2d3bf231567a48c9b --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_hp.py @@ -0,0 +1,291 @@ +""" +Check that the output from irrational functions is accurate for +high-precision input, from 5 to 200 digits. The reference values were +verified with Mathematica. 
+""" + +import time +from mpmath import * + +precs = [5, 15, 28, 35, 57, 80, 100, 150, 200] + +# sqrt(3) + pi/2 +a = \ +"3.302847134363773912758768033145623809041389953497933538543279275605"\ +"841220051904536395163599428307109666700184672047856353516867399774243594"\ +"67433521615861420725323528325327484262075464241255915238845599752675" + +# e + 1/euler**2 +b = \ +"5.719681166601007617111261398629939965860873957353320734275716220045750"\ +"31474116300529519620938123730851145473473708966080207482581266469342214"\ +"824842256999042984813905047895479210702109260221361437411947323431" + +# sqrt(a) +sqrt_a = \ +"1.817373691447021556327498239690365674922395036495564333152483422755"\ +"144321726165582817927383239308173567921345318453306994746434073691275094"\ +"484777905906961689902608644112196725896908619756404253109722911487" + +# sqrt(a+b*i).real +sqrt_abi_real = \ +"2.225720098415113027729407777066107959851146508557282707197601407276"\ +"89160998185797504198062911768240808839104987021515555650875977724230130"\ +"3584116233925658621288393930286871862273400475179312570274423840384" + +# sqrt(a+b*i).imag +sqrt_abi_imag = \ +"1.2849057639084690902371581529110949983261182430040898147672052833653668"\ +"0629534491275114877090834296831373498336559849050755848611854282001250"\ +"1924311019152914021365263161630765255610885489295778894976075186" + +# log(a) +log_a = \ +"1.194784864491089550288313512105715261520511949410072046160598707069"\ +"4336653155025770546309137440687056366757650909754708302115204338077595203"\ +"83005773986664564927027147084436553262269459110211221152925732612" + +# log(a+b*i).real +log_abi_real = \ +"1.8877985921697018111624077550443297276844736840853590212962006811663"\ +"04949387789489704203167470111267581371396245317618589339274243008242708"\ +"014251531496104028712866224020066439049377679709216784954509456421" + +# log(a+b*i).imag +log_abi_imag = \ +"1.0471204952840802663567714297078763189256357109769672185219334169734948"\ +"4265809854092437285294686651806426649541504240470168212723133326542181"\ +"8300136462287639956713914482701017346851009323172531601894918640" + +# exp(a) +exp_a = \ +"27.18994224087168661137253262213293847994194869430518354305430976149"\ +"382792035050358791398632888885200049857986258414049540376323785711941636"\ +"100358982497583832083513086941635049329804685212200507288797531143" + +# exp(a+b*i).real +exp_abi_real = \ +"22.98606617170543596386921087657586890620262522816912505151109385026"\ +"40160179326569526152851983847133513990281518417211964710397233157168852"\ +"4963130831190142571659948419307628119985383887599493378056639916701" + +# exp(a+b*i).imag +exp_abi_imag = \ +"-14.523557450291489727214750571590272774669907424478129280902375851196283"\ +"3377162379031724734050088565710975758824441845278120105728824497308303"\ +"6065619788140201636218705414429933685889542661364184694108251449" + +# a**b +pow_a_b = \ +"928.7025342285568142947391505837660251004990092821305668257284426997"\ +"361966028275685583421197860603126498884545336686124793155581311527995550"\ +"580229264427202446131740932666832138634013168125809402143796691154" + +# (a**(a+b*i)).real +pow_a_abi_real = \ +"44.09156071394489511956058111704382592976814280267142206420038656267"\ +"67707916510652790502399193109819563864568986234654864462095231138500505"\ +"8197456514795059492120303477512711977915544927440682508821426093455" + +# (a**(a+b*i)).imag +pow_a_abi_imag = \ +"27.069371511573224750478105146737852141664955461266218367212527612279886"\ 
+"9322304536553254659049205414427707675802193810711302947536332040474573"\ +"8166261217563960235014674118610092944307893857862518964990092301" + +# ((a+b*i)**(a+b*i)).real +pow_abi_abi_real = \ +"-0.15171310677859590091001057734676423076527145052787388589334350524"\ +"8084195882019497779202452975350579073716811284169068082670778986235179"\ +"0813026562962084477640470612184016755250592698408112493759742219150452"\ + +# ((a+b*i)**(a+b*i)).imag +pow_abi_abi_imag = \ +"1.2697592504953448936553147870155987153192995316950583150964099070426"\ +"4736837932577176947632535475040521749162383347758827307504526525647759"\ +"97547638617201824468382194146854367480471892602963428122896045019902" + +# sin(a) +sin_a = \ +"-0.16055653857469062740274792907968048154164433772938156243509084009"\ +"38437090841460493108570147191289893388608611542655654723437248152535114"\ +"528368009465836614227575701220612124204622383149391870684288862269631" + +# sin(1000*a) +sin_1000a = \ +"-0.85897040577443833776358106803777589664322997794126153477060795801"\ +"09151695416961724733492511852267067419573754315098042850381158563024337"\ +"216458577140500488715469780315833217177634490142748614625281171216863" + +# sin(a+b*i) +sin_abi_real = \ +"-24.4696999681556977743346798696005278716053366404081910969773939630"\ +"7149215135459794473448465734589287491880563183624997435193637389884206"\ +"02151395451271809790360963144464736839412254746645151672423256977064" + +sin_abi_imag = \ +"-150.42505378241784671801405965872972765595073690984080160750785565810981"\ +"8314482499135443827055399655645954830931316357243750839088113122816583"\ +"7169201254329464271121058839499197583056427233866320456505060735" + +# cos +cos_a = \ +"-0.98702664499035378399332439243967038895709261414476495730788864004"\ +"05406821549361039745258003422386169330787395654908532996287293003581554"\ +"257037193284199198069707141161341820684198547572456183525659969145501" + +cos_1000a = \ +"-0.51202523570982001856195696460663971099692261342827540426136215533"\ +"52686662667660613179619804463250686852463876088694806607652218586060613"\ +"951310588158830695735537073667299449753951774916401887657320950496820" + +# tan +tan_a = \ +"0.162666873675188117341401059858835168007137819495998960250142156848"\ +"639654718809412181543343168174807985559916643549174530459883826451064966"\ +"7996119428949951351938178809444268785629011625179962457123195557310" + +tan_abi_real = \ +"6.822696615947538488826586186310162599974827139564433912601918442911"\ +"1026830824380070400102213741875804368044342309515353631134074491271890"\ +"467615882710035471686578162073677173148647065131872116479947620E-6" + +tan_abi_imag = \ +"0.9999795833048243692245661011298447587046967777739649018690797625964167"\ +"1446419978852235960862841608081413169601038230073129482874832053357571"\ +"62702259309150715669026865777947502665936317953101462202542168429" + + +def test_hp(): + for dps in precs: + mp.dps = dps + 8 + aa = mpf(a) + bb = mpf(b) + a1000 = 1000*mpf(a) + abi = mpc(aa, bb) + mp.dps = dps + assert (sqrt(3) + pi/2).ae(aa) + assert (e + 1/euler**2).ae(bb) + + assert sqrt(aa).ae(mpf(sqrt_a)) + assert sqrt(abi).ae(mpc(sqrt_abi_real, sqrt_abi_imag)) + + assert log(aa).ae(mpf(log_a)) + assert log(abi).ae(mpc(log_abi_real, log_abi_imag)) + + assert exp(aa).ae(mpf(exp_a)) + assert exp(abi).ae(mpc(exp_abi_real, exp_abi_imag)) + + assert (aa**bb).ae(mpf(pow_a_b)) + assert (aa**abi).ae(mpc(pow_a_abi_real, pow_a_abi_imag)) + assert (abi**abi).ae(mpc(pow_abi_abi_real, pow_abi_abi_imag)) + + assert 
sin(a).ae(mpf(sin_a)) + assert sin(a1000).ae(mpf(sin_1000a)) + assert sin(abi).ae(mpc(sin_abi_real, sin_abi_imag)) + + assert cos(a).ae(mpf(cos_a)) + assert cos(a1000).ae(mpf(cos_1000a)) + + assert tan(a).ae(mpf(tan_a)) + assert tan(abi).ae(mpc(tan_abi_real, tan_abi_imag)) + + # check that complex cancellation is avoided so that both + # real and imaginary parts have high relative accuracy. + # abs_eps should be 0, but has to be set to 1e-205 to pass the + # 200-digit case, probably due to slight inaccuracy in the + # precomputed input + assert (tan(abi).real).ae(mpf(tan_abi_real), abs_eps=1e-205) + assert (tan(abi).imag).ae(mpf(tan_abi_imag), abs_eps=1e-205) + mp.dps = 460 + assert str(log(3))[-20:] == '02166121184001409826' + mp.dps = 15 + +# Since str(a) can differ in the last digit from rounded a, and I want +# to compare the last digits of big numbers with the results in Mathematica, +# I made this hack to get the last 20 digits of rounded a + +def last_digits(a): + r = repr(a) + s = str(a) + #dps = mp.dps + #mp.dps += 3 + m = 10 + r = r.replace(s[:-m],'') + r = r.replace("mpf('",'').replace("')",'') + num0 = 0 + for c in r: + if c == '0': + num0 += 1 + else: + break + b = float(int(r))/10**(len(r) - m) + if b >= 10**m - 0.5: # pragma: no cover + raise NotImplementedError + n = int(round(b)) + sn = str(n) + s = s[:-m] + '0'*num0 + sn + return s[-20:] + +# values checked with Mathematica +def test_log_hp(): + mp.dps = 2000 + a = mpf(10)**15000/3 + r = log(a) + res = last_digits(r) + # Mathematica N[Log[10^15000/3], 2000] + # ...7443804441768333470331 + assert res == '43804441768333470331' + + # see issue 145 + r = log(mpf(3)/2) + # Mathematica N[Log[3/2], 2000] + # ...69653749808140753263288 + res = last_digits(r) + assert res == '53749808140753263288' + + mp.dps = 10000 + r = log(2) + res = last_digits(r) + # Mathematica N[Log[2], 10000] + # ...695615913401856601359655561 + assert res == '13401856601359655561' + r = log(mpf(10)**10/3) + res = last_digits(r) + # Mathematica N[Log[10^10/3], 10000] + # ...587087654020631943060007154 + assert res == '54020631943060007154', res + r = log(mpf(10)**100/3) + res = last_digits(r) + # Mathematica N[Log[10^100/3], 10000] + # ...59246336539088351652334666 + assert res == '36539088351652334666', res + mp.dps += 10 + a = 1 - mpf(1)/10**10 + mp.dps -= 10 + r = log(a) + res = last_digits(r) + # ...3310334360482956137216724048322957404 + # 372167240483229574038733026370 + # Mathematica N[Log[1 - 10^-10]*10^10, 10000] + # ...60482956137216724048322957404 + assert res == '37216724048322957404', res + mp.dps = 10000 + mp.dps += 100 + a = 1 + mpf(1)/10**100 + mp.dps -= 100 + + r = log(a) + res = last_digits(+r) + # Mathematica N[Log[1 + 10^-100]*10^10, 10030] + # ...3994733877377412241546890854692521568292338268273 10^-91 + assert res == '39947338773774122415', res + + mp.dps = 15 + +def test_exp_hp(): + mp.dps = 4000 + r = exp(mpf(1)/10) + # IntegerPart[N[Exp[1/10] * 10^4000, 4000]] + # ...92167105162069688129 + assert int(r * 10**mp.dps) % 10**20 == 92167105162069688129 diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_identify.py b/phivenv/Lib/site-packages/mpmath/tests/test_identify.py new file mode 100644 index 0000000000000000000000000000000000000000..f75ab0bc4f04ecb614011e7f4599989465cab785 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_identify.py @@ -0,0 +1,19 @@ +from mpmath import * + +def test_pslq(): + mp.dps = 15 + assert pslq([3*pi+4*e/7, pi, e, log(2)]) == [7, -21, -4, 0]
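    # --- editor's sketch (addition, not part of the upstream test file):
    # pslq searches for a small integer relation a0*x0 + ... + ak*xk = 0
    # among the given reals; for instance it recovers log(6) = log(2) + log(3).
    # The names xs and rel are illustration-only:
    xs = [log(6), log(2), log(3)]
    rel = pslq(xs)
    assert rel is not None
    assert abs(fdot(rel, xs)) < 1e-10
    assert pslq([4.9999999999999991, 1]) == 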
[1, -5] + assert pslq([2,1]) == [1, -2] + +def test_identify(): + mp.dps = 20 + assert identify(zeta(4), ['log(2)', 'pi**4']) == '((1/90)*pi**4)' + mp.dps = 15 + assert identify(exp(5)) == 'exp(5)' + assert identify(exp(4)) == 'exp(4)' + assert identify(log(5)) == 'log(5)' + assert identify(exp(3*pi), ['pi']) == 'exp((3*pi))' + assert identify(3, full=True) == ['3', '3', '1/(1/3)', 'sqrt(9)', + '1/sqrt((1/9))', '(sqrt(12)/2)**2', '1/(sqrt(12)/6)**2'] + assert identify(pi+1, {'a':+pi}) == '(1 + 1*a)' diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_interval.py b/phivenv/Lib/site-packages/mpmath/tests/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..251fd8b7ddb00074e8ae27cce4a01d8f4f8fe151 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_interval.py @@ -0,0 +1,453 @@ +from mpmath import * + +def test_interval_identity(): + iv.dps = 15 + assert mpi(2) == mpi(2, 2) + assert mpi(2) != mpi(-2, 2) + assert not (mpi(2) != mpi(2, 2)) + assert mpi(-1, 1) == mpi(-1, 1) + assert str(mpi('0.1')) == "[0.099999999999999991673, 0.10000000000000000555]" + assert repr(mpi('0.1')) == "mpi('0.099999999999999992', '0.10000000000000001')" + u = mpi(-1, 3) + assert -1 in u + assert 2 in u + assert 3 in u + assert -1.1 not in u + assert 3.1 not in u + assert mpi(-1, 3) in u + assert mpi(0, 1) in u + assert mpi(-1.1, 2) not in u + assert mpi(2.5, 3.1) not in u + w = mpi(-inf, inf) + assert mpi(-5, 5) in w + assert mpi(2, inf) in w + assert mpi(0, 2) in mpi(0, 10) + assert not (3 in mpi(-inf, 0)) + +def test_interval_hash(): + assert hash(mpi(3)) == hash(3) + assert hash(mpi(3.25)) == hash(3.25) + assert hash(mpi(3,4)) == hash(mpi(3,4)) + assert hash(iv.mpc(3)) == hash(3) + assert hash(iv.mpc(3,4)) == hash(3+4j) + assert hash(iv.mpc((1,3),(2,4))) == hash(iv.mpc((1,3),(2,4))) + +def test_interval_arithmetic(): + iv.dps = 15 + assert mpi(2) + mpi(3,4) == mpi(5,6) + assert mpi(1, 2)**2 == mpi(1, 4) + assert mpi(1) + mpi(0, 1e-50) == mpi(1, mpf('1.0000000000000002')) + x = 1 / (1 / mpi(3)) + assert x.a < 3 < x.b + x = mpi(2) ** mpi(0.5) + iv.dps += 5 + sq = iv.sqrt(2) + iv.dps -= 5 + assert x.a < sq < x.b + assert mpi(1) / mpi(1, inf) + assert mpi(2, 3) / inf == mpi(0, 0) + assert mpi(0) / inf == 0 + assert mpi(0) / 0 == mpi(-inf, inf) + assert mpi(inf) / 0 == mpi(-inf, inf) + assert mpi(0) * inf == mpi(-inf, inf) + assert 1 / mpi(2, inf) == mpi(0, 0.5) + assert str((mpi(50, 50) * mpi(-10, -10)) / 3) == \ + '[-166.66666666666668561, -166.66666666666665719]' + assert mpi(0, 4) ** 3 == mpi(0, 64) + assert mpi(2,4).mid == 3 + iv.dps = 30 + a = mpi(iv.pi) + iv.dps = 15 + b = +a + assert b.a < a.a + assert b.b > a.b + a = mpi(iv.pi) + assert a == +a + assert abs(mpi(-1,2)) == mpi(0,2) + assert abs(mpi(0.5,2)) == mpi(0.5,2) + assert abs(mpi(-3,2)) == mpi(0,3) + assert abs(mpi(-3,-0.5)) == mpi(0.5,3) + assert mpi(0) * mpi(2,3) == mpi(0) + assert mpi(2,3) * mpi(0) == mpi(0) + assert mpi(1,3).delta == 2 + assert mpi(1,2) - mpi(3,4) == mpi(-3,-1) + assert mpi(-inf,0) - mpi(0,inf) == mpi(-inf,0) + assert mpi(-inf,0) - mpi(-inf,inf) == mpi(-inf,inf) + assert mpi(0,inf) - mpi(-inf,1) == mpi(-1,inf) + +def test_interval_mul(): + assert mpi(-1, 0) * inf == mpi(-inf, 0) + assert mpi(-1, 0) * -inf == mpi(0, inf) + assert mpi(0, 1) * inf == mpi(0, inf) + assert mpi(0, 1) * mpi(0, inf) == mpi(0, inf) + assert mpi(-1, 1) * inf == mpi(-inf, inf) + assert mpi(-1, 1) * mpi(0, inf) == mpi(-inf, inf) + assert mpi(-1, 1) * mpi(-inf, inf) == mpi(-inf, inf) + assert mpi(-inf, 0) * mpi(0, 
1) == mpi(-inf, 0) + assert mpi(-inf, 0) * mpi(0, 0) * mpi(-inf, 0) + assert mpi(-inf, 0) * mpi(-inf, inf) == mpi(-inf, inf) + assert mpi(-5,0)*mpi(-32,28) == mpi(-140,160) + assert mpi(2,3) * mpi(-1,2) == mpi(-3,6) + # Should be undefined? + assert mpi(inf, inf) * 0 == mpi(-inf, inf) + assert mpi(-inf, -inf) * 0 == mpi(-inf, inf) + assert mpi(0) * mpi(-inf,2) == mpi(-inf,inf) + assert mpi(0) * mpi(-2,inf) == mpi(-inf,inf) + assert mpi(-2,inf) * mpi(0) == mpi(-inf,inf) + assert mpi(-inf,2) * mpi(0) == mpi(-inf,inf) + +def test_interval_pow(): + assert mpi(3)**2 == mpi(9, 9) + assert mpi(-3)**2 == mpi(9, 9) + assert mpi(-3, 1)**2 == mpi(0, 9) + assert mpi(-3, -1)**2 == mpi(1, 9) + assert mpi(-3, -1)**3 == mpi(-27, -1) + assert mpi(-3, 1)**3 == mpi(-27, 1) + assert mpi(-2, 3)**2 == mpi(0, 9) + assert mpi(-3, 2)**2 == mpi(0, 9) + assert mpi(4) ** -1 == mpi(0.25, 0.25) + assert mpi(-4) ** -1 == mpi(-0.25, -0.25) + assert mpi(4) ** -2 == mpi(0.0625, 0.0625) + assert mpi(-4) ** -2 == mpi(0.0625, 0.0625) + assert mpi(0, 1) ** inf == mpi(0, 1) + assert mpi(0, 1) ** -inf == mpi(1, inf) + assert mpi(0, inf) ** inf == mpi(0, inf) + assert mpi(0, inf) ** -inf == mpi(0, inf) + assert mpi(1, inf) ** inf == mpi(1, inf) + assert mpi(1, inf) ** -inf == mpi(0, 1) + assert mpi(2, 3) ** 1 == mpi(2, 3) + assert mpi(2, 3) ** 0 == 1 + assert mpi(1,3) ** mpi(2) == mpi(1,9) + +def test_interval_sqrt(): + assert mpi(4) ** 0.5 == mpi(2) + +def test_interval_div(): + assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5) + assert mpi(0, 1) / mpi(0, 1) == mpi(0, inf) + assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(inf, inf) / mpi(2, inf) == mpi(0, inf) + assert mpi(inf, inf) / mpi(2, 2) == mpi(inf, inf) + assert mpi(0, inf) / mpi(2, inf) == mpi(0, inf) + assert mpi(0, inf) / mpi(2, 2) == mpi(0, inf) + assert mpi(2, inf) / mpi(2, 2) == mpi(1, inf) + assert mpi(2, inf) / mpi(2, inf) == mpi(0, inf) + assert mpi(-4, 8) / mpi(1, inf) == mpi(-4, 8) + assert mpi(-4, 8) / mpi(0.5, inf) == mpi(-8, 16) + assert mpi(-inf, 8) / mpi(0.5, inf) == mpi(-inf, 16) + assert mpi(-inf, inf) / mpi(0.5, inf) == mpi(-inf, inf) + assert mpi(8, inf) / mpi(0.5, inf) == mpi(0, inf) + assert mpi(-8, inf) / mpi(0.5, inf) == mpi(-16, inf) + assert mpi(-4, 8) / mpi(inf, inf) == mpi(0, 0) + assert mpi(0, 8) / mpi(inf, inf) == mpi(0, 0) + assert mpi(0, 0) / mpi(inf, inf) == mpi(0, 0) + assert mpi(-inf, 0) / mpi(inf, inf) == mpi(-inf, 0) + assert mpi(-inf, 8) / mpi(inf, inf) == mpi(-inf, 0) + assert mpi(-inf, inf) / mpi(inf, inf) == mpi(-inf, inf) + assert mpi(-8, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(0, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(8, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(-1, 2) / mpi(0, 1) == mpi(-inf, +inf) + assert mpi(0, 1) / mpi(0, 1) == mpi(0.0, +inf) + assert mpi(-1, 0) / mpi(0, 1) == mpi(-inf, 0.0) + assert mpi(-0.5, -0.25) / mpi(0, 1) == mpi(-inf, -0.25) + assert mpi(0.5, 1) / mpi(0, 1) == mpi(0.5, +inf) + assert mpi(0.5, 4) / mpi(0, 1) == mpi(0.5, +inf) + assert mpi(-1, -0.5) / mpi(0, 1) == mpi(-inf, -0.5) + assert mpi(-4, -0.5) / mpi(0, 1) == mpi(-inf, -0.5) + assert mpi(-1, 2) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(0, 1) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-1, 0) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-0.5, -0.25) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(0.5, 1) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(0.5, 4) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-1, -0.5) / mpi(-2, 0.5) == 
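mpi(-inf, +inf)
    # --- editor's note (addition, not upstream): whenever the divisor interval
    # contains 0, the quotient is the whole extended line, since interval
    # division must return a set containing every pointwise quotient.
    assert mpi(-1, -0.5) / mpi(-2, 0.5) == 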
mpi(-inf, +inf) + assert mpi(-4, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-1, 2) / mpi(-1, 0) == mpi(-inf, +inf) + assert mpi(0, 1) / mpi(-1, 0) == mpi(-inf, 0.0) + assert mpi(-1, 0) / mpi(-1, 0) == mpi(0.0, +inf) + assert mpi(-0.5, -0.25) / mpi(-1, 0) == mpi(0.25, +inf) + assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5) + assert mpi(0.5, 4) / mpi(-1, 0) == mpi(-inf, -0.5) + assert mpi(-1, -0.5) / mpi(-1, 0) == mpi(0.5, +inf) + assert mpi(-4, -0.5) / mpi(-1, 0) == mpi(0.5, +inf) + assert mpi(-1, 2) / mpi(0.5, 1) == mpi(-2.0, 4.0) + assert mpi(0, 1) / mpi(0.5, 1) == mpi(0.0, 2.0) + assert mpi(-1, 0) / mpi(0.5, 1) == mpi(-2.0, 0.0) + assert mpi(-0.5, -0.25) / mpi(0.5, 1) == mpi(-1.0, -0.25) + assert mpi(0.5, 1) / mpi(0.5, 1) == mpi(0.5, 2.0) + assert mpi(0.5, 4) / mpi(0.5, 1) == mpi(0.5, 8.0) + assert mpi(-1, -0.5) / mpi(0.5, 1) == mpi(-2.0, -0.5) + assert mpi(-4, -0.5) / mpi(0.5, 1) == mpi(-8.0, -0.5) + assert mpi(-1, 2) / mpi(-2, -0.5) == mpi(-4.0, 2.0) + assert mpi(0, 1) / mpi(-2, -0.5) == mpi(-2.0, 0.0) + assert mpi(-1, 0) / mpi(-2, -0.5) == mpi(0.0, 2.0) + assert mpi(-0.5, -0.25) / mpi(-2, -0.5) == mpi(0.125, 1.0) + assert mpi(0.5, 1) / mpi(-2, -0.5) == mpi(-2.0, -0.25) + assert mpi(0.5, 4) / mpi(-2, -0.5) == mpi(-8.0, -0.25) + assert mpi(-1, -0.5) / mpi(-2, -0.5) == mpi(0.25, 2.0) + assert mpi(-4, -0.5) / mpi(-2, -0.5) == mpi(0.25, 8.0) + # Should be undefined? + assert mpi(0, 0) / mpi(0, 0) == mpi(-inf, inf) + assert mpi(0, 0) / mpi(0, 1) == mpi(-inf, inf) + +def test_interval_cos_sin(): + iv.dps = 15 + cos = iv.cos + sin = iv.sin + tan = iv.tan + pi = iv.pi + # Around 0 + assert cos(mpi(0)) == 1 + assert sin(mpi(0)) == 0 + assert cos(mpi(0,1)) == mpi(0.54030230586813965399, 1.0) + assert sin(mpi(0,1)) == mpi(0, 0.8414709848078966159) + assert cos(mpi(1,2)) == mpi(-0.4161468365471424069, 0.54030230586813976501) + assert sin(mpi(1,2)) == mpi(0.84147098480789650488, 1.0) + assert sin(mpi(1,2.5)) == mpi(0.59847214410395643824, 1.0) + assert cos(mpi(-1, 1)) == mpi(0.54030230586813965399, 1.0) + assert cos(mpi(-1, 0.5)) == mpi(0.54030230586813965399, 1.0) + assert cos(mpi(-1, 1.5)) == mpi(0.070737201667702906405, 1.0) + assert sin(mpi(-1,1)) == mpi(-0.8414709848078966159, 0.8414709848078966159) + assert sin(mpi(-1,0.5)) == mpi(-0.8414709848078966159, 0.47942553860420300538) + assert mpi(-0.8414709848078966159, 1.00000000000000002e-100) in sin(mpi(-1,1e-100)) + assert mpi(-2.00000000000000004e-100, 1.00000000000000002e-100) in sin(mpi(-2e-100,1e-100)) + # Same interval + assert cos(mpi(2, 2.5)) + assert cos(mpi(3.5, 4)) == mpi(-0.93645668729079634129, -0.65364362086361182946) + assert cos(mpi(5, 5.5)) == mpi(0.28366218546322624627, 0.70866977429126010168) + assert mpi(0.59847214410395654927, 0.90929742682568170942) in sin(mpi(2, 2.5)) + assert sin(mpi(3.5, 4)) == mpi(-0.75680249530792831347, -0.35078322768961983646) + assert sin(mpi(5, 5.5)) == mpi(-0.95892427466313856499, -0.70554032557039181306) + # Higher roots + iv.dps = 55 + w = 4*10**50 + mpi(0.5) + for p in [15, 40, 80]: + iv.dps = p + assert 0 in sin(4*mpi(pi)) + assert 0 in sin(4*10**50*mpi(pi)) + assert 0 in cos((4+0.5)*mpi(pi)) + assert 0 in cos(w*mpi(pi)) + assert 1 in cos(4*mpi(pi)) + assert 1 in cos(4*10**50*mpi(pi)) + iv.dps = 15 + assert cos(mpi(2,inf)) == mpi(-1,1) + assert sin(mpi(2,inf)) == mpi(-1,1) + assert cos(mpi(-inf,2)) == mpi(-1,1) + assert sin(mpi(-inf,2)) == mpi(-1,1) + u = tan(mpi(0.5,1)) + assert mpf(u.a).ae(mp.tan(0.5)) + assert mpf(u.b).ae(mp.tan(1)) + v = iv.cot(mpi(0.5,1)) + assert 
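v.a <= v.b
    # --- editor's sketch (addition, not upstream code; _x is illustration-only):
    # the defining guarantee of interval functions is enclosure -- the true
    # value at any point of the argument interval lies inside the result:
    _x = mpi(0.5, 1)
    assert mp.cos(0.7) in iv.cos(_x)   # 0.7 lies in [0.5, 1]
    assert mp.sin(0.7) in iv.sin(_x)
    assert 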
mpf(v.a).ae(mp.cot(1)) + assert mpf(v.b).ae(mp.cot(0.5)) + # Sanity check of evaluation at n*pi and (n+1/2)*pi + for n in range(-5,7,2): + x = iv.cos(n*iv.pi) + assert -1 in x + assert x >= -1 + assert x != -1 + x = iv.sin((n+0.5)*iv.pi) + assert -1 in x + assert x >= -1 + assert x != -1 + for n in range(-6,8,2): + x = iv.cos(n*iv.pi) + assert 1 in x + assert x <= 1 + if n: + assert x != 1 + x = iv.sin((n+0.5)*iv.pi) + assert 1 in x + assert x <= 1 + assert x != 1 + for n in range(-6,7): + x = iv.cos((n+0.5)*iv.pi) + assert x.a < 0 < x.b + x = iv.sin(n*iv.pi) + if n: + assert x.a < 0 < x.b + +def test_interval_complex(): + # TODO: many more tests + iv.dps = 15 + mp.dps = 15 + assert iv.mpc(2,3) == 2+3j + assert iv.mpc(2,3) != 2+4j + assert iv.mpc(2,3) != 1+3j + assert 1+3j in iv.mpc([1,2],[3,4]) + assert 2+5j not in iv.mpc([1,2],[3,4]) + assert iv.mpc(1,2) + 1j == 1+3j + assert iv.mpc([1,2],[2,3]) + 2+3j == iv.mpc([3,4],[5,6]) + assert iv.mpc([2,4],[4,8]) / 2 == iv.mpc([1,2],[2,4]) + assert iv.mpc([1,2],[2,4]) * 2j == iv.mpc([-8,-4],[2,4]) + assert iv.mpc([2,4],[4,8]) / 2j == iv.mpc([2,4],[-2,-1]) + assert iv.exp(2+3j).ae(mp.exp(2+3j)) + assert iv.log(2+3j).ae(mp.log(2+3j)) + assert (iv.mpc(2,3) ** iv.mpc(0.5,2)).ae(mp.mpc(2,3) ** mp.mpc(0.5,2)) + assert 1j in (iv.mpf(-1) ** 0.5) + assert 1j in (iv.mpc(-1) ** 0.5) + assert abs(iv.mpc(0)) == 0 + assert abs(iv.mpc(inf)) == inf + assert abs(iv.mpc(3,4)) == 5 + assert abs(iv.mpc(4)) == 4 + assert abs(iv.mpc(0,4)) == 4 + assert abs(iv.mpc(0,[2,3])) == iv.mpf([2,3]) + assert abs(iv.mpc(0,[-3,2])) == iv.mpf([0,3]) + assert abs(iv.mpc([3,5],[4,12])) == iv.mpf([5,13]) + assert abs(iv.mpc([3,5],[-4,12])) == iv.mpf([3,13]) + assert iv.mpc(2,3) ** 0 == 1 + assert iv.mpc(2,3) ** 1 == (2+3j) + assert iv.mpc(2,3) ** 2 == (2+3j)**2 + assert iv.mpc(2,3) ** 3 == (2+3j)**3 + assert iv.mpc(2,3) ** 4 == (2+3j)**4 + assert iv.mpc(2,3) ** 5 == (2+3j)**5 + assert iv.mpc(2,2) ** (-1) == (2+2j) ** (-1) + assert iv.mpc(2,2) ** (-2) == (2+2j) ** (-2) + assert iv.cos(2).ae(mp.cos(2)) + assert iv.sin(2).ae(mp.sin(2)) + assert iv.cos(2+3j).ae(mp.cos(2+3j)) + assert iv.sin(2+3j).ae(mp.sin(2+3j)) + +def test_interval_complex_arg(): + mp.dps = 15 + iv.dps = 15 + assert iv.arg(3) == 0 + assert iv.arg(0) == 0 + assert iv.arg([0,3]) == 0 + assert iv.arg(-3).ae(pi) + assert iv.arg(2+3j).ae(iv.arg(2+3j)) + z = iv.mpc([-2,-1],[3,4]) + t = iv.arg(z) + assert t.a.ae(mp.arg(-1+4j)) + assert t.b.ae(mp.arg(-2+3j)) + z = iv.mpc([-2,1],[3,4]) + t = iv.arg(z) + assert t.a.ae(mp.arg(1+3j)) + assert t.b.ae(mp.arg(-2+3j)) + z = iv.mpc([1,2],[3,4]) + t = iv.arg(z) + assert t.a.ae(mp.arg(2+3j)) + assert t.b.ae(mp.arg(1+4j)) + z = iv.mpc([1,2],[-2,3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(1-2j)) + assert t.b.ae(mp.arg(1+3j)) + z = iv.mpc([1,2],[-4,-3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(1-4j)) + assert t.b.ae(mp.arg(2-3j)) + z = iv.mpc([-1,2],[-4,-3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(-1-3j)) + assert t.b.ae(mp.arg(2-3j)) + z = iv.mpc([-2,-1],[-4,-3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(-2-3j)) + assert t.b.ae(mp.arg(-1-4j)) + z = iv.mpc([-2,-1],[-3,3]) + t = iv.arg(z) + assert t.a.ae(-mp.pi) + assert t.b.ae(mp.pi) + z = iv.mpc([-2,2],[-3,3]) + t = iv.arg(z) + assert t.a.ae(-mp.pi) + assert t.b.ae(mp.pi) + +def test_interval_ae(): + iv.dps = 15 + x = iv.mpf([1,2]) + assert x.ae(1) is None + assert x.ae(1.5) is None + assert x.ae(2) is None + assert x.ae(2.01) is False + assert x.ae(0.99) is False + x = iv.mpf(3.5) + assert x.ae(3.5) is True + assert x.ae(3.5+1e-15) is True + 
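    # --- editor's note (addition, not upstream): interval ae() is three-valued:
    # True when the whole interval matches to within tolerance, False when no
    # point of it can match, and None when the answer depends on where in the
    # interval the true value lies (as for x = iv.mpf([1, 2]) above).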
assert x.ae(3.5-1e-15) is True + assert x.ae(3.501) is False + assert x.ae(3.499) is False + assert x.ae(iv.mpf([3.5,3.501])) is None + assert x.ae(iv.mpf([3.5,4.5+1e-15])) is None + +def test_interval_nstr(): + iv.dps = n = 30 + x = mpi(1, 2) + # FIXME: error_dps should not be necessary + assert iv.nstr(x, n, mode='plusminus', error_dps=6) == '1.5 +- 0.5' + assert iv.nstr(x, n, mode='plusminus', use_spaces=False, error_dps=6) == '1.5+-0.5' + assert iv.nstr(x, n, mode='percent') == '1.5 (33.33%)' + assert iv.nstr(x, n, mode='brackets', use_spaces=False) == '[1.0,2.0]' + assert iv.nstr(x, n, mode='brackets' , brackets=('<', '>')) == '<1.0, 2.0>' + x = mpi('5.2582327113062393041', '5.2582327113062749951') + assert iv.nstr(x, n, mode='diff') == '5.2582327113062[393041, 749951]' + assert iv.nstr(iv.cos(mpi(1)), n, mode='diff', use_spaces=False) == '0.54030230586813971740093660744[2955,3053]' + assert iv.nstr(mpi('1e123', '1e129'), n, mode='diff') == '[1.0e+123, 1.0e+129]' + exp = iv.exp + assert iv.nstr(iv.exp(mpi('5000.1')), n, mode='diff') == '3.2797365856787867069110487[0926, 1191]e+2171' + iv.dps = 15 + +def test_mpi_from_str(): + iv.dps = 15 + assert iv.convert('1.5 +- 0.5') == mpi(mpf('1.0'), mpf('2.0')) + assert mpi(1, 2) in iv.convert('1.5 (33.33333333333333333333333333333%)') + assert iv.convert('[1, 2]') == mpi(1, 2) + assert iv.convert('1[2, 3]') == mpi(12, 13) + assert iv.convert('1.[23,46]e-8') == mpi('1.23e-8', '1.46e-8') + assert iv.convert('12[3.4,5.9]e4') == mpi('123.4e+4', '125.9e4') + +def test_interval_gamma(): + mp.dps = 15 + iv.dps = 15 + # TODO: need many more tests + assert iv.rgamma(0) == 0 + assert iv.fac(0) == 1 + assert iv.fac(1) == 1 + assert iv.fac(2) == 2 + assert iv.fac(3) == 6 + assert iv.gamma(0) == [-inf,inf] + assert iv.gamma(1) == 1 + assert iv.gamma(2) == 1 + assert iv.gamma(3) == 2 + assert -3.5449077018110320546 in iv.gamma(-0.5) + assert iv.loggamma(1) == 0 + assert iv.loggamma(2) == 0 + assert 0.69314718055994530942 in iv.loggamma(3) + # Test tight log-gamma endpoints based on monotonicity + xs = [iv.mpc([2,3],[1,4]), + iv.mpc([2,3],[-4,-1]), + iv.mpc([2,3],[-1,4]), + iv.mpc([2,3],[-4,1]), + iv.mpc([2,3],[-4,4]), + iv.mpc([-3,-2],[2,4]), + iv.mpc([-3,-2],[-4,-2])] + for x in xs: + ys = [mp.loggamma(mp.mpc(x.a,x.c)), + mp.loggamma(mp.mpc(x.b,x.c)), + mp.loggamma(mp.mpc(x.a,x.d)), + mp.loggamma(mp.mpc(x.b,x.d))] + if 0 in x.imag: + ys += [mp.loggamma(x.a), mp.loggamma(x.b)] + min_real = min([y.real for y in ys]) + max_real = max([y.real for y in ys]) + min_imag = min([y.imag for y in ys]) + max_imag = max([y.imag for y in ys]) + z = iv.loggamma(x) + assert z.a.ae(min_real) + assert z.b.ae(max_real) + assert z.c.ae(min_imag) + assert z.d.ae(max_imag) + +def test_interval_conversions(): + mp.dps = 15 + iv.dps = 15 + for a, b in ((-0.0, 0), (0.0, 0.5), (1.0, 1), \ + ('-inf', 20.5), ('-inf', float(sqrt(2)))): + r = mpi(a, b) + assert int(r.b) == int(b) + assert float(r.a) == float(a) + assert float(r.b) == float(b) + assert complex(r.a) == complex(a) + assert complex(r.b) == complex(b) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_levin.py b/phivenv/Lib/site-packages/mpmath/tests/test_levin.py new file mode 100644 index 0000000000000000000000000000000000000000..b14855df4de1a45da27080dcd239267842a4ac7a --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_levin.py @@ -0,0 +1,153 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from mpmath import mp +from mpmath import libmp + +xrange = libmp.backend.xrange + +# Attention: +# These tests 
run with 15-20 decimal digits of precision. For higher precision, the + # working precision must be raised. + +def test_levin_0(): + mp.dps = 17 + eps = mp.mpf(mp.eps) + with mp.extraprec(2 * mp.prec): + L = mp.levin(method = "levin", variant = "u") + S, s, n = [], 0, 1 + while 1: + s += mp.one / (n * n) + n += 1 + S.append(s) + v, e = L.update_psum(S) + if e < eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.9 * mp.log(eps)) + err = abs(v - mp.pi ** 2 / 6) + assert err < eps + w = mp.nsum(lambda n: 1/(n * n), [1, mp.inf], method = "levin", levin_variant = "u") + err = abs(v - w) + assert err < eps + +def test_levin_1(): + mp.dps = 17 + eps = mp.mpf(mp.eps) + with mp.extraprec(2 * mp.prec): + L = mp.levin(method = "levin", variant = "v") + A, n = [], 1 + while 1: + s = mp.mpf(n) ** (2 + 3j) + n += 1 + A.append(s) + v, e = L.update(A) + if e < eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.9 * mp.log(eps)) + err = abs(v - mp.zeta(-2-3j)) + assert err < eps + w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v") + err = abs(v - w) + assert err < eps + +def test_levin_2(): + # [2] A. Sidi - "Practical Extrapolation Methods" p.373 + mp.dps = 17 + z=mp.mpf(10) + eps = mp.mpf(mp.eps) + with mp.extraprec(2 * mp.prec): + L = mp.levin(method = "sidi", variant = "t") + n = 0 + while 1: + s = (-1)**n * mp.fac(n) * z ** (-n) + v, e = L.step(s) + n += 1 + if e < eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.9 * mp.log(eps)) + exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf]) + # there is also a symbolic expression for the integral: + # exact = z * mp.exp(z) * mp.expint(1,z) + err = abs(v - exact) + assert err < eps + w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t") + err = abs(v - w) + assert err < eps + +def test_levin_3(): + mp.dps = 17 + z=mp.mpf(2) + eps = mp.mpf(mp.eps) + with mp.extraprec(7*mp.prec): # we need a copious amount of precision to sum this highly divergent series + L = mp.levin(method = "levin", variant = "t") + n, s = 0, 0 + while 1: + s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)) + n += 1 + v, e = L.step_psum(s) + if e < eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.8 * mp.log(eps)) + exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi) + # there is also a symbolic expression for the integral: + # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) + err = abs(v - exact) + assert err < eps + w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)]) + err = abs(v - w) + assert err < eps + +def test_levin_nsum(): + mp.dps = 17 + + with mp.extraprec(mp.prec): + z = mp.mpf(10) ** (-10) + a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z + assert abs(a - mp.euler) < 1e-10 + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi") + assert abs(a - mp.log(2)) < eps + + z = 2 + 1j + f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n)) + v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
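    # --- editor's sketch (addition, not part of the upstream test file):
    # mp.nsum(..., method="levin") is the high-level entry point exercised
    # throughout this file; a minimal standalone use, mirroring the "sidi"
    # call above, would be
    #     mp.dps = 17
    #     s = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "levin")
    #     assert abs(s - mp.log(2)) < 1e-15
    exact = mp.hyp2f1(2 / mp.mpf(3), 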
4 / mp.mpf(3), 1 / mp.mpf(3), z) + assert abs(exact - v) < eps + +def test_cohen_alt_0(): + mp.dps = 17 + AC = mp.cohen_alt() + S, s, n = [], 0, 1 + while 1: + s += -((-1) ** n) * mp.one / (n * n) + n += 1 + S.append(s) + v, e = AC.update_psum(S) + if e < mp.eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.9 * mp.log(mp.eps)) + err = abs(v - mp.pi ** 2 / 12) + assert err < eps + +def test_cohen_alt_1(): + mp.dps = 17 + A = [] + AC = mp.cohen_alt() + n = 1 + while 1: + A.append( mp.loggamma(1 + mp.one / (2 * n - 1))) + A.append(-mp.loggamma(1 + mp.one / (2 * n))) + n += 1 + v, e = AC.update(A) + if e < mp.eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + v = mp.exp(v) + err = abs(v - 1.06215090557106) + assert err < 1e-12 diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_linalg.py b/phivenv/Lib/site-packages/mpmath/tests/test_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..14256a79f8953d3e4ef8b296258560d48204f547 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_linalg.py @@ -0,0 +1,332 @@ +# TODO: don't use round + +from __future__ import division + +import pytest +from mpmath import * +xrange = libmp.backend.xrange + +# XXX: these shouldn't be visible(?) +LU_decomp = mp.LU_decomp +L_solve = mp.L_solve +U_solve = mp.U_solve +householder = mp.householder +improve_solution = mp.improve_solution + +A1 = matrix([[3, 1, 6], + [2, 1, 3], + [1, 1, 1]]) +b1 = [2, 7, 4] + +A2 = matrix([[ 2, -1, -1, 2], + [ 6, -2, 3, -1], + [-4, 2, 3, -2], + [ 2, 0, 4, -3]]) +b2 = [3, -3, -2, -1] + +A3 = matrix([[ 1, 0, -1, -1, 0], + [ 0, 1, 1, 0, -1], + [ 4, -5, 2, 0, 0], + [ 0, 0, -2, 9,-12], + [ 0, 5, 0, 0, 12]]) +b3 = [0, 0, 0, 0, 50] + +A4 = matrix([[10.235, -4.56, 0., -0.035, 5.67], + [-2.463, 1.27, 3.97, -8.63, 1.08], + [-6.58, 0.86, -0.257, 9.32, -43.6 ], + [ 9.83, 7.39, -17.25, 0.036, 24.86], + [-9.31, 34.9, 78.56, 1.07, 65.8 ]]) +b4 = [8.95, 20.54, 7.42, 5.60, 58.43] + +A5 = matrix([[ 1, 2, -4], + [-2, -3, 5], + [ 3, 5, -8]]) + +A6 = matrix([[ 1.377360, 2.481400, 5.359190], + [ 2.679280, -1.229560, 25.560210], + [-1.225280+1.e6, 9.910180, -35.049900-1.e6]]) +b6 = [23.500000, -15.760000, 2.340000] + +A7 = matrix([[1, -0.5], + [2, 1], + [-2, 6]]) +b7 = [3, 2, -4] + +A8 = matrix([[1, 2, 3], + [-1, 0, 1], + [-1, -2, -1], + [1, 0, -1]]) +b8 = [1, 2, 3, 4] + +A9 = matrix([[ 4, 2, -2], + [ 2, 5, -4], + [-2, -4, 5.5]]) +b9 = [10, 16, -15.5] + +A10 = matrix([[1.0 + 1.0j, 2.0, 2.0], + [4.0, 5.0, 6.0], + [7.0, 8.0, 9.0]]) +b10 = [1.0, 1.0 + 1.0j, 1.0] + + +def test_LU_decomp(): + A = A3.copy() + b = b3 + A, p = LU_decomp(A) + y = L_solve(A, b, p) + x = U_solve(A, y) + assert p == [2, 1, 2, 3] + assert [round(i, 14) for i in x] == [3.78953107960742, 2.9989094874591098, + -0.081788440567070006, 3.8713195201744801, 2.9171210468920399] + A = A4.copy() + b = b4 + A, p = LU_decomp(A) + y = L_solve(A, b, p) + x = U_solve(A, y) + assert p == [0, 3, 4, 3] + assert [round(i, 14) for i in x] == [2.6383625899619201, 2.6643834462368399, + 0.79208015947958998, -2.5088376454101899, -1.0567657691375001] + A = randmatrix(3) + bak = A.copy() + LU_decomp(A, overwrite=1) + assert A != bak + +def test_inverse(): + for A in [A1, A2, A5]: + inv = inverse(A) + assert mnorm(A*inv - eye(A.rows), 1) < 1.e-14 + +def test_householder(): + mp.dps = 15 + A, b = A8, b8 + H, p, x, r = householder(extend(A, b)) + assert H == matrix( + [[mpf('3.0'), mpf('-2.0'), mpf('-1.0'), 0], + 
[-1.0,mpf('3.333333333333333'),mpf('-2.9999999999999991'),mpf('2.0')], + [-1.0, mpf('-0.66666666666666674'),mpf('2.8142135623730948'), + mpf('-2.8284271247461898')], + [1.0, mpf('-1.3333333333333333'),mpf('-0.20000000000000018'), + mpf('4.2426406871192857')]]) + assert p == [-2, -2, mpf('-1.4142135623730949')] + assert round(norm(r, 2), 10) == 4.2426406870999998 + + y = [102.102, 58.344, 36.463, 24.310, 17.017, 12.376, 9.282, 7.140, 5.610, + 4.488, 3.6465, 3.003] + + def coeff(n): + # similar to the Hilbert matrix + A = [] + for i in range(1, 13): + A.append([1. / (i + j - 1) for j in range(1, n + 1)]) + return matrix(A) + + residuals = [] + refres = [] + for n in range(2, 7): + A = coeff(n) + H, p, x, r = householder(extend(A, y)) + x = matrix(x) + y = matrix(y) + residuals.append(norm(r, 2)) + refres.append(norm(residual(A, x, y), 2)) + assert [round(res, 10) for res in residuals] == [15.1733888877, + 0.82378073210000002, 0.302645887, 0.0260109244, + 0.00058653999999999998] + assert norm(matrix(residuals) - matrix(refres), inf) < 1.e-13 + + def hilbert_cmplx(n): + # Complexified Hilbert matrix + A = hilbert(2*n,n) + v = randmatrix(2*n, 2, min=-1, max=1) + v = v.apply(lambda x: exp(1J*pi()*x)) + A = diag(v[:,0])*A*diag(v[:n,1]) + return A + + residuals_cmplx = [] + refres_cmplx = [] + for n in range(2, 10): + A = hilbert_cmplx(n) + H, p, x, r = householder(A.copy()) + residuals_cmplx.append(norm(r, 2)) + refres_cmplx.append(norm(residual(A[:,:n-1], x, A[:,n-1]), 2)) + assert norm(matrix(residuals_cmplx) - matrix(refres_cmplx), inf) < 1.e-13 + +def test_factorization(): + A = randmatrix(5) + P, L, U = lu(A) + assert mnorm(P*A - L*U, 1) < 1.e-15 + +def test_solve(): + assert norm(residual(A6, lu_solve(A6, b6), b6), inf) < 1.e-10 + assert norm(residual(A7, lu_solve(A7, b7), b7), inf) < 1.5 + assert norm(residual(A8, lu_solve(A8, b8), b8), inf) <= 3 + 1.e-10 + assert norm(residual(A6, qr_solve(A6, b6)[0], b6), inf) < 1.e-10 + assert norm(residual(A7, qr_solve(A7, b7)[0], b7), inf) < 1.5 + assert norm(residual(A8, qr_solve(A8, b8)[0], b8), 2) <= 4.3 + assert norm(residual(A10, lu_solve(A10, b10), b10), 2) < 1.e-10 + assert norm(residual(A10, qr_solve(A10, b10)[0], b10), 2) < 1.e-10 + +def test_solve_overdet_complex(): + A = matrix([[1, 2j], [3, 4j], [5, 6]]) + b = matrix([1 + j, 2, -j]) + assert norm(residual(A, lu_solve(A, b), b)) < 1.0208 + +def test_singular(): + mp.dps = 15 + A = [[5.6, 1.2], [7./15, .1]] + B = repr(zeros(2)) + b = [1, 2] + for i in ['lu_solve(%s, %s)' % (A, b), 'lu_solve(%s, %s)' % (B, b), + 'qr_solve(%s, %s)' % (A, b), 'qr_solve(%s, %s)' % (B, b)]: + pytest.raises((ZeroDivisionError, ValueError), lambda: eval(i)) + +def test_cholesky(): + assert fp.cholesky(fp.matrix(A9)) == fp.matrix([[2, 0, 0], [1, 2, 0], [-1, -3/2, 3/2]]) + x = fp.cholesky_solve(A9, b9) + assert fp.norm(fp.residual(A9, x, b9), fp.inf) == 0 + +def test_det(): + assert det(A1) == 1 + assert round(det(A2), 14) == 8 + assert round(det(A3)) == 1834 + assert round(det(A4)) == 4443376 + assert det(A5) == 1 + assert round(det(A6)) == 78356463 + assert det(zeros(3)) == 0 + +def test_cond(): + mp.dps = 15 + A = matrix([[1.2969, 0.8648], [0.2161, 0.1441]]) + assert cond(A, lambda x: mnorm(x,1)) == mpf('327065209.73817754') + assert cond(A, lambda x: mnorm(x,inf)) == mpf('327065209.73817754') + assert cond(A, lambda x: mnorm(x,'F')) == mpf('249729266.80008656') + +@extradps(50) +def test_precision(): + A = randmatrix(10, 10) + assert mnorm(inverse(inverse(A)) - A, 1) < 1.e-45 + +def test_interval_matrix(): + mp.dps
= 15 + iv.dps = 15 + a = iv.matrix([['0.1','0.3','1.0'],['7.1','5.5','4.8'],['3.2','4.4','5.6']]) + b = iv.matrix(['4','0.6','0.5']) + c = iv.lu_solve(a, b) + assert c[0].delta < 1e-13 + assert c[1].delta < 1e-13 + assert c[2].delta < 1e-13 + assert 5.25823271130625686059275 in c[0] + assert -13.155049396267837541163 in c[1] + assert 7.42069154774972557628979 in c[2] + +def test_LU_cache(): + A = randmatrix(3) + LU = LU_decomp(A) + assert A._LU == LU_decomp(A) + A[0,0] = -1000 + assert A._LU is None + +def test_improve_solution(): + A = randmatrix(5, min=1e-20, max=1e20) + b = randmatrix(5, 1, min=-1000, max=1000) + x1 = lu_solve(A, b) + randmatrix(5, 1, min=-1e-5, max=1.e-5) + x2 = improve_solution(A, x1, b) + assert norm(residual(A, x2, b), 2) < norm(residual(A, x1, b), 2) + +def test_exp_pade(): + for i in range(3): + dps = 15 + extra = 15 + mp.dps = dps + extra + dm = 0 + N = 3 + dg = range(1,N+1) + a = diag(dg) + expa = diag([exp(x) for x in dg]) + # choose a random matrix not close to singular + # to avoid adding too much extra precision in computing + # m**-1 * M * m + while abs(dm) < 0.01: + m = randmatrix(N) + dm = det(m) + m = m/dm + a1 = m**-1 * a * m + e2 = m**-1 * expa * m + mp.dps = dps + e1 = expm(a1, method='pade') + mp.dps = dps + extra + d = e2 - e1 + #print d + mp.dps = dps + assert norm(d, inf).ae(0) + mp.dps = 15 + +def test_qr(): + mp.dps = 15 # use default value for dps + lowlimit = -9 # lower limit of matrix element value + uplimit = 9 # upper limit of matrix element value + maxm = 4 # max matrix size + flg = False # toggle to create real vs complex matrix + zero = mpf('0.0') + + for k in xrange(0,10): + exdps = 0 + mode = 'full' + flg = bool(k % 2) + + # generate arbitrary matrix size (2 to maxm) + num1 = nint(maxm*rand()) + num2 = nint(maxm*rand()) + m = int(max(num1, num2)) + n = int(min(num1, num2)) + + # create matrix + A = mp.matrix(m,n) + + # populate matrix values with arbitrary integers + if flg: + flg = False + dtype = 'complex' + for j in xrange(0,n): + for i in xrange(0,m): + val = nint(lowlimit + (uplimit-lowlimit)*rand()) + val2 = nint(lowlimit + (uplimit-lowlimit)*rand()) + A[i,j] = mpc(val, val2) + else: + flg = True + dtype = 'real' + for j in xrange(0,n): + for i in xrange(0,m): + val = nint(lowlimit + (uplimit-lowlimit)*rand()) + A[i,j] = mpf(val) + + # perform A -> QR decomposition + Q, R = qr(A, mode, edps = exdps) + + #print('\n\n A = \n', nstr(A, 4)) + #print('\n Q = \n', nstr(Q, 4)) + #print('\n R = \n', nstr(R, 4)) + #print('\n Q*R = \n', nstr(Q*R, 4)) + + maxnorm = mpf('1.0E-11') + n1 = norm(A - Q * R) + #print '\n Norm of A - Q * R = ', n1 + assert n1 <= maxnorm + + if dtype == 'real': + n1 = norm(eye(m) - Q.T * Q) + #print ' Norm of I - Q.T * Q = ', n1 + assert n1 <= maxnorm + + n1 = norm(eye(m) - Q * Q.T) + #print ' Norm of I - Q * Q.T = ', n1 + assert n1 <= maxnorm + + if dtype == 'complex': + n1 = norm(eye(m) - Q.T * Q.conjugate()) + #print ' Norm of I - Q.T * Q.conjugate() = ', n1 + assert n1 <= maxnorm + + n1 = norm(eye(m) - Q.conjugate() * Q.T) + #print ' Norm of I - Q.conjugate() * Q.T = ', n1 + assert n1 <= maxnorm diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_matrices.py b/phivenv/Lib/site-packages/mpmath/tests/test_matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..1547b90664dba66a98a7f026a04a4ed1aa1ed3b4 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_matrices.py @@ -0,0 +1,253 @@ +import pytest +import sys +from mpmath import * + +def test_matrix_basic(): + A1 =
matrix(3) + for i in range(3): + A1[i,i] = 1 + assert A1 == eye(3) + assert A1 == matrix(A1) + A2 = matrix(3, 2) + assert not A2._matrix__data + A3 = matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + assert list(A3) == list(range(1, 10)) + A3[1,1] = 0 + assert not (1, 1) in A3._matrix__data + A4 = matrix([[1, 2, 3], [4, 5, 6]]) + A5 = matrix([[6, -1], [3, 2], [0, -3]]) + assert A4 * A5 == matrix([[12, -6], [39, -12]]) + assert A1 * A3 == A3 * A1 == A3 + pytest.raises(ValueError, lambda: A2*A2) + l = [[10, 20, 30], [40, 0, 60], [70, 80, 90]] + A6 = matrix(l) + assert A6.tolist() == l + assert A6 == eval(repr(A6)) + A6 = fp.matrix(A6) + assert A6 == eval(repr(A6)) + assert A6*1j == eval(repr(A6*1j)) + assert A3 * 10 == 10 * A3 == A6 + assert A2.rows == 3 + assert A2.cols == 2 + A3.rows = 2 + A3.cols = 2 + assert len(A3._matrix__data) == 3 + assert A4 + A4 == 2*A4 + pytest.raises(ValueError, lambda: A4 + A2) + assert sum(A1 - A1) == 0 + A7 = matrix([[1, 2], [3, 4], [5, 6], [7, 8]]) + x = matrix([10, -10]) + assert A7*x == matrix([-10, -10, -10, -10]) + A8 = ones(5) + assert sum((A8 + 1) - (2 - zeros(5))) == 0 + assert (1 + ones(4)) / 2 - 1 == zeros(4) + assert eye(3)**10 == eye(3) + pytest.raises(ValueError, lambda: A7**2) + A9 = randmatrix(3) + A10 = matrix(A9) + A9[0,0] = -100 + assert A9 != A10 + assert nstr(A9) + +def test_matmul(): + """ + Test the PEP465 "@" matrix multiplication syntax. + To avoid syntax errors when importing this file in Python versions older than 3.5, we have to use exec() - sorry for that. + """ + # TODO remove exec() wrapper as soon as we drop support for Python < 3.5 + if sys.hexversion < 0x30500f0: + # we are on Python < 3.5 + pytest.skip("'@' (__matmul__) is only supported in Python 3.5 or newer") + A4 = matrix([[1, 2, 3], [4, 5, 6]]) + A5 = matrix([[6, -1], [3, 2], [0, -3]]) + exec("assert A4 @ A5 == A4 * A5") + +def test_matrix_slices(): + A = matrix([ [1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + V = matrix([1,2,3,4,5]) + + # Get slice + assert A[:,:] == A + assert A[:,1] == matrix([[2],[5],[8]]) + assert A[2,:] == matrix([[7, 8, 9]]) + assert A[1:3,1:3] == matrix([[5,6],[8,9]]) + assert V[2:4] == matrix([3,4]) + pytest.raises(IndexError, lambda: A[:,1:6]) + + # Assign slice with matrix + A1 = matrix(3) + A1[:,:] = A + assert A1[:,:] == matrix([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + A1[0,:] = matrix([[10, 11, 12]]) + assert A1 == matrix([ [10, 11, 12], + [4, 5, 6], + [7, 8, 9]]) + A1[:,2] = matrix([[13], [14], [15]]) + assert A1 == matrix([ [10, 11, 13], + [4, 5, 14], + [7, 8, 15]]) + A1[:2,:2] = matrix([[16, 17], [18, 19]]) + assert A1 == matrix([ [16, 17, 13], + [18, 19, 14], + [7, 8, 15]]) + V[1:3] = 10 + assert V == matrix([1,10,10,4,5]) + with pytest.raises(ValueError): + A1[2,:] = A[:,1] + + with pytest.raises(IndexError): + A1[2,1:20] = A[:,:] + + # Assign slice with scalar + A1[:,2] = 10 + assert A1 == matrix([ [16, 17, 10], + [18, 19, 10], + [7, 8, 10]]) + A1[:,:] = 40 + for x in A1: + assert x == 40 + + +def test_matrix_power(): + A = matrix([[1, 2], [3, 4]]) + assert A**2 == A*A + assert A**3 == A*A*A + assert A**-1 == inverse(A) + assert A**-2 == inverse(A*A) + +def test_matrix_transform(): + A = matrix([[1, 2], [3, 4], [5, 6]]) + assert A.T == A.transpose() == matrix([[1, 3, 5], [2, 4, 6]]) + swap_row(A, 1, 2) + assert A == matrix([[1, 2], [5, 6], [3, 4]]) + l = [1, 2] + swap_row(l, 0, 1) + assert l == [2, 1] + assert extend(eye(3), [1,2,3]) == matrix([[1,0,0,1],[0,1,0,2],[0,0,1,3]]) + +def test_matrix_conjugate(): + A = matrix([[1 + j, 0], [2, j]]) + assert
A.conjugate() == matrix([[mpc(1, -1), 0], [2, mpc(0, -1)]]) + assert A.transpose_conj() == A.H == matrix([[mpc(1, -1), 2], + [0, mpc(0, -1)]]) + +def test_matrix_creation(): + assert diag([1, 2, 3]) == matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) + A1 = ones(2, 3) + assert A1.rows == 2 and A1.cols == 3 + for a in A1: + assert a == 1 + A2 = zeros(3, 2) + assert A2.rows == 3 and A2.cols == 2 + for a in A2: + assert a == 0 + assert randmatrix(10) != randmatrix(10) + one = mpf(1) + assert hilbert(3) == matrix([[one, one/2, one/3], + [one/2, one/3, one/4], + [one/3, one/4, one/5]]) + +def test_norms(): + # matrix norms + A = matrix([[1, -2], [-3, -1], [2, 1]]) + assert mnorm(A,1) == 6 + assert mnorm(A,inf) == 4 + assert mnorm(A,'F') == sqrt(20) + # vector norms + assert norm(-3) == 3 + x = [1, -2, 7, -12] + assert norm(x, 1) == 22 + assert round(norm(x, 2), 10) == 14.0712472795 + assert round(norm(x, 10), 10) == 12.0054633727 + assert norm(x, inf) == 12 + +def test_vector(): + x = matrix([0, 1, 2, 3, 4]) + assert x == matrix([[0], [1], [2], [3], [4]]) + assert x[3] == 3 + assert len(x._matrix__data) == 4 + assert list(x) == list(range(5)) + x[0] = -10 + x[4] = 0 + assert x[0] == -10 + assert len(x) == len(x.T) == 5 + assert x.T*x == matrix([[114]]) + +def test_matrix_copy(): + A = ones(6) + B = A.copy() + C = +A + assert A == B + assert A == C + B[0,0] = 0 + assert A != B + C[0,0] = 42 + assert A != C + +def test_matrix_numpy(): + try: + import numpy + except ImportError: + return + l = [[1, 2], [3, 4], [5, 6]] + a = numpy.array(l) + assert matrix(l) == matrix(a) + +def test_interval_matrix_scalar_mult(): + """Multiplication of iv.matrix and any scalar type""" + a = mpi(-1, 1) + b = a + a * 2j + c = mpf(42) + d = c + c * 2j + e = 1.234 + f = fp.convert(e) + g = e + e * 3j + h = fp.convert(g) + M = iv.ones(1) + for x in [a, b, c, d, e, f, g, h]: + assert x * M == iv.matrix([x]) + assert M * x == iv.matrix([x]) + +@pytest.mark.xfail() +def test_interval_matrix_matrix_mult(): + """Multiplication of iv.matrix and other matrix types""" + A = ones(1) + B = fp.ones(1) + M = iv.ones(1) + for X in [A, B, M]: + assert X * M == iv.matrix(X) + assert X * M == X + assert M * X == iv.matrix(X) + assert M * X == X + +def test_matrix_conversion_to_iv(): + # Test that matrices with foreign datatypes are properly converted + for other_type_eye in [eye(3), fp.eye(3), iv.eye(3)]: + A = iv.matrix(other_type_eye) + B = iv.eye(3) + assert type(A[0,0]) == type(B[0,0]) + assert A.tolist() == B.tolist() + +def test_interval_matrix_mult_bug(): + # regression test for interval matrix multiplication: + # result must be nonzero-width and contain the exact result + x = convert('1.00000000000001') # note: this is implicitly rounded to some near mpf float value + A = matrix([[x]]) + B = iv.matrix(A) + C = iv.matrix([[x]]) + assert B == C + B = B * B + C = C * C + assert B == C + assert B[0, 0].delta > 1e-16 + assert B[0, 0].delta < 3e-16 + assert C[0, 0].delta > 1e-16 + assert C[0, 0].delta < 3e-16 + assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in B[0, 0] + assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in C[0, 0] + # the following caused an error before the bug was fixed + assert iv.matrix(mp.eye(2)) * (iv.ones(2) + mpi(1, 2)) == iv.matrix([[mpi(2, 3), mpi(2, 3)], [mpi(2, 3), mpi(2, 3)]]) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_mpmath.py b/phivenv/Lib/site-packages/mpmath/tests/test_mpmath.py new file mode 100644 index 
0000000000000000000000000000000000000000..9f1fe36ae9b1b0feca4677eeb90396bfa7ed8f7a --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_mpmath.py @@ -0,0 +1,7 @@ +from mpmath.libmp import * +from mpmath import * + +def test_newstyle_classes(): + for cls in [mp, fp, iv, mpf, mpc]: + for s in cls.__class__.__mro__: + assert isinstance(s, type) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_ode.py b/phivenv/Lib/site-packages/mpmath/tests/test_ode.py new file mode 100644 index 0000000000000000000000000000000000000000..6b6dbffa79cfd4ca6dbf14f8591296ee48b16682 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_ode.py @@ -0,0 +1,73 @@ +#from mpmath.calculus import ODE_step_euler, ODE_step_rk4, odeint, arange +from mpmath import odefun, cos, sin, mpf, sinc, mp + +''' +solvers = [ODE_step_euler, ODE_step_rk4] + +def test_ode1(): + """ + Let's solve: + + x'' + w**2 * x = 0 + + i.e. x1 = x, x2 = x1': + + x1' = x2 + x2' = -x1 + """ + def derivs((x1, x2), t): + return x2, -x1 + + for solver in solvers: + t = arange(0, 3.1415926, 0.005) + sol = odeint(derivs, (0., 1.), t, solver) + x1 = [a[0] for a in sol] + x2 = [a[1] for a in sol] + # the result is x1 = sin(t), x2 = cos(t) + # let's just check the end points for t = pi + assert abs(x1[-1]) < 1e-2 + assert abs(x2[-1] - (-1)) < 1e-2 + +def test_ode2(): + """ + Let's solve: + + x' - x = 0 + + i.e. x = exp(t) + + """ + def derivs((x), t): + return x + + for solver in solvers: + t = arange(0, 1, 1e-3) + sol = odeint(derivs, (1.,), t, solver) + x = [a[0] for a in sol] + # the result is x = exp(t) + # let's just check the end point for t = 1, i.e. x = e + assert abs(x[-1] - 2.718281828) < 1e-2 +''' + +def test_odefun_rational(): + mp.dps = 15 + # A rational function + f = lambda t: 1/(1+mpf(t)**2) + g = odefun(lambda x, y: [-2*x*y[0]**2], 0, [f(0)]) + assert f(2).ae(g(2)[0]) + +def test_odefun_sinc_large(): + mp.dps = 15 + # Sinc function; test for large x + f = sinc + g = odefun(lambda x, y: [(cos(x)-y[0])/x], 1, [f(1)], tol=0.01, degree=5) + assert abs(f(100) - g(100)[0])/f(100) < 0.01 + +def test_odefun_harmonic(): + mp.dps = 15 + # Harmonic oscillator + f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0]) + for x in [0, 1, 2.5, 8, 3.7]: # we go back to 3.7 to check caching + c, s = f(x) + assert c.ae(cos(x)) + assert s.ae(sin(x)) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_pickle.py b/phivenv/Lib/site-packages/mpmath/tests/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..c3d96e73a53603e0fa3f9525c5c0059725bdffb7 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_pickle.py @@ -0,0 +1,27 @@ +import os +import tempfile +import pickle + +from mpmath import * + +def pickler(obj): + fn = tempfile.mktemp() + + f = open(fn, 'wb') + pickle.dump(obj, f) + f.close() + + f = open(fn, 'rb') + obj2 = pickle.load(f) + f.close() + os.remove(fn) + + return obj2 + +def test_pickle(): + + obj = mpf('0.5') + assert obj == pickler(obj) + + obj = mpc('0.5','0.2') + assert obj == pickler(obj) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_power.py b/phivenv/Lib/site-packages/mpmath/tests/test_power.py new file mode 100644 index 0000000000000000000000000000000000000000..7a2447a62c36f9e02df79b9a40a8603f8a69b1d8 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_power.py @@ -0,0 +1,156 @@ +from mpmath import * +from mpmath.libmp import * + +import random + +def test_fractional_pow(): + mp.dps = 15 + assert mpf(16) ** 2.5 == 1024 + assert mpf(64) ** 0.5 == 8 + assert
mpf(64) ** -0.5 == 0.125 + assert mpf(16) ** -2.5 == 0.0009765625 + assert (mpf(10) ** 0.5).ae(3.1622776601683791) + assert (mpf(10) ** 2.5).ae(316.2277660168379) + assert (mpf(10) ** -0.5).ae(0.31622776601683794) + assert (mpf(10) ** -2.5).ae(0.0031622776601683794) + assert (mpf(10) ** 0.3).ae(1.9952623149688795) + assert (mpf(10) ** -0.3).ae(0.50118723362727224) + +def test_pow_integer_direction(): + """ + Test that inexact integer powers are rounded in the right + direction. + """ + random.seed(1234) + for prec in [10, 53, 200]: + for i in range(50): + a = random.randint(1<<(prec-1), 1<<prec) + b = random.randint(prec//2, prec) + ab = a**b + # note: could actually be exact, but unlikely! + t = to_int(mpf_pow_int(from_int(a), b, prec, round_up)) + assert t > ab + + +def test_pow_epsilon_rounding(): + """ + Stress test directed rounding for powers with integer exponents. + Basically, we look at the following cases: + + >>> 1.0001 ** -5 # doctest: +SKIP + 0.99950014996500702 + >>> 0.9999 ** -5 # doctest: +SKIP + 1.000500150035007 + >>> (-1.0001) ** -5 # doctest: +SKIP + -0.99950014996500702 + >>> (-0.9999) ** -5 # doctest: +SKIP + -1.000500150035007 + + >>> 1.0001 ** -6 # doctest: +SKIP + 0.99940020994401269 + >>> 0.9999 ** -6 # doctest: +SKIP + 1.0006002100560125 + >>> (-1.0001) ** -6 # doctest: +SKIP + 0.99940020994401269 + >>> (-0.9999) ** -6 # doctest: +SKIP + 1.0006002100560125 + + etc. + + We run the tests with values a very small epsilon away from 1: + small enough that the result is indistinguishable from 1 when + rounded to nearest at the output precision. We check that the + result is not erroneously rounded to 1 in cases where the + rounding should be done strictly away from 1. + """ + + def powr(x, n, r): + return make_mpf(mpf_pow_int(x._mpf_, n, mp.prec, r)) + + for (inprec, outprec) in [(100, 20), (5000, 3000)]: + + mp.prec = inprec + + pos10001 = mpf(1) + mpf(2)**(-inprec+5) + pos09999 = mpf(1) - mpf(2)**(-inprec+5) + neg10001 = -pos10001 + neg09999 = -pos09999 + + mp.prec = outprec + r = round_up + assert powr(pos10001, 5, r) > 1 + assert powr(pos09999, 5, r) == 1 + assert powr(neg10001, 5, r) < -1 + assert powr(neg09999, 5, r) == -1 + assert powr(pos10001, 6, r) > 1 + assert powr(pos09999, 6, r) == 1 + assert powr(neg10001, 6, r) > 1 + assert powr(neg09999, 6, r) == 1 + + assert powr(pos10001, -5, r) == 1 + assert powr(pos09999, -5, r) > 1 + assert powr(neg10001, -5, r) == -1 + assert powr(neg09999, -5, r) < -1 + assert powr(pos10001, -6, r) == 1 + assert powr(pos09999, -6, r) > 1 + assert powr(neg10001, -6, r) == 1 + assert powr(neg09999, -6, r) > 1 + + r = round_down + assert powr(pos10001, 5, r) == 1 + assert powr(pos09999, 5, r) < 1 + assert powr(neg10001, 5, r) == -1 + assert powr(neg09999, 5, r) > -1 + assert powr(pos10001, 6, r) == 1 + assert powr(pos09999, 6, r) < 1 + assert powr(neg10001, 6, r) == 1 + assert powr(neg09999, 6, r) < 1 + + assert powr(pos10001, -5, r) < 1 + assert powr(pos09999, -5, r) == 1 + assert powr(neg10001, -5, r) > -1 + assert powr(neg09999, -5, r) == -1 + assert powr(pos10001, -6, r) < 1 + assert powr(pos09999, -6, r) == 1 + assert powr(neg10001, -6, r) < 1 + assert powr(neg09999, -6, r) == 1 + + r = round_ceiling + assert powr(pos10001, 5, r) > 1 + assert powr(pos09999, 5, r) == 1 + assert powr(neg10001, 5, r) == -1 + assert powr(neg09999, 5, r) > -1 + assert powr(pos10001, 6, r) > 1 + assert powr(pos09999, 6, r) == 1 + assert powr(neg10001, 6, r) > 1 + assert powr(neg09999, 6, r) == 1 + + assert powr(pos10001, -5, r) == 1 + assert powr(pos09999, -5, r) > 1 + assert powr(neg10001, -5, r) > -1 + assert powr(neg09999, -5, r) == -1 + assert powr(pos10001, -6, r) == 1 + assert powr(pos09999, -6, r) > 1 + assert
powr(neg10001, -6, r) == 1 + assert powr(neg09999, -6, r) > 1 + + r = round_floor + assert powr(pos10001, 5, r) == 1 + assert powr(pos09999, 5, r) < 1 + assert powr(neg10001, 5, r) < -1 + assert powr(neg09999, 5, r) == -1 + assert powr(pos10001, 6, r) == 1 + assert powr(pos09999, 6, r) < 1 + assert powr(neg10001, 6, r) == 1 + assert powr(neg09999, 6, r) < 1 + + assert powr(pos10001, -5, r) < 1 + assert powr(pos09999, -5, r) == 1 + assert powr(neg10001, -5, r) == -1 + assert powr(neg09999, -5, r) < -1 + assert powr(pos10001, -6, r) < 1 + assert powr(pos09999, -6, r) == 1 + assert powr(neg10001, -6, r) < 1 + assert powr(neg09999, -6, r) == 1 + + mp.dps = 15 diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_quad.py b/phivenv/Lib/site-packages/mpmath/tests/test_quad.py new file mode 100644 index 0000000000000000000000000000000000000000..fc71c5f5ef9c0ecd876c988e7d033b321f065cdc --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_quad.py @@ -0,0 +1,95 @@ +import pytest +from mpmath import * + +def ae(a, b): + return abs(a-b) < 10**(-mp.dps+5) + +def test_basic_integrals(): + for prec in [15, 30, 100]: + mp.dps = prec + assert ae(quadts(lambda x: x**3 - 3*x**2, [-2, 4]), -12) + assert ae(quadgl(lambda x: x**3 - 3*x**2, [-2, 4]), -12) + assert ae(quadts(sin, [0, pi]), 2) + assert ae(quadts(sin, [0, 2*pi]), 0) + assert ae(quadts(exp, [-inf, -1]), 1/e) + assert ae(quadts(lambda x: exp(-x), [0, inf]), 1) + assert ae(quadts(lambda x: exp(-x*x), [-inf, inf]), sqrt(pi)) + assert ae(quadts(lambda x: 1/(1+x*x), [-1, 1]), pi/2) + assert ae(quadts(lambda x: 1/(1+x*x), [-inf, inf]), pi) + assert ae(quadts(lambda x: 2*sqrt(1-x*x), [-1, 1]), pi) + mp.dps = 15 + +def test_multiple_intervals(): + y,err = quad(lambda x: sign(x), [-0.5, 0.9, 1], maxdegree=2, error=True) + assert abs(y-0.5) < 2*err + +def test_quad_symmetry(): + assert quadts(sin, [-1, 1]) == 0 + assert quadgl(sin, [-1, 1]) == 0 + +def test_quad_infinite_mirror(): + # Check mirrored infinite interval + assert ae(quad(lambda x: exp(-x*x), [inf,-inf]), -sqrt(pi)) + assert ae(quad(lambda x: exp(x), [0,-inf]), -1) + +def test_quadgl_linear(): + assert quadgl(lambda x: x, [0, 1], maxdegree=1).ae(0.5) + +def test_complex_integration(): + assert quadts(lambda x: x, [0, 1+j]).ae(j) + +def test_quadosc(): + mp.dps = 15 + assert quadosc(lambda x: sin(x)/x, [0, inf], period=2*pi).ae(pi/2) + +# Double integrals +def test_double_trivial(): + assert ae(quadts(lambda x, y: x, [0, 1], [0, 1]), 0.5) + assert ae(quadts(lambda x, y: x, [-1, 1], [-1, 1]), 0.0) + +def test_double_1(): + assert ae(quadts(lambda x, y: cos(x+y/2), [-pi/2, pi/2], [0, pi]), 4) + +def test_double_2(): + assert ae(quadts(lambda x, y: (x-1)/((1-x*y)*log(x*y)), [0, 1], [0, 1]), euler) + +def test_double_3(): + assert ae(quadts(lambda x, y: 1/sqrt(1+x*x+y*y), [-1, 1], [-1, 1]), 4*log(2+sqrt(3))-2*pi/3) + +def test_double_4(): + assert ae(quadts(lambda x, y: 1/(1-x*x * y*y), [0, 1], [0, 1]), pi**2 / 8) + +def test_double_5(): + assert ae(quadts(lambda x, y: 1/(1-x*y), [0, 1], [0, 1]), pi**2 / 6) + +def test_double_6(): + assert ae(quadts(lambda x, y: exp(-(x+y)), [0, inf], [0, inf]), 1) + +def test_double_7(): + assert ae(quadts(lambda x, y: exp(-x*x-y*y), [-inf, inf], [-inf, inf]), pi) + + +# Test integrals from "Experimentation in Mathematics" by Borwein, +# Bailey & Girgensohn +def test_expmath_integrals(): + for prec in [15, 30, 50]: + mp.dps = prec + assert ae(quadts(lambda x: x/sinh(x), [0, inf]), pi**2 / 4) + assert ae(quadts(lambda x: log(x)**2 / (1+x**2), [0, inf]), 
pi**3 / 8) + assert ae(quadts(lambda x: (1+x**2)/(1+x**4), [0, inf]), pi/sqrt(2)) + assert ae(quadts(lambda x: log(x)/cosh(x)**2, [0, inf]), log(pi)-2*log(2)-euler) + assert ae(quadts(lambda x: log(1+x**3)/(1-x+x**2), [0, inf]), 2*pi*log(3)/sqrt(3)) + assert ae(quadts(lambda x: log(x)**2 / (x**2+x+1), [0, 1]), 8*pi**3 / (81*sqrt(3))) + assert ae(quadts(lambda x: log(cos(x))**2, [0, pi/2]), pi/2 * (log(2)**2+pi**2/12)) + assert ae(quadts(lambda x: x**2 / sin(x)**2, [0, pi/2]), pi*log(2)) + assert ae(quadts(lambda x: x**2/sqrt(exp(x)-1), [0, inf]), 4*pi*(log(2)**2 + pi**2/12)) + assert ae(quadts(lambda x: x*exp(-x)*sqrt(1-exp(-2*x)), [0, inf]), pi*(1+2*log(2))/8) + mp.dps = 15 + +# Do not reach full accuracy +@pytest.mark.xfail +def test_expmath_fail(): + assert ae(quadts(lambda x: sqrt(tan(x)), [0, pi/2]), pi*sqrt(2)/2) + assert ae(quadts(lambda x: atan(x)/(x*sqrt(1-x**2)), [0, 1]), pi*log(1+sqrt(2))/2) + assert ae(quadts(lambda x: log(1+x**2)/x**2, [0, 1]), pi/2-log(2)) + assert ae(quadts(lambda x: x**2/((1+x**4)*sqrt(1-x**4)), [0, 1]), pi/8) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_rootfinding.py b/phivenv/Lib/site-packages/mpmath/tests/test_rootfinding.py new file mode 100644 index 0000000000000000000000000000000000000000..7c3c06463682eb1fd60efeb75b809bbb932a241c --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_rootfinding.py @@ -0,0 +1,91 @@ +import pytest +from mpmath import * +from mpmath.calculus.optimization import Secant, Muller, Bisection, Illinois, \ + Pegasus, Anderson, Ridder, ANewton, Newton, MNewton, MDNewton + +def test_findroot(): + # old tests, assuming secant + mp.dps = 15 + assert findroot(lambda x: 4*x-3, mpf(5)).ae(0.75) + assert findroot(sin, mpf(3)).ae(pi) + assert findroot(sin, (mpf(3), mpf(3.14))).ae(pi) + assert findroot(lambda x: x*x+1, mpc(2+2j)).ae(1j) + # test all solvers with 1 starting point + f = lambda x: cos(x) + for solver in [Newton, Secant, MNewton, Muller, ANewton]: + x = findroot(f, 2., solver=solver) + assert abs(f(x)) < eps + # test all solvers with interval of 2 points + for solver in [Secant, Muller, Bisection, Illinois, Pegasus, Anderson, + Ridder]: + x = findroot(f, (1., 2.), solver=solver) + assert abs(f(x)) < eps + # test types + f = lambda x: (x - 2)**2 + + assert isinstance(findroot(f, 1, tol=1e-10), mpf) + assert isinstance(iv.findroot(f, 1., tol=1e-10), iv.mpf) + assert isinstance(fp.findroot(f, 1, tol=1e-10), float) + assert isinstance(fp.findroot(f, 1+0j, tol=1e-10), complex) + + # issue 401 + with pytest.raises(ValueError): + with workprec(2): + findroot(lambda x: x**2 - 4456178*x + 60372201703370, + mpc(real='5.278e+13', imag='-5.278e+13')) + + # issue 192 + with pytest.raises(ValueError): + findroot(lambda x: -1, 0) + + # issue 387 + with pytest.raises(ValueError): + findroot(lambda p: (1 - p)**30 - 1, 0.9) + +def test_bisection(): + # issue 273 + assert findroot(lambda x: x**2-1,(0,2),solver='bisect') == 1 + +def test_mnewton(): + f = lambda x: polyval([1,3,3,1],x) + x = findroot(f, -0.9, solver='mnewton') + assert abs(f(x)) < eps + +def test_anewton(): + f = lambda x: (x - 2)**100 + x = findroot(f, 1., solver=ANewton) + assert abs(f(x)) < eps + +def test_muller(): + f = lambda x: (2 + x)**3 + 2 + x = findroot(f, 1., solver=Muller) + assert abs(f(x)) < eps + +def test_multiplicity(): + for i in range(1, 5): + assert multiplicity(lambda x: (x - 1)**i, 1) == i + assert multiplicity(lambda x: x**2, 1) == 0 + +def test_multidimensional(): + def f(*x): + return [3*x[0]**2-2*x[1]**2-1, 
x[0]**2-2*x[0]+x[1]**2+2*x[1]-8] + assert mnorm(jacobian(f, (1,-2)) - matrix([[6,8],[0,-2]]),1) < 1.e-7 + for x, error in MDNewton(mp, f, (1,-2), verbose=0, + norm=lambda x: norm(x, inf)): + pass + assert norm(f(*x), 2) < 1e-14 + # The Chinese mathematician Zhu Shijie was the very first to solve this + # nonlinear system 700 years ago + f1 = lambda x, y: -x + 2*y + f2 = lambda x, y: (x**2 + x*(y**2 - 2) - 4*y) / (x + 4) + f3 = lambda x, y: sqrt(x**2 + y**2) + def f(x, y): + f1x = f1(x, y) + return (f2(x, y) - f1x, f3(x, y) - f1x) + x = findroot(f, (10, 10)) + assert [int(round(i)) for i in x] == [3, 4] + +def test_trivial(): + assert findroot(lambda x: 0, 1) == 1 + assert findroot(lambda x: x, 0) == 0 + #assert findroot(lambda x, y: x + y, (1, -1)) == (1, -1) diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_special.py b/phivenv/Lib/site-packages/mpmath/tests/test_special.py new file mode 100644 index 0000000000000000000000000000000000000000..30825abd89ada00f937260cb51ef649546be7021 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_special.py @@ -0,0 +1,113 @@ +from mpmath import * + +def test_special(): + assert inf == inf + assert inf != -inf + assert -inf == -inf + assert inf != nan + assert nan != nan + assert isnan(nan) + assert --inf == inf + assert abs(inf) == inf + assert abs(-inf) == inf + assert abs(nan) != abs(nan) + + assert isnan(inf - inf) + assert isnan(inf + (-inf)) + assert isnan(-inf - (-inf)) + + assert isnan(inf + nan) + assert isnan(-inf + nan) + + assert mpf(2) + inf == inf + assert 2 + inf == inf + assert mpf(2) - inf == -inf + assert 2 - inf == -inf + + assert inf > 3 + assert 3 < inf + assert 3 > -inf + assert -inf < 3 + assert inf > mpf(3) + assert mpf(3) < inf + assert mpf(3) > -inf + assert -inf < mpf(3) + + assert not (nan < 3) + assert not (nan > 3) + + assert isnan(inf * 0) + assert isnan(-inf * 0) + assert inf * 3 == inf + assert inf * -3 == -inf + assert -inf * 3 == -inf + assert -inf * -3 == inf + assert inf * inf == inf + assert -inf * -inf == inf + + assert isnan(nan / 3) + assert inf / -3 == -inf + assert inf / 3 == inf + assert 3 / inf == 0 + assert -3 / inf == 0 + assert 0 / inf == 0 + assert isnan(inf / inf) + assert isnan(inf / -inf) + assert isnan(inf / nan) + + assert mpf('inf') == mpf('+inf') == inf + assert mpf('-inf') == -inf + assert isnan(mpf('nan')) + + assert isinf(inf) + assert isinf(-inf) + assert not isinf(mpf(0)) + assert not isinf(nan) + +def test_special_powers(): + assert inf**3 == inf + assert isnan(inf**0) + assert inf**-3 == 0 + assert (-inf)**2 == inf + assert (-inf)**3 == -inf + assert isnan((-inf)**0) + assert (-inf)**-2 == 0 + assert (-inf)**-3 == 0 + assert isnan(nan**5) + assert isnan(nan**0) + +def test_functions_special(): + assert exp(inf) == inf + assert exp(-inf) == 0 + assert isnan(exp(nan)) + assert log(inf) == inf + assert isnan(log(nan)) + assert isnan(sin(inf)) + assert isnan(sin(nan)) + assert atan(inf).ae(pi/2) + assert atan(-inf).ae(-pi/2) + assert isnan(sqrt(nan)) + assert sqrt(inf) == inf + +def test_convert_special(): + float_inf = 1e300 * 1e300 + float_ninf = -float_inf + float_nan = float_inf/float_ninf + assert mpf(3) * float_inf == inf + assert mpf(3) * float_ninf == -inf + assert isnan(mpf(3) * float_nan) + assert not (mpf(3) < float_nan) + assert not (mpf(3) > float_nan) + assert not (mpf(3) <= float_nan) + assert not (mpf(3) >= float_nan) + assert float(mpf('1e1000')) == float_inf + assert float(mpf('-1e1000')) == float_ninf + assert float(mpf('1e100000000000000000')) == float_inf 
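+ # the same saturation must hold in the negative direction, and exponents + # far below the double range must underflow to 0.0 rather than raise: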
+ assert float(mpf('-1e100000000000000000')) == float_ninf + assert float(mpf('1e-100000000000000000')) == 0.0 + +def test_div_bug(): + assert isnan(nan/1) + assert isnan(nan/2) + assert inf/2 == inf + assert (-inf)/2 == -inf diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_str.py b/phivenv/Lib/site-packages/mpmath/tests/test_str.py new file mode 100644 index 0000000000000000000000000000000000000000..569244f252c057ec1029b7efbd8b0ffbfbc47522 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_str.py @@ -0,0 +1,14 @@ +from mpmath import nstr, matrix, inf + +def test_nstr(): + m = matrix([[0.75, 0.190940654, -0.0299195971], + [0.190940654, 0.65625, 0.205663228], + [-0.0299195971, 0.205663228, 0.64453125e-20]]) + assert nstr(m, 4, min_fixed=-inf) == \ + '''[ 0.75 0.1909 -0.02992] +[ 0.1909 0.6563 0.2057] +[-0.02992 0.2057 0.000000000000000000006445]''' + assert nstr(m, 4) == \ + '''[ 0.75 0.1909 -0.02992] +[ 0.1909 0.6563 0.2057] +[-0.02992 0.2057 6.445e-21]''' diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_summation.py b/phivenv/Lib/site-packages/mpmath/tests/test_summation.py new file mode 100644 index 0000000000000000000000000000000000000000..04ffd29f994e1e6310678eec292c0e03f2d6c725 --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_summation.py @@ -0,0 +1,53 @@ +from mpmath import * + +def test_sumem(): + mp.dps = 15 + assert sumem(lambda k: 1/k**2.5, [50, 100]).ae(0.0012524505324784962) + assert sumem(lambda k: k**4 + 3*k + 1, [10, 100]).ae(2050333103) + +def test_nsum(): + mp.dps = 15 + assert nsum(lambda x: x**2, [1, 3]) == 14 + assert nsum(lambda k: 1/factorial(k), [0, inf]).ae(e) + assert nsum(lambda k: (-1)**(k+1) / k, [1, inf]).ae(log(2)) + assert nsum(lambda k: (-1)**(k+1) / k**2, [1, inf]).ae(pi**2 / 12) + assert nsum(lambda k: (-1)**k / log(k), [2, inf]).ae(0.9242998972229388) + assert nsum(lambda k: 1/k**2, [1, inf]).ae(pi**2 / 6) + assert nsum(lambda k: 2**k/fac(k), [0, inf]).ae(exp(2)) + assert nsum(lambda k: 1/k**2, [4, inf], method='e').ae(0.2838229557371153) + assert abs(fp.nsum(lambda k: 1/k**4, [1, fp.inf]) - 1.082323233711138) < 1e-5 + assert abs(fp.nsum(lambda k: 1/k**4, [1, fp.inf], method='e') - 1.082323233711138) < 1e-4 + +def test_nprod(): + mp.dps = 15 + assert nprod(lambda k: exp(1/k**2), [1,inf], method='r').ae(exp(pi**2/6)) + assert nprod(lambda x: x**2, [1, 3]) == 36 + +def test_fsum(): + mp.dps = 15 + assert fsum([]) == 0 + assert fsum([-4]) == -4 + assert fsum([2,3]) == 5 + assert fsum([1e-100,1]) == 1 + assert fsum([1,1e-100]) == 1 + assert fsum([1e100,1]) == 1e100 + assert fsum([1,1e100]) == 1e100 + assert fsum([1e-100,0]) == 1e-100 + assert fsum([1e-100,1e100,1e-100]) == 1e100 + assert fsum([2,1+1j,1]) == 4+1j + assert fsum([2,inf,3]) == inf + assert fsum([2,-1], absolute=1) == 3 + assert fsum([2,-1], squared=1) == 5 + assert fsum([1,1+j], squared=1) == 1+2j + assert fsum([1,3+4j], absolute=1) == 6 + assert fsum([1,2+3j], absolute=1, squared=1) == 14 + assert isnan(fsum([inf,-inf])) + assert fsum([inf,-inf], absolute=1) == inf + assert fsum([inf,-inf], squared=1) == inf + assert fsum([inf,-inf], absolute=1, squared=1) == inf + assert iv.fsum([1,mpi(2,3)]) == mpi(3,4) + +def test_fprod(): + mp.dps = 15 + assert fprod([]) == 1 + assert fprod([2,3]) == 6 diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_trig.py b/phivenv/Lib/site-packages/mpmath/tests/test_trig.py new file mode 100644 index 0000000000000000000000000000000000000000..c70a2a0ff4c44c784404ecdb15357d5b91a992d6 --- /dev/null +++ 
b/phivenv/Lib/site-packages/mpmath/tests/test_trig.py @@ -0,0 +1,136 @@ +from mpmath import * +from mpmath.libmp import * + +def test_trig_misc_hard(): + mp.prec = 53 + # Worst-case input for an IEEE double, from a paper by Kahan + x = ldexp(6381956970095103,797) + assert cos(x) == mpf('-4.6871659242546277e-19') + assert sin(x) == 1 + + mp.prec = 150 + a = mpf(10**50) + mp.prec = 53 + assert sin(a).ae(-0.7896724934293100827) + assert cos(a).ae(-0.6135286082336635622) + + # Check relative accuracy close to x = zero + assert sin(1e-100) == 1e-100 # when rounding to nearest + assert sin(1e-6).ae(9.999999999998333e-007, rel_eps=2e-15, abs_eps=0) + assert sin(1e-6j).ae(1.0000000000001666e-006j, rel_eps=2e-15, abs_eps=0) + assert sin(-1e-6j).ae(-1.0000000000001666e-006j, rel_eps=2e-15, abs_eps=0) + assert cos(1e-100) == 1 + assert cos(1e-6).ae(0.9999999999995) + assert cos(-1e-6j).ae(1.0000000000005) + assert tan(1e-100) == 1e-100 + assert tan(1e-6).ae(1.0000000000003335e-006, rel_eps=2e-15, abs_eps=0) + assert tan(1e-6j).ae(9.9999999999966644e-007j, rel_eps=2e-15, abs_eps=0) + assert tan(-1e-6j).ae(-9.9999999999966644e-007j, rel_eps=2e-15, abs_eps=0) + +def test_trig_near_zero(): + mp.dps = 15 + + for r in [round_nearest, round_down, round_up, round_floor, round_ceiling]: + assert sin(0, rounding=r) == 0 + assert cos(0, rounding=r) == 1 + + a = mpf('1e-100') + b = mpf('-1e-100') + + assert sin(a, rounding=round_nearest) == a + assert sin(a, rounding=round_down) < a + assert sin(a, rounding=round_floor) < a + assert sin(a, rounding=round_up) >= a + assert sin(a, rounding=round_ceiling) >= a + assert sin(b, rounding=round_nearest) == b + assert sin(b, rounding=round_down) > b + assert sin(b, rounding=round_floor) <= b + assert sin(b, rounding=round_up) <= b + assert sin(b, rounding=round_ceiling) > b + + assert cos(a, rounding=round_nearest) == 1 + assert cos(a, rounding=round_down) < 1 + assert cos(a, rounding=round_floor) < 1 + assert cos(a, rounding=round_up) == 1 + assert cos(a, rounding=round_ceiling) == 1 + assert cos(b, rounding=round_nearest) == 1 + assert cos(b, rounding=round_down) < 1 + assert cos(b, rounding=round_floor) < 1 + assert cos(b, rounding=round_up) == 1 + assert cos(b, rounding=round_ceiling) == 1 + + +def test_trig_near_n_pi(): + + mp.dps = 15 + a = [n*pi for n in [1, 2, 6, 11, 100, 1001, 10000, 100001]] + mp.dps = 135 + a.append(10**100 * pi) + mp.dps = 15 + + assert sin(a[0]) == mpf('1.2246467991473531772e-16') + assert sin(a[1]) == mpf('-2.4492935982947063545e-16') + assert sin(a[2]) == mpf('-7.3478807948841190634e-16') + assert sin(a[3]) == mpf('4.8998251578625894243e-15') + assert sin(a[4]) == mpf('1.9643867237284719452e-15') + assert sin(a[5]) == mpf('-8.8632615209684813458e-15') + assert sin(a[6]) == mpf('-4.8568235395684898392e-13') + assert sin(a[7]) == mpf('3.9087342299491231029e-11') + assert sin(a[8]) == mpf('-1.369235466754566993528e-36') + + r = round_nearest + assert cos(a[0], rounding=r) == -1 + assert cos(a[1], rounding=r) == 1 + assert cos(a[2], rounding=r) == 1 + assert cos(a[3], rounding=r) == -1 + assert cos(a[4], rounding=r) == 1 + assert cos(a[5], rounding=r) == -1 + assert cos(a[6], rounding=r) == 1 + assert cos(a[7], rounding=r) == -1 + assert cos(a[8], rounding=r) == 1 + + r = round_up + assert cos(a[0], rounding=r) == -1 + assert cos(a[1], rounding=r) == 1 + assert cos(a[2], rounding=r) == 1 + assert cos(a[3], rounding=r) == -1 + assert cos(a[4], rounding=r) == 1 + assert cos(a[5], rounding=r) == -1 + assert cos(a[6], rounding=r) == 1 + assert 
cos(a[7], rounding=r) == -1 + assert cos(a[8], rounding=r) == 1 + + r = round_down + assert cos(a[0], rounding=r) > -1 + assert cos(a[1], rounding=r) < 1 + assert cos(a[2], rounding=r) < 1 + assert cos(a[3], rounding=r) > -1 + assert cos(a[4], rounding=r) < 1 + assert cos(a[5], rounding=r) > -1 + assert cos(a[6], rounding=r) < 1 + assert cos(a[7], rounding=r) > -1 + assert cos(a[8], rounding=r) < 1 + + r = round_floor + assert cos(a[0], rounding=r) == -1 + assert cos(a[1], rounding=r) < 1 + assert cos(a[2], rounding=r) < 1 + assert cos(a[3], rounding=r) == -1 + assert cos(a[4], rounding=r) < 1 + assert cos(a[5], rounding=r) == -1 + assert cos(a[6], rounding=r) < 1 + assert cos(a[7], rounding=r) == -1 + assert cos(a[8], rounding=r) < 1 + + r = round_ceiling + assert cos(a[0], rounding=r) > -1 + assert cos(a[1], rounding=r) == 1 + assert cos(a[2], rounding=r) == 1 + assert cos(a[3], rounding=r) > -1 + assert cos(a[4], rounding=r) == 1 + assert cos(a[5], rounding=r) > -1 + assert cos(a[6], rounding=r) == 1 + assert cos(a[7], rounding=r) > -1 + assert cos(a[8], rounding=r) == 1 + + mp.dps = 15 diff --git a/phivenv/Lib/site-packages/mpmath/tests/test_visualization.py b/phivenv/Lib/site-packages/mpmath/tests/test_visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..81ffd05194322f00e4c75dc02bc862b383468bff --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/test_visualization.py @@ -0,0 +1,32 @@ +""" +Limited tests of the visualization module. Right now it just makes +sure that passing custom Axes works. + +""" + +from mpmath import mp, fp + +def test_axes(): + try: + import matplotlib + version = matplotlib.__version__.split("-")[0] + version = version.split(".")[:2] + if [int(_) for _ in version] < [0,99]: + raise ImportError + import pylab + except ImportError: + print("\nSkipping test (pylab not available or too old version)\n") + return + fig = pylab.figure() + axes = fig.add_subplot(111) + for ctx in [mp, fp]: + ctx.plot(lambda x: x**2, [0, 3], axes=axes) + assert axes.get_xlabel() == 'x' + assert axes.get_ylabel() == 'f(x)' + + fig = pylab.figure() + axes = fig.add_subplot(111) + for ctx in [mp, fp]: + ctx.cplot(lambda z: z, [-2, 2], [-10, 10], axes=axes) + assert axes.get_xlabel() == 'Re(z)' + assert axes.get_ylabel() == 'Im(z)' diff --git a/phivenv/Lib/site-packages/mpmath/tests/torture.py b/phivenv/Lib/site-packages/mpmath/tests/torture.py new file mode 100644 index 0000000000000000000000000000000000000000..845d5c6d7d017e51e1ed9a8fe3106cfa32fd967f --- /dev/null +++ b/phivenv/Lib/site-packages/mpmath/tests/torture.py @@ -0,0 +1,224 @@ +""" +Torture tests for asymptotics and high precision evaluation of +special functions. + +(Other torture tests may also be placed here.) + +Running this file (gmpy recommended!) takes several CPU minutes. +With Python 2.6+, multiprocessing is used automatically to run tests +in parallel if many cores are available. (A single test may take between +a second and several minutes; possibly more.) + +The idea: + +* We evaluate functions at positive, negative, imaginary, 45- and 135-degree + complex values with magnitudes between 10^-20 to 10^20, at precisions between + 5 and 150 digits (we can go even higher for fast functions). + +* Comparing the result from two different precision levels provides + a strong consistency check (particularly for functions that use + different algorithms at different precision levels). 
+ +* That the computation finishes at all (without failure), within reasonable + time, provides a check that evaluation works at all: that the code runs, + that it doesn't get stuck in an infinite loop, and that it doesn't use + some extremely slow algorithm where it could use a faster one. + +TODO: + +* Speed up those functions that take a long time to finish! +* Generalize to test more cases; more options. +* Implement a timeout mechanism. +* Some functions are notably absent, including the following: + * inverse trigonometric functions (some become inaccurate for complex arguments) + * ci, si (not implemented properly for large complex arguments) + * zeta functions (need to modify test not to try too large imaginary values) + * and others... + +""" + + +import sys, os +from timeit import default_timer as clock + +if "-nogmpy" in sys.argv: + sys.argv.remove('-nogmpy') + os.environ['MPMATH_NOGMPY'] = 'Y' + +filt = '' +if not sys.argv[-1].endswith(".py"): + filt = sys.argv[-1] + +from mpmath import * +from mpmath.libmp.backend import exec_ + +def test_asymp(f, maxdps=150, verbose=False, huge_range=False): + dps = [5,15,25,50,90,150,500,1500,5000,10000] + dps = [p for p in dps if p <= maxdps] + def check(x,y,p,inpt): + if abs(x-y)/abs(y) < workprec(20)(power)(10, -p+1): + return + print() + print("Error!") + print("Input:", inpt) + print("dps =", p) + print("Result 1:", x) + print("Result 2:", y) + print("Absolute error:", abs(x-y)) + print("Relative error:", abs(x-y)/abs(y)) + raise AssertionError + exponents = list(range(-20,20)) + if huge_range: + exponents += [-1000, -100, -50, 50, 100, 1000] + for n in exponents: + if verbose: + sys.stdout.write(". ") + mp.dps = 25 + xpos = mpf(10)**n / 1.1287 + xneg = -xpos + ximag = xpos*j + xcomplex1 = xpos*(1+j) + xcomplex2 = xpos*(-1+j) + for i in range(len(dps)): + if verbose: + print("Testing dps = %s" % dps[i]) + mp.dps = dps[i] + new = f(xpos), f(xneg), f(ximag), f(xcomplex1), f(xcomplex2) + if i != 0: + p = dps[i-1] + check(prev[0], new[0], p, xpos) + check(prev[1], new[1], p, xneg) + check(prev[2], new[2], p, ximag) + check(prev[3], new[3], p, xcomplex1) + check(prev[4], new[4], p, xcomplex2) + prev = new + if verbose: + print() + +a1, a2, a3, a4, a5 = 1.5, -2.25, 3.125, 4, 2 + +def test_bernoulli_huge(): + p, q = bernfrac(9000) + assert p % 10**10 == 9636701091 + assert q == 4091851784687571609141381951327092757255270 + mp.dps = 15 + assert str(bernoulli(10**100)) == '-2.58183325604736e+987675256497386331227838638980680030172857347883537824464410652557820800494271520411283004120790908623' + mp.dps = 50 + assert str(bernoulli(10**100)) == '-2.5818332560473632073252488656039475548106223822913e+987675256497386331227838638980680030172857347883537824464410652557820800494271520411283004120790908623' + mp.dps = 15 + +cases = """\ +test_bernoulli_huge() +test_asymp(lambda z: +pi, maxdps=10000) +test_asymp(lambda z: +e, maxdps=10000) +test_asymp(lambda z: +ln2, maxdps=10000) +test_asymp(lambda z: +ln10, maxdps=10000) +test_asymp(lambda z: +phi, maxdps=10000) +test_asymp(lambda z: +catalan, maxdps=5000) +test_asymp(lambda z: +euler, maxdps=5000) +test_asymp(lambda z: +glaisher, maxdps=1000) +test_asymp(lambda z: +khinchin, maxdps=1000) +test_asymp(lambda z: +twinprime, maxdps=150) +test_asymp(lambda z: stieltjes(2), maxdps=150) +test_asymp(lambda z: +mertens, maxdps=150) +test_asymp(lambda z: +apery, maxdps=5000) +test_asymp(sqrt, maxdps=10000, huge_range=True) +test_asymp(cbrt, maxdps=5000, huge_range=True) +test_asymp(lambda z: root(z,4), maxdps=5000,
huge_range=True) +test_asymp(lambda z: root(z,-5), maxdps=5000, huge_range=True) +test_asymp(exp, maxdps=5000, huge_range=True) +test_asymp(expm1, maxdps=1500) +test_asymp(ln, maxdps=5000, huge_range=True) +test_asymp(cosh, maxdps=5000) +test_asymp(sinh, maxdps=5000) +test_asymp(tanh, maxdps=1500) +test_asymp(sin, maxdps=5000, huge_range=True) +test_asymp(cos, maxdps=5000, huge_range=True) +test_asymp(tan, maxdps=1500) +test_asymp(agm, maxdps=1500, huge_range=True) +test_asymp(ellipk, maxdps=1500) +test_asymp(ellipe, maxdps=1500) +test_asymp(lambertw, huge_range=True) +test_asymp(lambda z: lambertw(z,-1)) +test_asymp(lambda z: lambertw(z,1)) +test_asymp(lambda z: lambertw(z,4)) +test_asymp(gamma) +test_asymp(loggamma) # huge_range=True ? +test_asymp(ei) +test_asymp(e1) +test_asymp(li, huge_range=True) +test_asymp(ci) +test_asymp(si) +test_asymp(chi) +test_asymp(shi) +test_asymp(erf) +test_asymp(erfc) +test_asymp(erfi) +test_asymp(lambda z: besselj(2, z)) +test_asymp(lambda z: bessely(2, z)) +test_asymp(lambda z: besseli(2, z)) +test_asymp(lambda z: besselk(2, z)) +test_asymp(lambda z: besselj(-2.25, z)) +test_asymp(lambda z: bessely(-2.25, z)) +test_asymp(lambda z: besseli(-2.25, z)) +test_asymp(lambda z: besselk(-2.25, z)) +test_asymp(airyai) +test_asymp(airybi) +test_asymp(lambda z: hyp0f1(a1, z)) +test_asymp(lambda z: hyp1f1(a1, a2, z)) +test_asymp(lambda z: hyp1f2(a1, a2, a3, z)) +test_asymp(lambda z: hyp2f0(a1, a2, z)) +test_asymp(lambda z: hyperu(a1, a2, z)) +test_asymp(lambda z: hyp2f1(a1, a2, a3, z)) +test_asymp(lambda z: hyp2f2(a1, a2, a3, a4, z)) +test_asymp(lambda z: hyp2f3(a1, a2, a3, a4, a5, z)) +test_asymp(lambda z: coulombf(a1, a2, z)) +test_asymp(lambda z: coulombg(a1, a2, z)) +test_asymp(lambda z: polylog(2,z)) +test_asymp(lambda z: polylog(3,z)) +test_asymp(lambda z: polylog(-2,z)) +test_asymp(lambda z: expint(4, z)) +test_asymp(lambda z: expint(-4, z)) +test_asymp(lambda z: expint(2.25, z)) +test_asymp(lambda z: gammainc(2.5, z, 5)) +test_asymp(lambda z: gammainc(2.5, 5, z)) +test_asymp(lambda z: hermite(3, z)) +test_asymp(lambda z: hermite(2.5, z)) +test_asymp(lambda z: legendre(3, z)) +test_asymp(lambda z: legendre(4, z)) +test_asymp(lambda z: legendre(2.5, z)) +test_asymp(lambda z: legenp(a1, a2, z)) +test_asymp(lambda z: legenq(a1, a2, z), maxdps=90) # abnormally slow +test_asymp(lambda z: jtheta(1, z, 0.5)) +test_asymp(lambda z: jtheta(2, z, 0.5)) +test_asymp(lambda z: jtheta(3, z, 0.5)) +test_asymp(lambda z: jtheta(4, z, 0.5)) +test_asymp(lambda z: jtheta(1, z, 0.5, 1)) +test_asymp(lambda z: jtheta(2, z, 0.5, 1)) +test_asymp(lambda z: jtheta(3, z, 0.5, 1)) +test_asymp(lambda z: jtheta(4, z, 0.5, 1)) +test_asymp(barnesg, maxdps=90) +""" + +def testit(line): + if filt in line: + print(line) + t1 = clock() + exec_(line, globals(), locals()) + t2 = clock() + elapsed = t2-t1 + print("Time:", elapsed, "for", line, "(OK)") + +if __name__ == '__main__': + try: + from multiprocessing import Pool + mapf = Pool(None).map + print("Running tests with multiprocessing") + except ImportError: + print("Not using multiprocessing") + mapf = map + t1 = clock() + tasks = cases.splitlines() + list(mapf(testit, tasks)) # list() forces evaluation when the lazy built-in map() fallback is used + t2 = clock() + print("Cumulative wall time:", t2-t1) diff --git a/phivenv/Lib/site-packages/networkx/__init__.py b/phivenv/Lib/site-packages/networkx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eda9418b9866eecbb8304d5a665facca6a7545ee --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/__init__.py @@ -0,0 +1,49 @@ +""" +NetworkX
+======== + +NetworkX is a Python package for the creation, manipulation, and study of the +structure, dynamics, and functions of complex networks. + +See https://networkx.org for complete documentation. +""" + +__version__ = "3.2.1" + + +# These are imported in order as listed +from networkx.lazy_imports import _lazy_import + +from networkx.exception import * + +from networkx import utils +from networkx.utils.backends import _dispatch + +from networkx import classes +from networkx.classes import filters +from networkx.classes import * + +from networkx import convert +from networkx.convert import * + +from networkx import convert_matrix +from networkx.convert_matrix import * + +from networkx import relabel +from networkx.relabel import * + +from networkx import generators +from networkx.generators import * + +from networkx import readwrite +from networkx.readwrite import * + +# Need to test with SciPy, when available +from networkx import algorithms +from networkx.algorithms import * + +from networkx import linalg +from networkx.linalg import * + +from networkx import drawing +from networkx.drawing import * diff --git a/phivenv/Lib/site-packages/networkx/algorithms/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..db6d6cebb1900a636fc34822102bf0271f703952 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/__init__.py @@ -0,0 +1,132 @@ +from networkx.algorithms.assortativity import * +from networkx.algorithms.asteroidal import * +from networkx.algorithms.boundary import * +from networkx.algorithms.bridges import * +from networkx.algorithms.chains import * +from networkx.algorithms.centrality import * +from networkx.algorithms.chordal import * +from networkx.algorithms.cluster import * +from networkx.algorithms.clique import * +from networkx.algorithms.communicability_alg import * +from networkx.algorithms.components import * +from networkx.algorithms.coloring import * +from networkx.algorithms.core import * +from networkx.algorithms.covering import * +from networkx.algorithms.cycles import * +from networkx.algorithms.cuts import * +from networkx.algorithms.d_separation import * +from networkx.algorithms.dag import * +from networkx.algorithms.distance_measures import * +from networkx.algorithms.distance_regular import * +from networkx.algorithms.dominance import * +from networkx.algorithms.dominating import * +from networkx.algorithms.efficiency_measures import * +from networkx.algorithms.euler import * +from networkx.algorithms.graphical import * +from networkx.algorithms.hierarchy import * +from networkx.algorithms.hybrid import * +from networkx.algorithms.link_analysis import * +from networkx.algorithms.link_prediction import * +from networkx.algorithms.lowest_common_ancestors import * +from networkx.algorithms.isolate import * +from networkx.algorithms.matching import * +from networkx.algorithms.minors import * +from networkx.algorithms.mis import * +from networkx.algorithms.moral import * +from networkx.algorithms.non_randomness import * +from networkx.algorithms.operators import * +from networkx.algorithms.planarity import * +from networkx.algorithms.planar_drawing import * +from networkx.algorithms.reciprocity import * +from networkx.algorithms.regular import * +from networkx.algorithms.richclub import * +from networkx.algorithms.shortest_paths import * +from networkx.algorithms.similarity import * +from networkx.algorithms.graph_hashing import * +from 
networkx.algorithms.simple_paths import * +from networkx.algorithms.smallworld import * +from networkx.algorithms.smetric import * +from networkx.algorithms.structuralholes import * +from networkx.algorithms.sparsifiers import * +from networkx.algorithms.summarization import * +from networkx.algorithms.swap import * +from networkx.algorithms.time_dependent import * +from networkx.algorithms.traversal import * +from networkx.algorithms.triads import * +from networkx.algorithms.vitality import * +from networkx.algorithms.voronoi import * +from networkx.algorithms.walks import * +from networkx.algorithms.wiener import * +from networkx.algorithms.polynomials import * + +# Make certain subpackages available to the user as direct imports from +# the `networkx` namespace. +from networkx.algorithms import approximation +from networkx.algorithms import assortativity +from networkx.algorithms import bipartite +from networkx.algorithms import node_classification +from networkx.algorithms import centrality +from networkx.algorithms import chordal +from networkx.algorithms import cluster +from networkx.algorithms import clique +from networkx.algorithms import components +from networkx.algorithms import connectivity +from networkx.algorithms import community +from networkx.algorithms import coloring +from networkx.algorithms import flow +from networkx.algorithms import isomorphism +from networkx.algorithms import link_analysis +from networkx.algorithms import lowest_common_ancestors +from networkx.algorithms import operators +from networkx.algorithms import shortest_paths +from networkx.algorithms import tournament +from networkx.algorithms import traversal +from networkx.algorithms import tree + +# Make certain functions from some of the previous subpackages available +# to the user as direct imports from the `networkx` namespace.
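+# For example, the re-exports below make ``nx.maximum_flow`` and +# ``nx.is_isomorphic`` available directly after ``import networkx as nx``, +# without importing ``networkx.algorithms.flow`` or +# ``networkx.algorithms.isomorphism`` explicitly.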
+from networkx.algorithms.bipartite import complete_bipartite_graph +from networkx.algorithms.bipartite import is_bipartite +from networkx.algorithms.bipartite import projected_graph +from networkx.algorithms.connectivity import all_pairs_node_connectivity +from networkx.algorithms.connectivity import all_node_cuts +from networkx.algorithms.connectivity import average_node_connectivity +from networkx.algorithms.connectivity import edge_connectivity +from networkx.algorithms.connectivity import edge_disjoint_paths +from networkx.algorithms.connectivity import k_components +from networkx.algorithms.connectivity import k_edge_components +from networkx.algorithms.connectivity import k_edge_subgraphs +from networkx.algorithms.connectivity import k_edge_augmentation +from networkx.algorithms.connectivity import is_k_edge_connected +from networkx.algorithms.connectivity import minimum_edge_cut +from networkx.algorithms.connectivity import minimum_node_cut +from networkx.algorithms.connectivity import node_connectivity +from networkx.algorithms.connectivity import node_disjoint_paths +from networkx.algorithms.connectivity import stoer_wagner +from networkx.algorithms.flow import capacity_scaling +from networkx.algorithms.flow import cost_of_flow +from networkx.algorithms.flow import gomory_hu_tree +from networkx.algorithms.flow import max_flow_min_cost +from networkx.algorithms.flow import maximum_flow +from networkx.algorithms.flow import maximum_flow_value +from networkx.algorithms.flow import min_cost_flow +from networkx.algorithms.flow import min_cost_flow_cost +from networkx.algorithms.flow import minimum_cut +from networkx.algorithms.flow import minimum_cut_value +from networkx.algorithms.flow import network_simplex +from networkx.algorithms.isomorphism import could_be_isomorphic +from networkx.algorithms.isomorphism import fast_could_be_isomorphic +from networkx.algorithms.isomorphism import faster_could_be_isomorphic +from networkx.algorithms.isomorphism import is_isomorphic +from networkx.algorithms.isomorphism.vf2pp import * +from networkx.algorithms.tree.branchings import maximum_branching +from networkx.algorithms.tree.branchings import maximum_spanning_arborescence +from networkx.algorithms.tree.branchings import minimum_branching +from networkx.algorithms.tree.branchings import minimum_spanning_arborescence +from networkx.algorithms.tree.branchings import ArborescenceIterator +from networkx.algorithms.tree.coding import * +from networkx.algorithms.tree.decomposition import * +from networkx.algorithms.tree.mst import * +from networkx.algorithms.tree.operations import * +from networkx.algorithms.tree.recognition import * +from networkx.algorithms.tournament import is_tournament diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e39dc00aa250b05cbd8f0ce9b38cf32ecc752946 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__init__.py @@ -0,0 +1,24 @@ +"""Approximations of graph properties and Heuristic methods for optimization. + +The functions in this class are not imported into the top-level ``networkx`` +namespace so the easiest way to use them is with:: + + >>> from networkx.algorithms import approximation + +Another option is to import the specific function with +``from networkx.algorithms.approximation import function_name``. 
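+
+For example (an illustration added by the editor, reusing the doctest of
+``maximum_independent_set`` from this package)::
+
+    >>> import networkx as nx
+    >>> from networkx.algorithms import approximation
+    >>> approximation.maximum_independent_set(nx.path_graph(10))
+    {0, 2, 4, 6, 9}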
+ +""" +from networkx.algorithms.approximation.clustering_coefficient import * +from networkx.algorithms.approximation.clique import * +from networkx.algorithms.approximation.connectivity import * +from networkx.algorithms.approximation.distance_measures import * +from networkx.algorithms.approximation.dominating_set import * +from networkx.algorithms.approximation.kcomponents import * +from networkx.algorithms.approximation.matching import * +from networkx.algorithms.approximation.ramsey import * +from networkx.algorithms.approximation.steinertree import * +from networkx.algorithms.approximation.traveling_salesman import * +from networkx.algorithms.approximation.treewidth import * +from networkx.algorithms.approximation.vertex_cover import * +from networkx.algorithms.approximation.maxcut import * diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49b382acae7608d94ca7b9c839a0b4c0ccf1b68e Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e433debd67f440c37b8794d679b7ea1f1e6a747 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..031daacf8a663996b362444bba4001f210d19eb7 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f209a647b5f8ecbae33ddcf1e6da3c321e325171 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4237a36a629789d70d12c91e5580b1800ede1bb6 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..936262c17500e98ff34c17f1b314fb1095350860 Binary files /dev/null and 
b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee55b2248198d82b836d29b975b2e45f472aa96f Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53499bc14398e565cc9bc8ba60377ed3af155b05 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8972e1a6d4bc9a743a364ba14b23b7b85439bdec Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..983486fad849ac600d96e3dbf54454f6a90ffc9a Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e476910adf4917776bd41a39e324604b9cb07b8 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d385d979bbe1efe2daf777cf2757189de320481b Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b39c942659d9b4047deb968ff19b047c9c11e0f Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..cca511cc925fa649783da86ba69e0a6e6107a6e3 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/clique.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/clique.py new file mode 100644 index 0000000000000000000000000000000000000000..4a3d8beba6103172988f49aa1d7a91bf670f7201 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/clique.py @@ -0,0 +1,258 @@ +"""Functions for computing large cliques and maximum independent sets.""" +import networkx as nx +from networkx.algorithms.approximation import ramsey +from networkx.utils import not_implemented_for + +__all__ = [ + "clique_removal", + "max_clique", + "large_clique_size", + "maximum_independent_set", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def maximum_independent_set(G): + """Returns an approximate maximum independent set. + + Independent set or stable set is a set of vertices in a graph, no two of + which are adjacent. That is, it is a set I of vertices such that for every + two vertices in I, there is no edge connecting the two. Equivalently, each + edge in the graph has at most one endpoint in I. The size of an independent + set is the number of vertices it contains [1]_. + + A maximum independent set is a largest independent set for a given graph G + and its size is denoted $\\alpha(G)$. The problem of finding such a set is called + the maximum independent set problem and is an NP-hard optimization problem. + As such, it is unlikely that there exists an efficient algorithm for finding + a maximum independent set of a graph. + + The Independent Set algorithm is based on [2]_. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + iset : Set + The apx-maximum independent set + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.maximum_independent_set(G) + {0, 2, 4, 6, 9} + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + Finds the $O(|V|/(log|V|)^2)$ apx of independent set in the worst case. + + References + ---------- + .. [1] `Wikipedia: Independent set + `_ + .. [2] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + """ + iset, _ = clique_removal(G) + return iset + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def max_clique(G): + r"""Find the Maximum Clique + + Finds the $O(|V|/(log|V|)^2)$ apx of maximum clique/independent set + in the worst case. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + clique : set + The apx-maximum clique of the graph + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.max_clique(G) + {8, 9} + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + A clique in an undirected graph G = (V, E) is a subset of the vertex set + `C \subseteq V` such that for every two vertices in C there exists an edge + connecting the two. This is equivalent to saying that the subgraph + induced by C is complete (in some cases, the term clique may also refer + to the subgraph). + + A maximum clique is a clique of the largest possible size in a given graph. 
+ The clique number `\omega(G)` of a graph G is the number of + vertices in a maximum clique in G. The intersection number of + G is the smallest number of cliques that together cover all edges of G. + + https://en.wikipedia.org/wiki/Maximum_clique + + References + ---------- + .. [1] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + doi:10.1007/BF01994876 + """ + # finding the maximum clique in a graph is equivalent to finding + # the independent set in the complementary graph + cgraph = nx.complement(G) + iset, _ = clique_removal(cgraph) + return iset + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def clique_removal(G): + r"""Repeatedly remove cliques from the graph. + + Results in a $O(|V|/(\log |V|)^2)$ approximation of maximum clique + and independent set. Returns the largest independent set found, along + with found maximal cliques. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + max_ind_cliques : (set, list) tuple + 2-tuple of Maximal Independent Set and list of maximal cliques (sets). + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.clique_removal(G) + ({0, 2, 4, 6, 9}, [{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}]) + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + References + ---------- + .. [1] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + """ + graph = G.copy() + c_i, i_i = ramsey.ramsey_R2(graph) + cliques = [c_i] + isets = [i_i] + while graph: + graph.remove_nodes_from(c_i) + c_i, i_i = ramsey.ramsey_R2(graph) + if c_i: + cliques.append(c_i) + if i_i: + isets.append(i_i) + # Determine the largest independent set as measured by cardinality. + maxiset = max(isets, key=len) + return maxiset, cliques + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def large_clique_size(G): + """Find the size of a large clique in a graph. + + A *clique* is a subset of nodes in which each pair of nodes is + adjacent. This function is a heuristic for finding the size of a + large clique in the graph. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + k: integer + The size of a large clique in the graph. + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.large_clique_size(G) + 2 + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + This implementation is from [1]_. Its worst case time complexity is + :math:`O(n d^2)`, where *n* is the number of nodes in the graph and + *d* is the maximum degree. + + This function is a heuristic, which means it may work well in + practice, but there is no rigorous mathematical guarantee on the + ratio between the returned number and the actual largest clique size + in the graph. + + References + ---------- + .. [1] Pattabiraman, Bharath, et al. + "Fast Algorithms for the Maximum Clique Problem on Massive Graphs + with Applications to Overlapping Community Detection." + *Internet Mathematics* 11.4-5 (2015): 421--448. + + + See also + -------- + + :func:`networkx.algorithms.approximation.clique.max_clique` + A function that returns an approximate maximum clique with a + guarantee on the approximation ratio. 
+ + :mod:`networkx.algorithms.clique` + Functions for finding the exact maximum clique in a graph. + + """ + degrees = G.degree + + def _clique_heuristic(G, U, size, best_size): + if not U: + return max(best_size, size) + u = max(U, key=degrees) + U.remove(u) + N_prime = {v for v in G[u] if degrees[v] >= best_size} + return _clique_heuristic(G, U & N_prime, size + 1, best_size) + + best_size = 0 + nodes = (u for u in G if degrees[u] >= best_size) + for u in nodes: + neighbors = {v for v in G[u] if degrees[v] >= best_size} + best_size = _clique_heuristic(G, neighbors, 1, best_size) + return best_size diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py new file mode 100644 index 0000000000000000000000000000000000000000..e15ac68460bb5704fc3bf3726f5a6d6405efa320 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py @@ -0,0 +1,66 @@ +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["average_clustering"] + + +@not_implemented_for("directed") +@py_random_state(2) +@nx._dispatch(name="approximate_average_clustering") +def average_clustering(G, trials=1000, seed=None): + r"""Estimates the average clustering coefficient of G. + + The local clustering of each node in `G` is the fraction of triangles + that actually exist over all possible triangles in its neighborhood. + The average clustering coefficient of a graph `G` is the mean of + local clusterings. + + This function finds an approximate average clustering coefficient + for G by repeating `n` times (defined in `trials`) the following + experiment: choose a node at random, choose two of its neighbors + at random, and check if they are connected. The approximate + coefficient is the fraction of triangles found over the number + of trials [1]_. + + Parameters + ---------- + G : NetworkX graph + + trials : integer + Number of trials to perform (default 1000). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + c : float + Approximated average clustering coefficient. + + Examples + -------- + >>> from networkx.algorithms import approximation + >>> G = nx.erdos_renyi_graph(10, 0.2, seed=10) + >>> approximation.average_clustering(G, trials=1000, seed=10) + 0.214 + + References + ---------- + .. [1] Schank, Thomas, and Dorothea Wagner. Approximating clustering + coefficient and transitivity. Universität Karlsruhe, Fakultät für + Informatik, 2004. 
+        https://doi.org/10.5445/IR/1000001239
+
+    """
+    n = len(G)
+    triangles = 0
+    nodes = list(G)
+    for i in [int(seed.random() * n) for i in range(trials)]:
+        nbrs = list(G[nodes[i]])
+        if len(nbrs) < 2:
+            continue
+        u, v = seed.sample(nbrs, 2)
+        if u in G[v]:
+            triangles += 1
+    return triangles / trials
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/connectivity.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/connectivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc5e7125937309e53ab3a2312434406447f5b0cd
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/connectivity.py
@@ -0,0 +1,412 @@
+""" Fast approximation for node connectivity
+"""
+import itertools
+from operator import itemgetter
+
+import networkx as nx
+
+__all__ = [
+    "local_node_connectivity",
+    "node_connectivity",
+    "all_pairs_node_connectivity",
+]
+
+
+@nx._dispatch(name="approximate_local_node_connectivity")
+def local_node_connectivity(G, source, target, cutoff=None):
+    """Compute node connectivity between source and target.
+
+    Pairwise or local node connectivity between two distinct and nonadjacent
+    nodes is the minimum number of nodes that must be removed (minimum
+    separating cutset) to disconnect them. By Menger's theorem, this is equal
+    to the number of node independent paths (paths that share no nodes other
+    than source and target), which is what this function computes.
+
+    This algorithm is a fast approximation that gives a strict lower
+    bound on the actual number of node independent paths between two nodes [1]_.
+    It works for both directed and undirected graphs.
+
+    Parameters
+    ----------
+
+    G : NetworkX graph
+
+    source : node
+        Starting node for node connectivity
+
+    target : node
+        Ending node for node connectivity
+
+    cutoff : integer
+        Maximum node connectivity to consider. If None, the minimum degree
+        of source or target is used as a cutoff. Default value None.
+
+    Returns
+    -------
+    k: integer
+       pairwise node connectivity
+
+    Examples
+    --------
+    >>> # Platonic octahedral graph has node connectivity 4
+    >>> # for each non-adjacent node pair
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.octahedral_graph()
+    >>> approx.local_node_connectivity(G, 0, 5)
+    4
+
+    Notes
+    -----
+    This algorithm [1]_ finds node independent paths between two nodes by
+    computing their shortest path using BFS, marking the nodes of the path
+    found as 'used' and then searching other shortest paths excluding the
+    nodes marked as used until no more paths exist. It is not exact because
+    a shortest path could use nodes that, if the path were longer, may belong
+    to two different node independent paths. Thus it only guarantees a
+    strict lower bound on node connectivity.
+
+    Note that the authors propose a further refinement, losing accuracy and
+    gaining speed, which is not implemented yet.
+
+    See also
+    --------
+    all_pairs_node_connectivity
+    node_connectivity
+
+    References
+    ----------
+    .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+        Node-Independent Paths.
+        Santa Fe Institute Working Paper #01-07-035
+        http://eclectic.ss.uci.edu/~drwhite/working.pdf
+
+    """
+    if target == source:
+        raise nx.NetworkXError("source and target have to be different nodes.")
+
+    # Maximum possible node independent paths
+    if G.is_directed():
+        possible = min(G.out_degree(source), G.in_degree(target))
+    else:
+        possible = min(G.degree(source), G.degree(target))
+
+    K = 0
+    if not possible:
+        return K
+
+    if cutoff is None:
+        cutoff = float("inf")
+
+    exclude = set()
+    for i in range(min(possible, cutoff)):
+        try:
+            path = _bidirectional_shortest_path(G, source, target, exclude)
+            exclude.update(set(path))
+            K += 1
+        except nx.NetworkXNoPath:
+            break
+
+    return K
+
+
+@nx._dispatch(name="approximate_node_connectivity")
+def node_connectivity(G, s=None, t=None):
+    r"""Returns an approximation for node connectivity for a graph or digraph G.
+
+    Node connectivity is equal to the minimum number of nodes that
+    must be removed to disconnect G or render it trivial. By Menger's theorem,
+    this is equal to the number of node independent paths (paths that
+    share no nodes other than source and target).
+
+    If source and target nodes are provided, this function returns the
+    local node connectivity: the minimum number of nodes that must be
+    removed to break all paths from source to target in G.
+
+    This algorithm is based on a fast approximation that gives a strict lower
+    bound on the actual number of node independent paths between two nodes [1]_.
+    It works for both directed and undirected graphs.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    s : node
+        Source node. Optional. Default value: None.
+
+    t : node
+        Target node. Optional. Default value: None.
+
+    Returns
+    -------
+    K : integer
+        Node connectivity of G, or local node connectivity if source
+        and target are provided.
+
+    Examples
+    --------
+    >>> # Platonic octahedral graph is 4-node-connected
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.octahedral_graph()
+    >>> approx.node_connectivity(G)
+    4
+
+    Notes
+    -----
+    This algorithm [1]_ finds node independent paths between two nodes by
+    computing their shortest path using BFS, marking the nodes of the path
+    found as 'used' and then searching other shortest paths excluding the
+    nodes marked as used until no more paths exist. It is not exact because
+    a shortest path could use nodes that, if the path were longer, may belong
+    to two different node independent paths. Thus it only guarantees a
+    strict lower bound on node connectivity.
+
+    See also
+    --------
+    all_pairs_node_connectivity
+    local_node_connectivity
+
+    References
+    ----------
+    .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+        Node-Independent Paths.
+        Santa Fe Institute Working Paper #01-07-035
+        http://eclectic.ss.uci.edu/~drwhite/working.pdf
+
+    """
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local node connectivity
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return local_node_connectivity(G, s, t)
+
+    # Global node connectivity
+    if G.is_directed():
+        connected_func = nx.is_weakly_connected
+        iter_func = itertools.permutations
+
+        def neighbors(v):
+            return itertools.chain(G.predecessors(v), G.successors(v))
+
+    else:
+        connected_func = nx.is_connected
+        iter_func = itertools.combinations
+        neighbors = G.neighbors
+
+    if not connected_func(G):
+        return 0
+
+    # Choose a node with minimum degree
+    v, minimum_degree = min(G.degree(), key=itemgetter(1))
+    # Node connectivity is bounded by minimum degree
+    K = minimum_degree
+    # compute local node connectivity with all non-neighbor nodes
+    # and store the minimum
+    for w in set(G) - set(neighbors(v)) - {v}:
+        K = min(K, local_node_connectivity(G, v, w, cutoff=K))
+    # Same for non-adjacent pairs of neighbors of v
+    for x, y in iter_func(neighbors(v), 2):
+        if y not in G[x] and x != y:
+            K = min(K, local_node_connectivity(G, x, y, cutoff=K))
+    return K
+
+
+@nx._dispatch(name="approximate_all_pairs_node_connectivity")
+def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):
+    """Compute node connectivity between all pairs of nodes.
+
+    Pairwise or local node connectivity between two distinct and nonadjacent
+    nodes is the minimum number of nodes that must be removed (minimum
+    separating cutset) to disconnect them. By Menger's theorem, this is equal
+    to the number of node independent paths (paths that share no nodes other
+    than source and target), which is what this function computes.
+
+    This algorithm is a fast approximation that gives a strict lower
+    bound on the actual number of node independent paths between two nodes [1]_.
+    It works for both directed and undirected graphs.
+
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    nbunch: container
+        Container of nodes. If provided, node connectivity will be computed
+        only over pairs of nodes in nbunch.
+
+    cutoff : integer
+        Maximum node connectivity to consider. If None, the minimum degree
+        of source or target is used as a cutoff in each pair of nodes.
+        Default value None.
+
+    Returns
+    -------
+    K : dictionary
+        Dictionary, keyed by source and target, of pairwise node connectivity
+
+    Examples
+    --------
+    A 3 node cycle with one extra node attached has connectivity 2 between all
+    nodes in the cycle and connectivity 1 between the extra node and the rest:
+
+    >>> G = nx.cycle_graph(3)
+    >>> G.add_edge(2, 3)
+    >>> import pprint  # for nice dictionary formatting
+    >>> pprint.pprint(nx.all_pairs_node_connectivity(G))
+    {0: {1: 2, 2: 2, 3: 1},
+     1: {0: 2, 2: 2, 3: 1},
+     2: {0: 2, 1: 2, 3: 1},
+     3: {0: 1, 1: 1, 2: 1}}
+
+    See Also
+    --------
+    local_node_connectivity
+    node_connectivity
+
+    References
+    ----------
+    .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+        Node-Independent Paths.
Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + """ + if nbunch is None: + nbunch = G + else: + nbunch = set(nbunch) + + directed = G.is_directed() + if directed: + iter_func = itertools.permutations + else: + iter_func = itertools.combinations + + all_pairs = {n: {} for n in nbunch} + + for u, v in iter_func(nbunch, 2): + k = local_node_connectivity(G, u, v, cutoff=cutoff) + all_pairs[u][v] = k + if not directed: + all_pairs[v][u] = k + + return all_pairs + + +def _bidirectional_shortest_path(G, source, target, exclude): + """Returns shortest path between source and target ignoring nodes in the + container 'exclude'. + + Parameters + ---------- + + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + exclude: container + Container for nodes to exclude from the search for shortest paths + + Returns + ------- + path: list + Shortest path between source and target ignoring nodes in 'exclude' + + Raises + ------ + NetworkXNoPath + If there is no path or if nodes are adjacent and have only one path + between them + + Notes + ----- + This function and its helper are originally from + networkx.algorithms.shortest_paths.unweighted and are modified to + accept the extra parameter 'exclude', which is a container for nodes + already used in other paths that should be ignored. + + References + ---------- + .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + + """ + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target, exclude) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from source to w + while w is not None: + path.append(w) + w = pred[w] + path.reverse() + # from w to target + w = succ[path[-1]] + while w is not None: + path.append(w) + w = succ[w] + + return path + + +def _bidirectional_pred_succ(G, source, target, exclude): + # does BFS from both source and target and meets in the middle + # excludes nodes in the container "exclude" from the search + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.predecessors + Gsucc = G.successors + else: + Gpred = G.neighbors + Gsucc = G.neighbors + + # predecessor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + level = 0 + + while forward_fringe and reverse_fringe: + # Make sure that we iterate one step forward and one step backwards + # thus source and target will only trigger "found path" when they are + # adjacent and then they can be safely included in the container 'exclude' + level += 1 + if level % 2 != 0: + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc(v): + if w in exclude: + continue + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: + return pred, succ, w # found path + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred(v): + if w in exclude: + continue + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: + return pred, succ, w # found path + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/distance_measures.py 
b/phivenv/Lib/site-packages/networkx/algorithms/approximation/distance_measures.py new file mode 100644 index 0000000000000000000000000000000000000000..9b817b3317c8d793a66c44b1ae98f29417fb777f --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/distance_measures.py @@ -0,0 +1,141 @@ +"""Distance measures approximated metrics.""" + +import networkx as nx +from networkx.utils.decorators import py_random_state + +__all__ = ["diameter"] + + +@py_random_state(1) +@nx._dispatch(name="approximate_diameter") +def diameter(G, seed=None): + """Returns a lower bound on the diameter of the graph G. + + The function computes a lower bound on the diameter (i.e., the maximum eccentricity) + of a directed or undirected graph G. The procedure used varies depending on the graph + being directed or not. + + If G is an `undirected` graph, then the function uses the `2-sweep` algorithm [1]_. + The main idea is to pick the farthest node from a random node and return its eccentricity. + + Otherwise, if G is a `directed` graph, the function uses the `2-dSweep` algorithm [2]_, + The procedure starts by selecting a random source node $s$ from which it performs a + forward and a backward BFS. Let $a_1$ and $a_2$ be the farthest nodes in the forward and + backward cases, respectively. Then, it computes the backward eccentricity of $a_1$ using + a backward BFS and the forward eccentricity of $a_2$ using a forward BFS. + Finally, it returns the best lower bound between the two. + + In both cases, the time complexity is linear with respect to the size of G. + + Parameters + ---------- + G : NetworkX graph + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + d : integer + Lower Bound on the Diameter of G + + Raises + ------ + NetworkXError + If the graph is empty or + If the graph is undirected and not connected or + If the graph is directed and not strongly connected. + + See Also + -------- + networkx.algorithms.distance_measures.diameter + + References + ---------- + .. [1] Magnien, Clémence, Matthieu Latapy, and Michel Habib. + *Fast computation of empirically tight bounds for the diameter of massive graphs.* + Journal of Experimental Algorithmics (JEA), 2009. + https://arxiv.org/pdf/0904.2728.pdf + .. [2] Crescenzi, Pierluigi, Roberto Grossi, Leonardo Lanzi, and Andrea Marino. + *On computing the diameter of real-world directed (weighted) graphs.* + International Symposium on Experimental Algorithms. Springer, Berlin, Heidelberg, 2012. + https://courses.cs.ut.ee/MTAT.03.238/2014_fall/uploads/Main/diameter.pdf + """ + # if G is empty + if not G: + raise nx.NetworkXError("Expected non-empty NetworkX graph!") + # if there's only a node + if G.number_of_nodes() == 1: + return 0 + # if G is directed + if G.is_directed(): + return _two_sweep_directed(G, seed) + # else if G is undirected + return _two_sweep_undirected(G, seed) + + +def _two_sweep_undirected(G, seed): + """Helper function for finding a lower bound on the diameter + for undirected Graphs. + + The idea is to pick the farthest node from a random node + and return its eccentricity. + + ``G`` is a NetworkX undirected graph. + + .. 
note:: + + ``seed`` is a random.Random or numpy.random.RandomState instance + """ + # select a random source node + source = seed.choice(list(G)) + # get the distances to the other nodes + distances = nx.shortest_path_length(G, source) + # if some nodes have not been visited, then the graph is not connected + if len(distances) != len(G): + raise nx.NetworkXError("Graph not connected.") + # take a node that is (one of) the farthest nodes from the source + *_, node = distances + # return the eccentricity of the node + return nx.eccentricity(G, node) + + +def _two_sweep_directed(G, seed): + """Helper function for finding a lower bound on the diameter + for directed Graphs. + + It implements 2-dSweep, the directed version of the 2-sweep algorithm. + The algorithm follows the following steps. + 1. Select a source node $s$ at random. + 2. Perform a forward BFS from $s$ to select a node $a_1$ at the maximum + distance from the source, and compute $LB_1$, the backward eccentricity of $a_1$. + 3. Perform a backward BFS from $s$ to select a node $a_2$ at the maximum + distance from the source, and compute $LB_2$, the forward eccentricity of $a_2$. + 4. Return the maximum between $LB_1$ and $LB_2$. + + ``G`` is a NetworkX directed graph. + + .. note:: + + ``seed`` is a random.Random or numpy.random.RandomState instance + """ + # get a new digraph G' with the edges reversed in the opposite direction + G_reversed = G.reverse() + # select a random source node + source = seed.choice(list(G)) + # compute forward distances from source + forward_distances = nx.shortest_path_length(G, source) + # compute backward distances from source + backward_distances = nx.shortest_path_length(G_reversed, source) + # if either the source can't reach every node or not every node + # can reach the source, then the graph is not strongly connected + n = len(G) + if len(forward_distances) != n or len(backward_distances) != n: + raise nx.NetworkXError("DiGraph not strongly connected.") + # take a node a_1 at the maximum distance from the source in G + *_, a_1 = forward_distances + # take a node a_2 at the maximum distance from the source in G_reversed + *_, a_2 = backward_distances + # return the max between the backward eccentricity of a_1 and the forward eccentricity of a_2 + return max(nx.eccentricity(G_reversed, a_1), nx.eccentricity(G, a_2)) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/dominating_set.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/dominating_set.py new file mode 100644 index 0000000000000000000000000000000000000000..97edb172f94bf0bd078b5f92b05c2f757ae79d8c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/dominating_set.py @@ -0,0 +1,126 @@ +"""Functions for finding node and edge dominating sets. + +A `dominating set`_ for an undirected graph *G* with vertex set *V* +and edge set *E* is a subset *D* of *V* such that every vertex not in +*D* is adjacent to at least one member of *D*. An `edge dominating set`_ +is a subset *F* of *E* such that every edge not in *F* is +incident to an endpoint of at least one edge in *F*. + +.. _dominating set: https://en.wikipedia.org/wiki/Dominating_set +.. _edge dominating set: https://en.wikipedia.org/wiki/Edge_dominating_set + +""" +import networkx as nx + +from ...utils import not_implemented_for +from ..matching import maximal_matching + +__all__ = ["min_weighted_dominating_set", "min_edge_dominating_set"] + + +# TODO Why doesn't this algorithm work for directed graphs? 
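+# (Editor's note -- a hedged assumption, not from the upstream authors: the
+# greedy argument below uses *undirected* closed neighborhoods N[v]; for a
+# digraph one would first have to fix a direction of domination, and the
+# set-cover style cost-effectiveness bound no longer follows as stated.)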
+@not_implemented_for("directed") +@nx._dispatch(node_attrs="weight") +def min_weighted_dominating_set(G, weight=None): + r"""Returns a dominating set that approximates the minimum weight node + dominating set. + + Parameters + ---------- + G : NetworkX graph + Undirected graph. + + weight : string + The node attribute storing the weight of an node. If provided, + the node attribute with this key must be a number for each + node. If not provided, each node is assumed to have weight one. + + Returns + ------- + min_weight_dominating_set : set + A set of nodes, the sum of whose weights is no more than `(\log + w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of + each node in the graph and `w(V^*)` denotes the sum of the + weights of each node in the minimum weight dominating set. + + Notes + ----- + This algorithm computes an approximate minimum weighted dominating + set for the graph `G`. The returned solution has weight `(\log + w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of each + node in the graph and `w(V^*)` denotes the sum of the weights of + each node in the minimum weight dominating set for the graph. + + This implementation of the algorithm runs in $O(m)$ time, where $m$ + is the number of edges in the graph. + + References + ---------- + .. [1] Vazirani, Vijay V. + *Approximation Algorithms*. + Springer Science & Business Media, 2001. + + """ + # The unique dominating set for the null graph is the empty set. + if len(G) == 0: + return set() + + # This is the dominating set that will eventually be returned. + dom_set = set() + + def _cost(node_and_neighborhood): + """Returns the cost-effectiveness of greedily choosing the given + node. + + `node_and_neighborhood` is a two-tuple comprising a node and its + closed neighborhood. + + """ + v, neighborhood = node_and_neighborhood + return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set) + + # This is a set of all vertices not already covered by the + # dominating set. + vertices = set(G) + # This is a dictionary mapping each node to the closed neighborhood + # of that node. + neighborhoods = {v: {v} | set(G[v]) for v in G} + + # Continue until all vertices are adjacent to some node in the + # dominating set. + while vertices: + # Find the most cost-effective node to add, along with its + # closed neighborhood. + dom_node, min_set = min(neighborhoods.items(), key=_cost) + # Add the node to the dominating set and reduce the remaining + # set of nodes to cover. + dom_set.add(dom_node) + del neighborhoods[dom_node] + vertices -= min_set + + return dom_set + + +@nx._dispatch +def min_edge_dominating_set(G): + r"""Returns minimum cardinality edge dominating set. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + min_edge_dominating_set : set + Returns a set of dominating edges whose size is no more than 2 * OPT. + + Notes + ----- + The algorithm computes an approximate solution to the edge dominating set + problem. The result is no more than 2 * OPT in terms of size of the set. + Runtime of the algorithm is $O(|E|)$. 
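+
+    Examples
+    --------
+    A sketch added by the editor. The function delegates to
+    ``nx.maximal_matching``, so the output below follows from the edge
+    iteration order of ``nx.path_graph``:
+
+    >>> G = nx.path_graph(5)
+    >>> sorted(nx.approximation.min_edge_dominating_set(G))
+    [(0, 1), (2, 3)]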
+ """ + if not G: + raise ValueError("Expected non-empty NetworkX graph!") + return maximal_matching(G) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/kcomponents.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/kcomponents.py new file mode 100644 index 0000000000000000000000000000000000000000..a5df6cc686c381c954b056e242b10ab8b7a164f5 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/kcomponents.py @@ -0,0 +1,369 @@ +""" Fast approximation for k-component structure +""" +import itertools +from collections import defaultdict +from collections.abc import Mapping +from functools import cached_property + +import networkx as nx +from networkx.algorithms.approximation import local_node_connectivity +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for + +__all__ = ["k_components"] + + +@not_implemented_for("directed") +@nx._dispatch(name="approximate_k_components") +def k_components(G, min_density=0.95): + r"""Returns the approximate k-component structure of a graph G. + + A `k`-component is a maximal subgraph of a graph G that has, at least, + node connectivity `k`: we need to remove at least `k` nodes to break it + into more components. `k`-components have an inherent hierarchical + structure because they are nested in terms of connectivity: a connected + graph can contain several 2-components, each of which can contain + one or more 3-components, and so forth. + + This implementation is based on the fast heuristics to approximate + the `k`-component structure of a graph [1]_. Which, in turn, it is based on + a fast approximation algorithm for finding good lower bounds of the number + of node independent paths between two nodes [2]_. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + min_density : Float + Density relaxation threshold. Default value 0.95 + + Returns + ------- + k_components : dict + Dictionary with connectivity level `k` as key and a list of + sets of nodes that form a k-component of level `k` as values. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + >>> # Petersen graph has 10 nodes and it is triconnected, thus all + >>> # nodes are in a single component on all three connectivity levels + >>> from networkx.algorithms import approximation as apxa + >>> G = nx.petersen_graph() + >>> k_components = apxa.k_components(G) + + Notes + ----- + The logic of the approximation algorithm for computing the `k`-component + structure [1]_ is based on repeatedly applying simple and fast algorithms + for `k`-cores and biconnected components in order to narrow down the + number of pairs of nodes over which we have to compute White and Newman's + approximation algorithm for finding node independent paths [2]_. More + formally, this algorithm is based on Whitney's theorem, which states + an inclusion relation among node connectivity, edge connectivity, and + minimum degree for any graph G. This theorem implies that every + `k`-component is nested inside a `k`-edge-component, which in turn, + is contained in a `k`-core. Thus, this algorithm computes node independent + paths among pairs of nodes in each biconnected part of each `k`-core, + and repeats this procedure for each `k` from 3 to the maximal core number + of a node in the input graph. 
+ + Because, in practice, many nodes of the core of level `k` inside a + bicomponent actually are part of a component of level k, the auxiliary + graph needed for the algorithm is likely to be very dense. Thus, we use + a complement graph data structure (see `AntiGraph`) to save memory. + AntiGraph only stores information of the edges that are *not* present + in the actual auxiliary graph. When applying algorithms to this + complement graph data structure, it behaves as if it were the dense + version. + + See also + -------- + k_components + + References + ---------- + .. [1] Torrents, J. and F. Ferraro (2015) Structural Cohesion: + Visualization and Heuristics for Fast Computation. + https://arxiv.org/pdf/1503.04476v1 + + .. [2] White, Douglas R., and Mark Newman (2001) A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + https://www.santafe.edu/research/results/working-papers/fast-approximation-algorithms-for-finding-node-ind + + .. [3] Moody, J. and D. White (2003). Social cohesion and embeddedness: + A hierarchical conception of social groups. + American Sociological Review 68(1), 103--28. + https://doi.org/10.2307/3088904 + + """ + # Dictionary with connectivity level (k) as keys and a list of + # sets of nodes that form a k-component as values + k_components = defaultdict(list) + # make a few functions local for speed + node_connectivity = local_node_connectivity + k_core = nx.k_core + core_number = nx.core_number + biconnected_components = nx.biconnected_components + combinations = itertools.combinations + # Exact solution for k = {1,2} + # There is a linear time algorithm for triconnectivity, if we had an + # implementation available we could start from k = 4. + for component in nx.connected_components(G): + # isolated nodes have connectivity 0 + comp = set(component) + if len(comp) > 1: + k_components[1].append(comp) + for bicomponent in nx.biconnected_components(G): + # avoid considering dyads as bicomponents + bicomp = set(bicomponent) + if len(bicomp) > 2: + k_components[2].append(bicomp) + # There is no k-component of k > maximum core number + # \kappa(G) <= \lambda(G) <= \delta(G) + g_cnumber = core_number(G) + max_core = max(g_cnumber.values()) + for k in range(3, max_core + 1): + C = k_core(G, k, core_number=g_cnumber) + for nodes in biconnected_components(C): + # Build a subgraph SG induced by the nodes that are part of + # each biconnected component of the k-core subgraph C. 
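+            # (Editor's note) The auxiliary graph H built below is an
+            # _AntiGraph: an edge (u, v) is *stored* only when the approximate
+            # local connectivity K drops below k, so the dense complement view
+            # that biconnected_components() actually traverses connects
+            # exactly the pairs whose approximate connectivity is >= k.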
+ if len(nodes) < k: + continue + SG = G.subgraph(nodes) + # Build auxiliary graph + H = _AntiGraph() + H.add_nodes_from(SG.nodes()) + for u, v in combinations(SG, 2): + K = node_connectivity(SG, u, v, cutoff=k) + if k > K: + H.add_edge(u, v) + for h_nodes in biconnected_components(H): + if len(h_nodes) <= k: + continue + SH = H.subgraph(h_nodes) + for Gc in _cliques_heuristic(SG, SH, k, min_density): + for k_nodes in biconnected_components(Gc): + Gk = nx.k_core(SG.subgraph(k_nodes), k) + if len(Gk) <= k: + continue + k_components[k].append(set(Gk)) + return k_components + + +def _cliques_heuristic(G, H, k, min_density): + h_cnumber = nx.core_number(H) + for i, c_value in enumerate(sorted(set(h_cnumber.values()), reverse=True)): + cands = {n for n, c in h_cnumber.items() if c == c_value} + # Skip checking for overlap for the highest core value + if i == 0: + overlap = False + else: + overlap = set.intersection( + *[{x for x in H[n] if x not in cands} for n in cands] + ) + if overlap and len(overlap) < k: + SH = H.subgraph(cands | overlap) + else: + SH = H.subgraph(cands) + sh_cnumber = nx.core_number(SH) + SG = nx.k_core(G.subgraph(SH), k) + while not (_same(sh_cnumber) and nx.density(SH) >= min_density): + # This subgraph must be writable => .copy() + SH = H.subgraph(SG).copy() + if len(SH) <= k: + break + sh_cnumber = nx.core_number(SH) + sh_deg = dict(SH.degree()) + min_deg = min(sh_deg.values()) + SH.remove_nodes_from(n for n, d in sh_deg.items() if d == min_deg) + SG = nx.k_core(G.subgraph(SH), k) + else: + yield SG + + +def _same(measure, tol=0): + vals = set(measure.values()) + if (max(vals) - min(vals)) <= tol: + return True + return False + + +class _AntiGraph(nx.Graph): + """ + Class for complement graphs. + + The main goal is to be able to work with big and dense graphs with + a low memory footprint. + + In this class you add the edges that *do not exist* in the dense graph, + the report methods of the class return the neighbors, the edges and + the degree as if it was the dense graph. Thus it's possible to use + an instance of this class with some of NetworkX functions. In this + case we only use k-core, connected_components, and biconnected_components. + """ + + all_edge_dict = {"weight": 1} + + def single_edge_dict(self): + return self.all_edge_dict + + edge_attr_dict_factory = single_edge_dict # type: ignore[assignment] + + def __getitem__(self, n): + """Returns a dict of neighbors of node n in the dense graph. + + Parameters + ---------- + n : node + A node in the graph. + + Returns + ------- + adj_dict : dictionary + The adjacency dictionary for nodes connected to n. + + """ + all_edge_dict = self.all_edge_dict + return { + node: all_edge_dict for node in set(self._adj) - set(self._adj[n]) - {n} + } + + def neighbors(self, n): + """Returns an iterator over all neighbors of node n in the + dense graph. 
+ """ + try: + return iter(set(self._adj) - set(self._adj[n]) - {n}) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the graph.") from err + + class AntiAtlasView(Mapping): + """An adjacency inner dict for AntiGraph""" + + def __init__(self, graph, node): + self._graph = graph + self._atlas = graph._adj[node] + self._node = node + + def __len__(self): + return len(self._graph) - len(self._atlas) - 1 + + def __iter__(self): + return (n for n in self._graph if n not in self._atlas and n != self._node) + + def __getitem__(self, nbr): + nbrs = set(self._graph._adj) - set(self._atlas) - {self._node} + if nbr in nbrs: + return self._graph.all_edge_dict + raise KeyError(nbr) + + class AntiAdjacencyView(AntiAtlasView): + """An adjacency outer dict for AntiGraph""" + + def __init__(self, graph): + self._graph = graph + self._atlas = graph._adj + + def __len__(self): + return len(self._atlas) + + def __iter__(self): + return iter(self._graph) + + def __getitem__(self, node): + if node not in self._graph: + raise KeyError(node) + return self._graph.AntiAtlasView(self._graph, node) + + @cached_property + def adj(self): + return self.AntiAdjacencyView(self) + + def subgraph(self, nodes): + """This subgraph method returns a full AntiGraph. Not a View""" + nodes = set(nodes) + G = _AntiGraph() + G.add_nodes_from(nodes) + for n in G: + Gnbrs = G.adjlist_inner_dict_factory() + G._adj[n] = Gnbrs + for nbr, d in self._adj[n].items(): + if nbr in G._adj: + Gnbrs[nbr] = d + G._adj[nbr][n] = d + G.graph = self.graph + return G + + class AntiDegreeView(nx.reportviews.DegreeView): + def __iter__(self): + all_nodes = set(self._succ) + for n in self._nodes: + nbrs = all_nodes - set(self._succ[n]) - {n} + yield (n, len(nbrs)) + + def __getitem__(self, n): + nbrs = set(self._succ) - set(self._succ[n]) - {n} + # AntiGraph is a ThinGraph so all edges have weight 1 + return len(nbrs) + (n in nbrs) + + @cached_property + def degree(self): + """Returns an iterator for (node, degree) and degree for single node. + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + deg: + Degree of the node, if a single node is passed as argument. + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). + + See Also + -------- + degree + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1])) + [(0, 1), (1, 2)] + + """ + return self.AntiDegreeView(self) + + def adjacency(self): + """Returns an iterator of (node, adjacency set) tuples for all nodes + in the dense graph. + + This is the fastest way to look at every edge. + For directed graphs, only outgoing adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator of (node, adjacency set) for all nodes in + the graph. 
+ + """ + for n in self._adj: + yield (n, set(self._adj) - set(self._adj[n]) - {n}) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/matching.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/matching.py new file mode 100644 index 0000000000000000000000000000000000000000..8f1c35016665a0b22d51c4d9d66fe8bf3a8a593a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/matching.py @@ -0,0 +1,43 @@ +""" +************** +Graph Matching +************** + +Given a graph G = (V,E), a matching M in G is a set of pairwise non-adjacent +edges; that is, no two edges share a common vertex. + +`Wikipedia: Matching `_ +""" +import networkx as nx + +__all__ = ["min_maximal_matching"] + + +@nx._dispatch +def min_maximal_matching(G): + r"""Returns the minimum maximal matching of G. That is, out of all maximal + matchings of the graph G, the smallest is returned. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + min_maximal_matching : set + Returns a set of edges such that no two edges share a common endpoint + and every edge not in the set shares some common endpoint in the set. + Cardinality will be 2*OPT in the worst case. + + Notes + ----- + The algorithm computes an approximate solution for the minimum maximal + cardinality matching problem. The solution is no more than 2 * OPT in size. + Runtime is $O(|E|)$. + + References + ---------- + .. [1] Vazirani, Vijay Approximation Algorithms (2001) + """ + return nx.maximal_matching(G) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/maxcut.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/maxcut.py new file mode 100644 index 0000000000000000000000000000000000000000..ec62b346bb499ce7c1fff499b7482eaa74240382 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/maxcut.py @@ -0,0 +1,113 @@ +import networkx as nx +from networkx.utils.decorators import not_implemented_for, py_random_state + +__all__ = ["randomized_partitioning", "one_exchange"] + + +@not_implemented_for("directed", "multigraph") +@py_random_state(1) +@nx._dispatch(edge_attrs="weight") +def randomized_partitioning(G, seed=None, p=0.5, weight=None): + """Compute a random partitioning of the graph nodes and its cut value. + + A partitioning is calculated by observing each node + and deciding to add it to the partition with probability `p`, + returning a random cut and its corresponding value (the + sum of weights of edges connecting different partitions). + + Parameters + ---------- + G : NetworkX graph + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + p : scalar + Probability for each node to be part of the first partition. + Should be in [0,1] + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + cut_size : scalar + Value of the minimum cut. + + partition : pair of node sets + A partitioning of the nodes that defines a minimum cut. 
+ """ + cut = {node for node in G.nodes() if seed.random() < p} + cut_size = nx.algorithms.cut_size(G, cut, weight=weight) + partition = (cut, G.nodes - cut) + return cut_size, partition + + +def _swap_node_partition(cut, node): + return cut - {node} if node in cut else cut.union({node}) + + +@not_implemented_for("directed", "multigraph") +@py_random_state(2) +@nx._dispatch(edge_attrs="weight") +def one_exchange(G, initial_cut=None, seed=None, weight=None): + """Compute a partitioning of the graphs nodes and the corresponding cut value. + + Use a greedy one exchange strategy to find a locally maximal cut + and its value, it works by finding the best node (one that gives + the highest gain to the cut value) to add to the current cut + and repeats this process until no improvement can be made. + + Parameters + ---------- + G : networkx Graph + Graph to find a maximum cut for. + + initial_cut : set + Cut to use as a starting point. If not supplied the algorithm + starts with an empty cut. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + cut_value : scalar + Value of the maximum cut. + + partition : pair of node sets + A partitioning of the nodes that defines a maximum cut. + """ + if initial_cut is None: + initial_cut = set() + cut = set(initial_cut) + current_cut_size = nx.algorithms.cut_size(G, cut, weight=weight) + while True: + nodes = list(G.nodes()) + # Shuffling the nodes ensures random tie-breaks in the following call to max + seed.shuffle(nodes) + best_node_to_swap = max( + nodes, + key=lambda v: nx.algorithms.cut_size( + G, _swap_node_partition(cut, v), weight=weight + ), + default=None, + ) + potential_cut = _swap_node_partition(cut, best_node_to_swap) + potential_cut_size = nx.algorithms.cut_size(G, potential_cut, weight=weight) + + if potential_cut_size > current_cut_size: + cut = potential_cut + current_cut_size = potential_cut_size + else: + break + + partition = (cut, G.nodes - cut) + return current_cut_size, partition diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/ramsey.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/ramsey.py new file mode 100644 index 0000000000000000000000000000000000000000..6f45c4f49717a1fe77da7b75d974eb5a55546642 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/ramsey.py @@ -0,0 +1,52 @@ +""" +Ramsey numbers. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +from ...utils import arbitrary_element + +__all__ = ["ramsey_R2"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def ramsey_R2(G): + r"""Compute the largest clique and largest independent set in `G`. + + This can be used to estimate bounds for the 2-color + Ramsey number `R(2;s,t)` for `G`. + + This is a recursive implementation which could run into trouble + for large recursions. Note that self-loop edges are ignored. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + max_pair : (set, set) tuple + Maximum clique, Maximum independent set. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. 
+ """ + if not G: + return set(), set() + + node = arbitrary_element(G) + nbrs = (nbr for nbr in nx.all_neighbors(G, node) if nbr != node) + nnbrs = nx.non_neighbors(G, node) + c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy()) + c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy()) + + c_1.add(node) + i_2.add(node) + # Choose the larger of the two cliques and the larger of the two + # independent sets, according to cardinality. + return max(c_1, c_2, key=len), max(i_1, i_2, key=len) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/steinertree.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/steinertree.py new file mode 100644 index 0000000000000000000000000000000000000000..50aea045feae0e1ee4b509a5d6038e43913dec13 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/steinertree.py @@ -0,0 +1,220 @@ +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = ["metric_closure", "steiner_tree"] + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def metric_closure(G, weight="weight"): + """Return the metric closure of a graph. + + The metric closure of a graph *G* is the complete graph in which each edge + is weighted by the shortest path distance between the nodes in *G* . + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + NetworkX graph + Metric closure of the graph `G`. + + """ + M = nx.Graph() + + Gnodes = set(G) + + # check for connected graph while processing first node + all_paths_iter = nx.all_pairs_dijkstra(G, weight=weight) + u, (distance, path) = next(all_paths_iter) + if Gnodes - set(distance): + msg = "G is not a connected graph. metric_closure is not defined." + raise nx.NetworkXError(msg) + Gnodes.remove(u) + for v in Gnodes: + M.add_edge(u, v, distance=distance[v], path=path[v]) + + # first node done -- now process the rest + for u, (distance, path) in all_paths_iter: + Gnodes.remove(u) + for v in Gnodes: + M.add_edge(u, v, distance=distance[v], path=path[v]) + + return M + + +def _mehlhorn_steiner_tree(G, terminal_nodes, weight): + paths = nx.multi_source_dijkstra_path(G, terminal_nodes) + + d_1 = {} + s = {} + for v in G.nodes(): + s[v] = paths[v][0] + d_1[(v, s[v])] = len(paths[v]) - 1 + + # G1-G4 names match those from the Mehlhorn 1988 paper. + G_1_prime = nx.Graph() + for u, v, data in G.edges(data=True): + su, sv = s[u], s[v] + weight_here = d_1[(u, su)] + data.get(weight, 1) + d_1[(v, sv)] + if not G_1_prime.has_edge(su, sv): + G_1_prime.add_edge(su, sv, weight=weight_here) + else: + new_weight = min(weight_here, G_1_prime[su][sv][weight]) + G_1_prime.add_edge(su, sv, weight=new_weight) + + G_2 = nx.minimum_spanning_edges(G_1_prime, data=True) + + G_3 = nx.Graph() + for u, v, d in G_2: + path = nx.shortest_path(G, u, v, weight) + for n1, n2 in pairwise(path): + G_3.add_edge(n1, n2) + + G_3_mst = list(nx.minimum_spanning_edges(G_3, data=False)) + if G.is_multigraph(): + G_3_mst = ( + (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in G_3_mst + ) + G_4 = G.edge_subgraph(G_3_mst).copy() + _remove_nonterminal_leaves(G_4, terminal_nodes) + return G_4.edges() + + +def _kou_steiner_tree(G, terminal_nodes, weight): + # H is the subgraph induced by terminal_nodes in the metric closure M of G. + M = metric_closure(G, weight=weight) + H = M.subgraph(terminal_nodes) + + # Use the 'distance' attribute of each edge provided by M. 
+ mst_edges = nx.minimum_spanning_edges(H, weight="distance", data=True) + + # Create an iterator over each edge in each shortest path; repeats are okay + mst_all_edges = chain.from_iterable(pairwise(d["path"]) for u, v, d in mst_edges) + if G.is_multigraph(): + mst_all_edges = ( + (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) + for u, v in mst_all_edges + ) + + # Find the MST again, over this new set of edges + G_S = G.edge_subgraph(mst_all_edges) + T_S = nx.minimum_spanning_edges(G_S, weight="weight", data=False) + + # Leaf nodes that are not terminal might still remain; remove them here + T_H = G.edge_subgraph(T_S).copy() + _remove_nonterminal_leaves(T_H, terminal_nodes) + + return T_H.edges() + + +def _remove_nonterminal_leaves(G, terminals): + terminals_set = set(terminals) + for n in list(G.nodes): + if n not in terminals_set and G.degree(n) == 1: + G.remove_node(n) + + +ALGORITHMS = { + "kou": _kou_steiner_tree, + "mehlhorn": _mehlhorn_steiner_tree, +} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def steiner_tree(G, terminal_nodes, weight="weight", method=None): + r"""Return an approximation to the minimum Steiner tree of a graph. + + The minimum Steiner tree of `G` w.r.t. a set of `terminal_nodes` (also *S*) + is a tree within `G` that spans those nodes and has minimum size (sum of + edge weights) among all such trees. + + The approximation algorithm is specified with the `method` keyword + argument. Both available algorithms produce a tree whose weight is + within a ``(2 - (2 / l))`` factor of the weight of the optimal Steiner tree, + where ``l`` is the minimum number of leaf nodes across all possible Steiner + trees. + + * ``"kou"`` [2]_ (runtime $O(|S| |V|^2)$) computes the minimum spanning tree of + the subgraph of the metric closure of *G* induced by the terminal nodes, + where the metric closure of *G* is the complete graph in which each edge is + weighted by the shortest path distance between the nodes in *G*. + + * ``"mehlhorn"`` [3]_ (runtime $O(|E|+|V|\log|V|)$) modifies Kou et al.'s + algorithm, beginning by finding the closest terminal node for each + non-terminal. This data is used to create a complete graph containing only + the terminal nodes, in which each edge is weighted with the shortest path + distance between them. The algorithm then proceeds in the same way as Kou + et al. + + Parameters + ---------- + G : NetworkX graph + + terminal_nodes : list + A list of terminal nodes for which a minimum Steiner tree is + to be found. + + weight : string (default = 'weight') + Use the edge attribute specified by this string as the edge weight. + Any edge attribute not present defaults to 1. + + method : string, optional (default = 'kou') + The algorithm to use to approximate the Steiner tree. + Supported options: 'kou', 'mehlhorn'. + Other inputs produce a ValueError. + + Returns + ------- + NetworkX graph + Approximation to the minimum Steiner tree of `G` induced by + `terminal_nodes`. + + Notes + ----- + For multigraphs, the edge between two nodes with minimum weight is the + edge put into the Steiner tree. + + + References + ---------- + .. [1] Steiner_tree_problem on Wikipedia. + https://en.wikipedia.org/wiki/Steiner_tree_problem + .. [2] Kou, L., G. Markowsky, and L. Berman. 1981. + ‘A Fast Algorithm for Steiner Trees’. + Acta Informatica 15 (2): 141–45. + https://doi.org/10.1007/BF00288961. + .. [3] Mehlhorn, Kurt. 1988. + ‘A Faster Approximation Algorithm for the Steiner Problem in Graphs’. 
+ Information Processing Letters 27 (3): 125–28. + https://doi.org/10.1016/0020-0190(88)90066-X. + """ + if method is None: + import warnings + + msg = ( + "steiner_tree will change default method from 'kou' to 'mehlhorn' " + "in version 3.2.\nSet the `method` kwarg to remove this warning." + ) + warnings.warn(msg, FutureWarning, stacklevel=4) + method = "kou" + + try: + algo = ALGORITHMS[method] + except KeyError as e: + msg = f"{method} is not a valid choice for an algorithm." + raise ValueError(msg) from e + + edges = algo(G, terminal_nodes, weight) + # For multigraph we should add the minimal weight edge keys + if G.is_multigraph(): + edges = ( + (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges + ) + T = G.edge_subgraph(edges) + return T diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f6e3ec89adcd583d654662c69180a31d0cfe92c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d7e0f19d3c6fd6e0c0204b5d4557789ad782335 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13f36591de8c31e0ff74a5ac7dc0e19f21ed6e5a Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d4b51ebe95b1472f3ad9d6a433eb82badc50edb Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c2be13a3ebf14eec8da8240dae5bc263fb4f227 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-39.pyc differ diff --git 
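+ # Illustrative usage sketch, not part of the vendored source: both methods
+ # registered in ALGORITHMS above, run on a unit-weight 6-cycle with a
+ # hypothetical terminal set.
+ import networkx as nx
+ from networkx.algorithms.approximation import steiner_tree
+ _G = nx.cycle_graph(6)
+ _terminals = [0, 2, 3]
+ for _method in ("kou", "mehlhorn"):
+     _T = steiner_tree(_G, _terminals, method=_method)
+     assert nx.is_tree(_T) and set(_terminals) <= set(_T)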
a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65c81e73d466e2a7e67af3b4e20abcb369e38624 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b62ac740921d23d491f3103f27562d7b2d42b3c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b05c3a535d3305ce790bb989eac902958daf057 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef0d622985f70959ec104fe2f33ff212ed8789cd Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1820c5f1b42dcb1f7ee049506508c9c5386cbccb Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f8c149415155a253b9dbb0c95232b47afc5170a Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d68e040e214920c725632d200ac70f2ab70befee Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-39.pyc 
b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d56f4c42a090ee1146375a7e52378a7844c6804 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..143ae0dd0bfc191124ddbc30186d4ce12ec306b7 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py new file mode 100644 index 0000000000000000000000000000000000000000..5eab5c1ee79408c9f90a1993415a6c3d7d957141 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py @@ -0,0 +1,41 @@ +import networkx as nx +from networkx.algorithms.approximation import average_clustering + +# This approximation has to be exact in regular graphs +# with no triangles or with all possible triangles. + + +def test_petersen(): + # Actual coefficient is 0 + G = nx.petersen_graph() + assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G) + + +def test_petersen_seed(): + # Actual coefficient is 0 + G = nx.petersen_graph() + assert average_clustering(G, trials=len(G) // 2, seed=1) == nx.average_clustering(G) + + +def test_tetrahedral(): + # Actual coefficient is 1 + G = nx.tetrahedral_graph() + assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G) + + +def test_dodecahedral(): + # Actual coefficient is 0 + G = nx.dodecahedral_graph() + assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G) + + +def test_empty(): + G = nx.empty_graph(5) + assert average_clustering(G, trials=len(G) // 2) == 0 + + +def test_complete(): + G = nx.complete_graph(5) + assert average_clustering(G, trials=len(G) // 2) == 1 + G = nx.complete_graph(7) + assert average_clustering(G, trials=len(G) // 2) == 1 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_clique.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_clique.py new file mode 100644 index 0000000000000000000000000000000000000000..ebda285b7d8c887a37cc7064cb41a10acdb074d5 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_clique.py @@ -0,0 +1,113 @@ +"""Unit tests for the :mod:`networkx.algorithms.approximation.clique` module.""" + + +import networkx as nx +from networkx.algorithms.approximation import ( + clique_removal, + large_clique_size, + max_clique, + maximum_independent_set, +) + + +def is_independent_set(G, nodes): + """Returns True if and only if `nodes` is an independent set in `G`. + + `G` is a NetworkX graph. `nodes` is an iterable of nodes in + `G`. + + """ + return G.subgraph(nodes).number_of_edges() == 0 + + +def is_clique(G, nodes): + """Returns True if and only if `nodes` is a clique + in `G`. + + `G` is an undirected simple graph. `nodes` is an iterable of + nodes in `G`. 
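+ # Illustrative sketch, not part of the vendored source: the estimator tested
+ # above samples `trials` random nodes, so it is exact whenever every local
+ # clustering coefficient is identical (all 0 or all 1), e.g.:
+ import networkx as nx
+ from networkx.algorithms.approximation import average_clustering
+ assert average_clustering(nx.complete_graph(6), trials=3, seed=0) == 1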
+ + """ + H = G.subgraph(nodes) + n = len(H) + return H.number_of_edges() == n * (n - 1) // 2 + + +class TestCliqueRemoval: + """Unit tests for the + :func:`~networkx.algorithms.approximation.clique_removal` function. + + """ + + def test_trivial_graph(self): + G = nx.trivial_graph() + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + # In fact, we should only have 1-cliques, that is, singleton nodes. + assert all(len(clique) == 1 for clique in cliques) + + def test_complete_graph(self): + G = nx.complete_graph(10) + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + + def test_barbell_graph(self): + G = nx.barbell_graph(10, 5) + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + + +class TestMaxClique: + """Unit tests for the :func:`networkx.algorithms.approximation.max_clique` + function. + + """ + + def test_null_graph(self): + G = nx.null_graph() + assert len(max_clique(G)) == 0 + + def test_complete_graph(self): + graph = nx.complete_graph(30) + # this should return the entire graph + mc = max_clique(graph) + assert 30 == len(mc) + + def test_maximal_by_cardinality(self): + """Tests that the maximal clique is computed according to maximum + cardinality of the sets. + + For more information, see pull request #1531. + + """ + G = nx.complete_graph(5) + G.add_edge(4, 5) + clique = max_clique(G) + assert len(clique) > 1 + + G = nx.lollipop_graph(30, 2) + clique = max_clique(G) + assert len(clique) > 2 + + +def test_large_clique_size(): + G = nx.complete_graph(9) + nx.add_cycle(G, [9, 10, 11]) + G.add_edge(8, 9) + G.add_edge(1, 12) + G.add_node(13) + + assert large_clique_size(G) == 9 + G.remove_node(5) + assert large_clique_size(G) == 8 + G.remove_edge(2, 3) + assert large_clique_size(G) == 7 + + +def test_independent_set(): + # smoke test + G = nx.Graph() + assert len(maximum_independent_set(G)) == 0 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..887db20bcaef8dd2641c64e963c789234aecbb20 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py @@ -0,0 +1,199 @@ +import pytest + +import networkx as nx +from networkx.algorithms import approximation as approx + + +def test_global_node_connectivity(): + # Figure 1 chapter on Connectivity + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + assert 2 == approx.local_node_connectivity(G, 1, 11) + assert 2 == approx.node_connectivity(G) + assert 2 == approx.node_connectivity(G, 1, 11) + + +def test_white_harary1(): + # Figure 1b white and harary (2001) + # A graph with high adhesion (edge connectivity) and low cohesion + # (node connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + assert 1 
== approx.node_connectivity(G) + + +def test_complete_graphs(): + for n in range(5, 25, 5): + G = nx.complete_graph(n) + assert n - 1 == approx.node_connectivity(G) + assert n - 1 == approx.node_connectivity(G, 0, 3) + + +def test_empty_graphs(): + for k in range(5, 25, 5): + G = nx.empty_graph(k) + assert 0 == approx.node_connectivity(G) + assert 0 == approx.node_connectivity(G, 0, 3) + + +def test_petersen(): + G = nx.petersen_graph() + assert 3 == approx.node_connectivity(G) + assert 3 == approx.node_connectivity(G, 0, 5) + + +# Approximation fails with tutte graph +# def test_tutte(): +# G = nx.tutte_graph() +# assert_equal(3, approx.node_connectivity(G)) + + +def test_dodecahedral(): + G = nx.dodecahedral_graph() + assert 3 == approx.node_connectivity(G) + assert 3 == approx.node_connectivity(G, 0, 5) + + +def test_octahedral(): + G = nx.octahedral_graph() + assert 4 == approx.node_connectivity(G) + assert 4 == approx.node_connectivity(G, 0, 5) + + +# Approximation can fail with icosahedral graph depending +# on iteration order. +# def test_icosahedral(): +# G=nx.icosahedral_graph() +# assert_equal(5, approx.node_connectivity(G)) +# assert_equal(5, approx.node_connectivity(G, 0, 5)) + + +def test_only_source(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, s=0) + + +def test_only_target(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, t=0) + + +def test_missing_source(): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 10, 1) + + +def test_missing_target(): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 1, 10) + + +def test_source_equals_target(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.local_node_connectivity, G, 0, 0) + + +def test_directed_node_connectivity(): + G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction + D = nx.cycle_graph(10).to_directed() # 2 reciprocal edges + assert 1 == approx.node_connectivity(G) + assert 1 == approx.node_connectivity(G, 1, 4) + assert 2 == approx.node_connectivity(D) + assert 2 == approx.node_connectivity(D, 1, 4) + + +class TestAllPairsNodeConnectivityApprox: + @classmethod + def setup_class(cls): + cls.path = nx.path_graph(7) + cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph()) + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.gnp = nx.gnp_random_graph(30, 0.1) + cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True) + cls.K20 = nx.complete_graph(20) + cls.K10 = nx.complete_graph(10) + cls.K5 = nx.complete_graph(5) + cls.G_list = [ + cls.path, + cls.directed_path, + cls.cycle, + cls.directed_cycle, + cls.gnp, + cls.directed_gnp, + cls.K10, + cls.K5, + cls.K20, + ] + + def test_cycles(self): + K_undir = approx.all_pairs_node_connectivity(self.cycle) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 2 + K_dir = approx.all_pairs_node_connectivity(self.directed_cycle) + for source in K_dir: + for target, k in K_dir[source].items(): + assert k == 1 + + def test_complete(self): + for G in [self.K10, self.K5, self.K20]: + K = approx.all_pairs_node_connectivity(G) + for source in K: + for target, k in K[source].items(): + assert k == len(G) - 1 + + def test_paths(self): + K_undir = approx.all_pairs_node_connectivity(self.path) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 1 + K_dir = 
approx.all_pairs_node_connectivity(self.directed_path) + for source in K_dir: + for target, k in K_dir[source].items(): + if source < target: + assert k == 1 + else: + assert k == 0 + + def test_cutoff(self): + for G in [self.K10, self.K5, self.K20]: + for mp in [2, 3, 4]: + paths = approx.all_pairs_node_connectivity(G, cutoff=mp) + for source in paths: + for target, K in paths[source].items(): + assert K == mp + + def test_all_pairs_connectivity_nbunch(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + C = approx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert len(C) == len(nbunch) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py new file mode 100644 index 0000000000000000000000000000000000000000..81251503c5d55a6a2d50071414ecc6e1e8cc8a67 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py @@ -0,0 +1,60 @@ +"""Unit tests for the :mod:`networkx.algorithms.approximation.distance_measures` module. +""" + +import pytest + +import networkx as nx +from networkx.algorithms.approximation import diameter + + +class TestDiameter: + """Unit tests for the approximate diameter function + :func:`~networkx.algorithms.approximation.distance_measures.diameter`. + """ + + def test_null_graph(self): + """Test empty graph.""" + G = nx.null_graph() + with pytest.raises( + nx.NetworkXError, match="Expected non-empty NetworkX graph!" + ): + diameter(G) + + def test_undirected_non_connected(self): + """Test an undirected disconnected graph.""" + graph = nx.path_graph(10) + graph.remove_edge(3, 4) + with pytest.raises(nx.NetworkXError, match="Graph not connected."): + diameter(graph) + + def test_directed_non_strongly_connected(self): + """Test a directed non strongly connected graph.""" + graph = nx.path_graph(10, create_using=nx.DiGraph()) + with pytest.raises(nx.NetworkXError, match="DiGraph not strongly connected."): + diameter(graph) + + def test_complete_undirected_graph(self): + """Test a complete undirected graph.""" + graph = nx.complete_graph(10) + assert diameter(graph) == 1 + + def test_complete_directed_graph(self): + """Test a complete directed graph.""" + graph = nx.complete_graph(10, create_using=nx.DiGraph()) + assert diameter(graph) == 1 + + def test_undirected_path_graph(self): + """Test an undirected path graph with 10 nodes.""" + graph = nx.path_graph(10) + assert diameter(graph) == 9 + + def test_directed_path_graph(self): + """Test a directed path graph with 10 nodes.""" + graph = nx.path_graph(10).to_directed() + assert diameter(graph) == 9 + + def test_single_node(self): + """Test a graph which contains just a node.""" + graph = nx.Graph() + graph.add_node(1) + assert diameter(graph) == 0 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py new file mode 100644 index 0000000000000000000000000000000000000000..6b90d85ecf73bb56370fd92fdec25e3bbbb91ce3 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py @@ -0,0 +1,78 @@ +import pytest + +import networkx as nx +from networkx.algorithms.approximation import ( + min_edge_dominating_set, + min_weighted_dominating_set, +) + + +class TestMinWeightDominatingSet: + def test_min_weighted_dominating_set(self): + graph = nx.Graph() + 
graph.add_edge(1, 2) + graph.add_edge(1, 5) + graph.add_edge(2, 3) + graph.add_edge(2, 5) + graph.add_edge(3, 4) + graph.add_edge(3, 6) + graph.add_edge(5, 6) + + vertices = {1, 2, 3, 4, 5, 6} + # due to ties, this might be hard to test tight bounds + dom_set = min_weighted_dominating_set(graph) + for vertex in vertices - dom_set: + neighbors = set(graph.neighbors(vertex)) + assert len(neighbors & dom_set) > 0, "Non dominating set found!" + + def test_star_graph(self): + """Tests that an approximate dominating set for the star graph, + even when the center node does not have the smallest integer + label, gives just the center node. + + For more information, see #1527. + + """ + # Create a star graph in which the center node has the highest + # label instead of the lowest. + G = nx.star_graph(10) + G = nx.relabel_nodes(G, {0: 9, 9: 0}) + assert min_weighted_dominating_set(G) == {9} + + def test_null_graph(self): + """Tests that the unique dominating set for the null graph is an empty set""" + G = nx.Graph() + assert min_weighted_dominating_set(G) == set() + + def test_min_edge_dominating_set(self): + graph = nx.path_graph(5) + dom_set = min_edge_dominating_set(graph) + + # this is a crappy way to test, but good enough for now. + for edge in graph.edges(): + if edge in dom_set: + continue + else: + u, v = edge + found = False + for dom_edge in dom_set: + found |= u == dom_edge[0] or u == dom_edge[1] + assert found, "Non adjacent edge found!" + + graph = nx.complete_graph(10) + dom_set = min_edge_dominating_set(graph) + + # this is a crappy way to test, but good enough for now. + for edge in graph.edges(): + if edge in dom_set: + continue + else: + u, v = edge + found = False + for dom_edge in dom_set: + found |= u == dom_edge[0] or u == dom_edge[1] + assert found, "Non adjacent edge found!" 
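+ # Editorial annotation, not part of the vendored source: the loop above is
+ # equivalent to the more direct containment check
+ #   covered = {v for e in dom_set for v in e}
+ #   assert all(u in covered or v in covered for u, v in graph.edges())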
+ + graph = nx.Graph() # empty Networkx graph + with pytest.raises(ValueError, match="Expected non-empty NetworkX graph!"): + min_edge_dominating_set(graph) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py new file mode 100644 index 0000000000000000000000000000000000000000..65ba802171a6b43a5157f12010c8164e5e867eb8 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py @@ -0,0 +1,303 @@ +# Test for approximation to k-components algorithm +import pytest + +import networkx as nx +from networkx.algorithms.approximation import k_components +from networkx.algorithms.approximation.kcomponents import _AntiGraph, _same + + +def build_k_number_dict(k_components): + k_num = {} + for k, comps in sorted(k_components.items()): + for comp in comps: + for node in comp: + k_num[node] = k + return k_num + + +## +# Some nice synthetic graphs +## + + +def graph_example_1(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [ + (labels[(0, 0)], labels[(1, 0)]), + (labels[(0, 4)], labels[(1, 4)]), + (labels[(3, 0)], labels[(4, 0)]), + (labels[(3, 4)], labels[(4, 4)]), + ]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + G.add_edge(new_node + 16, new_node + 5) + return G + + +def torrents_and_ferraro_graph(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # Commenting this makes the graph not biconnected !! 
+ # This stupid mistake make one reviewer very angry :P + G.add_edge(new_node + 16, new_node + 8) + + for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing two nodes + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + nbrs2 = G[new_node + 9] + G.remove_node(new_node + 9) + for nbr in nbrs2: + G.add_edge(new_node + 18, nbr) + return G + + +# Helper function + + +def _check_connectivity(G): + result = k_components(G) + for k, components in result.items(): + if k < 3: + continue + for component in components: + C = G.subgraph(component) + K = nx.node_connectivity(C) + assert K >= k + + +def test_torrents_and_ferraro_graph(): + G = torrents_and_ferraro_graph() + _check_connectivity(G) + + +def test_example_1(): + G = graph_example_1() + _check_connectivity(G) + + +def test_karate_0(): + G = nx.karate_club_graph() + _check_connectivity(G) + + +def test_karate_1(): + karate_k_num = { + 0: 4, + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 3, + 7: 4, + 8: 4, + 9: 2, + 10: 3, + 11: 1, + 12: 2, + 13: 4, + 14: 2, + 15: 2, + 16: 2, + 17: 2, + 18: 2, + 19: 3, + 20: 2, + 21: 2, + 22: 2, + 23: 3, + 24: 3, + 25: 3, + 26: 2, + 27: 3, + 28: 3, + 29: 3, + 30: 4, + 31: 3, + 32: 4, + 33: 4, + } + approx_karate_k_num = karate_k_num.copy() + approx_karate_k_num[24] = 2 + approx_karate_k_num[25] = 2 + G = nx.karate_club_graph() + k_comps = k_components(G) + k_num = build_k_number_dict(k_comps) + assert k_num in (karate_k_num, approx_karate_k_num) + + +def test_example_1_detail_3_and_4(): + G = graph_example_1() + result = k_components(G) + # In this example graph there are 8 3-components, 4 with 15 nodes + # and 4 with 5 nodes. + assert len(result[3]) == 8 + assert len([c for c in result[3] if len(c) == 15]) == 4 + assert len([c for c in result[3] if len(c) == 5]) == 4 + # There are also 8 4-components all with 5 nodes. + assert len(result[4]) == 8 + assert all(len(c) == 5 for c in result[4]) + # Finally check that the k-components detected have actually node + # connectivity >= k. 
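+ # Editorial annotation, not part of the vendored source: k_components
+ # returns a dict mapping each k to a list of node sets; each set should
+ # induce a subgraph whose exact nx.node_connectivity is at least k, which
+ # the loop below verifies.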
+ for k, components in result.items(): + if k < 3: + continue + for component in components: + K = nx.node_connectivity(G.subgraph(component)) + assert K >= k + + +def test_directed(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.gnp_random_graph(10, 0.4, directed=True) + kc = k_components(G) + + +def test_same(): + equal = {"A": 2, "B": 2, "C": 2} + slightly_different = {"A": 2, "B": 1, "C": 2} + different = {"A": 2, "B": 8, "C": 18} + assert _same(equal) + assert not _same(slightly_different) + assert _same(slightly_different, tol=1) + assert not _same(different) + assert not _same(different, tol=4) + + +class TestAntiGraph: + @classmethod + def setup_class(cls): + cls.Gnp = nx.gnp_random_graph(20, 0.8, seed=42) + cls.Anp = _AntiGraph(nx.complement(cls.Gnp)) + cls.Gd = nx.davis_southern_women_graph() + cls.Ad = _AntiGraph(nx.complement(cls.Gd)) + cls.Gk = nx.karate_club_graph() + cls.Ak = _AntiGraph(nx.complement(cls.Gk)) + cls.GA = [(cls.Gnp, cls.Anp), (cls.Gd, cls.Ad), (cls.Gk, cls.Ak)] + + def test_size(self): + for G, A in self.GA: + n = G.order() + s = len(list(G.edges())) + len(list(A.edges())) + assert s == (n * (n - 1)) / 2 + + def test_degree(self): + for G, A in self.GA: + assert sorted(G.degree()) == sorted(A.degree()) + + def test_core_number(self): + for G, A in self.GA: + assert nx.core_number(G) == nx.core_number(A) + + def test_connected_components(self): + # ccs are same unless isolated nodes or any node has degree=len(G)-1 + # graphs in self.GA avoid this problem + for G, A in self.GA: + gc = [set(c) for c in nx.connected_components(G)] + ac = [set(c) for c in nx.connected_components(A)] + for comp in ac: + assert comp in gc + + def test_adj(self): + for G, A in self.GA: + for n, nbrs in G.adj.items(): + a_adj = sorted((n, sorted(ad)) for n, ad in A.adj.items()) + g_adj = sorted((n, sorted(ad)) for n, ad in G.adj.items()) + assert a_adj == g_adj + + def test_adjacency(self): + for G, A in self.GA: + a_adj = list(A.adjacency()) + for n, nbrs in G.adjacency(): + assert (n, set(nbrs)) in a_adj + + def test_neighbors(self): + for G, A in self.GA: + node = list(G.nodes())[0] + assert set(G.neighbors(node)) == set(A.neighbors(node)) + + def test_node_not_in_graph(self): + for G, A in self.GA: + node = "non_existent_node" + pytest.raises(nx.NetworkXError, A.neighbors, node) + pytest.raises(nx.NetworkXError, G.neighbors, node) + + def test_degree_thingraph(self): + for G, A in self.GA: + node = list(G.nodes())[0] + nodes = list(G.nodes())[1:4] + assert G.degree(node) == A.degree(node) + assert sum(d for n, d in G.degree()) == sum(d for n, d in A.degree()) + # AntiGraph is a ThinGraph, so all the weights are 1 + assert sum(d for n, d in A.degree()) == sum( + d for n, d in A.degree(weight="weight") + ) + assert sum(d for n, d in G.degree(nodes)) == sum( + d for n, d in A.degree(nodes) + ) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_matching.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..f50da3d2e07310fc19e1db2bd18fdce23223771c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_matching.py @@ -0,0 +1,8 @@ +import networkx as nx +import networkx.algorithms.approximation as a + + +def test_min_maximal_matching(): + # smoke test + G = nx.Graph() + assert len(a.min_maximal_matching(G)) == 0 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py 
b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py new file mode 100644 index 0000000000000000000000000000000000000000..39291fbf14d5b3d411cdef50e4f367aa67132a1c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py @@ -0,0 +1,82 @@ +import random + +import networkx as nx +from networkx.algorithms.approximation import maxcut + + +def _is_valid_cut(G, set1, set2): + union = set1.union(set2) + assert union == set(G.nodes) + assert len(set1) + len(set2) == G.number_of_nodes() + + +def _cut_is_locally_optimal(G, cut_size, set1): + # test if cut can be locally improved + for i, node in enumerate(set1): + cut_size_without_node = nx.algorithms.cut_size( + G, set1 - {node}, weight="weight" + ) + assert cut_size_without_node <= cut_size + + +def test_random_partitioning(): + G = nx.complete_graph(5) + _, (set1, set2) = maxcut.randomized_partitioning(G, seed=5) + _is_valid_cut(G, set1, set2) + + +def test_random_partitioning_all_to_one(): + G = nx.complete_graph(5) + _, (set1, set2) = maxcut.randomized_partitioning(G, p=1) + _is_valid_cut(G, set1, set2) + assert len(set1) == G.number_of_nodes() + assert len(set2) == 0 + + +def test_one_exchange_basic(): + G = nx.complete_graph(5) + random.seed(5) + for u, v, w in G.edges(data=True): + w["weight"] = random.randrange(-100, 100, 1) / 10 + + initial_cut = set(random.sample(sorted(G.nodes()), k=5)) + cut_size, (set1, set2) = maxcut.one_exchange( + G, initial_cut, weight="weight", seed=5 + ) + + _is_valid_cut(G, set1, set2) + _cut_is_locally_optimal(G, cut_size, set1) + + +def test_one_exchange_optimal(): + # Greedy one exchange should find the optimal solution for this graph (14) + G = nx.Graph() + G.add_edge(1, 2, weight=3) + G.add_edge(1, 3, weight=3) + G.add_edge(1, 4, weight=3) + G.add_edge(1, 5, weight=3) + G.add_edge(2, 3, weight=5) + + cut_size, (set1, set2) = maxcut.one_exchange(G, weight="weight", seed=5) + + _is_valid_cut(G, set1, set2) + _cut_is_locally_optimal(G, cut_size, set1) + # check global optimality + assert cut_size == 14 + + +def test_negative_weights(): + G = nx.complete_graph(5) + random.seed(5) + for u, v, w in G.edges(data=True): + w["weight"] = -1 * random.random() + + initial_cut = set(random.sample(sorted(G.nodes()), k=5)) + cut_size, (set1, set2) = maxcut.one_exchange(G, initial_cut, weight="weight") + + # make sure it is a valid cut + _is_valid_cut(G, set1, set2) + # check local optimality + _cut_is_locally_optimal(G, cut_size, set1) + # test that all nodes are in the same partition + assert len(set1) == len(G.nodes) or len(set2) == len(G.nodes) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py new file mode 100644 index 0000000000000000000000000000000000000000..32fe1fb8fa917c557954d9da0d960895a6953a11 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py @@ -0,0 +1,31 @@ +import networkx as nx +import networkx.algorithms.approximation as apxa + + +def test_ramsey(): + # this should only find the complete graph + graph = nx.complete_graph(10) + c, i = apxa.ramsey_R2(graph) + cdens = nx.density(graph.subgraph(c)) + assert cdens == 1.0, "clique not correctly found by ramsey!" + idens = nx.density(graph.subgraph(i)) + assert idens == 0.0, "i-set not correctly found by ramsey!" + + # this trivial graph has no cliques. 
should just find i-sets + graph = nx.trivial_graph() + c, i = apxa.ramsey_R2(graph) + assert c == {0}, "clique not correctly found by ramsey!" + assert i == {0}, "i-set not correctly found by ramsey!" + + graph = nx.barbell_graph(10, 5, nx.Graph()) + c, i = apxa.ramsey_R2(graph) + cdens = nx.density(graph.subgraph(c)) + assert cdens == 1.0, "clique not correctly found by ramsey!" + idens = nx.density(graph.subgraph(i)) + assert idens == 0.0, "i-set not correctly found by ramsey!" + + # add self-loops and test again + graph.add_edges_from([(n, n) for n in range(0, len(graph), 2)]) + cc, ii = apxa.ramsey_R2(graph) + assert cc == c + assert ii == i diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py new file mode 100644 index 0000000000000000000000000000000000000000..d7af1a1af4101d4ac702d95a7104cff466ecdb7b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py @@ -0,0 +1,191 @@ +import pytest + +import networkx as nx +from networkx.algorithms.approximation.steinertree import metric_closure, steiner_tree +from networkx.utils import edges_equal + + +class TestSteinerTree: + @classmethod + def setup_class(cls): + G1 = nx.Graph() + G1.add_edge(1, 2, weight=10) + G1.add_edge(2, 3, weight=10) + G1.add_edge(3, 4, weight=10) + G1.add_edge(4, 5, weight=10) + G1.add_edge(5, 6, weight=10) + G1.add_edge(2, 7, weight=1) + G1.add_edge(7, 5, weight=1) + + G2 = nx.Graph() + G2.add_edge(0, 5, weight=6) + G2.add_edge(1, 2, weight=2) + G2.add_edge(1, 5, weight=3) + G2.add_edge(2, 4, weight=4) + G2.add_edge(3, 5, weight=5) + G2.add_edge(4, 5, weight=1) + + G3 = nx.Graph() + G3.add_edge(1, 2, weight=8) + G3.add_edge(1, 9, weight=3) + G3.add_edge(1, 8, weight=6) + G3.add_edge(1, 10, weight=2) + G3.add_edge(1, 14, weight=3) + G3.add_edge(2, 3, weight=6) + G3.add_edge(3, 4, weight=3) + G3.add_edge(3, 10, weight=2) + G3.add_edge(3, 11, weight=1) + G3.add_edge(4, 5, weight=1) + G3.add_edge(4, 11, weight=1) + G3.add_edge(5, 6, weight=4) + G3.add_edge(5, 11, weight=2) + G3.add_edge(5, 12, weight=1) + G3.add_edge(5, 13, weight=3) + G3.add_edge(6, 7, weight=2) + G3.add_edge(6, 12, weight=3) + G3.add_edge(6, 13, weight=1) + G3.add_edge(7, 8, weight=3) + G3.add_edge(7, 9, weight=3) + G3.add_edge(7, 11, weight=5) + G3.add_edge(7, 13, weight=2) + G3.add_edge(7, 14, weight=4) + G3.add_edge(8, 9, weight=2) + G3.add_edge(9, 14, weight=1) + G3.add_edge(10, 11, weight=2) + G3.add_edge(10, 14, weight=1) + G3.add_edge(11, 12, weight=1) + G3.add_edge(11, 14, weight=7) + G3.add_edge(12, 14, weight=3) + G3.add_edge(12, 15, weight=1) + G3.add_edge(13, 14, weight=4) + G3.add_edge(13, 15, weight=1) + G3.add_edge(14, 15, weight=2) + + cls.G1 = G1 + cls.G2 = G2 + cls.G3 = G3 + cls.G1_term_nodes = [1, 2, 3, 4, 5] + cls.G2_term_nodes = [0, 2, 3] + cls.G3_term_nodes = [1, 3, 5, 6, 8, 10, 11, 12, 13] + + cls.methods = ["kou", "mehlhorn"] + + def test_connected_metric_closure(self): + G = self.G1.copy() + G.add_node(100) + pytest.raises(nx.NetworkXError, metric_closure, G) + + def test_metric_closure(self): + M = metric_closure(self.G1) + mc = [ + (1, 2, {"distance": 10, "path": [1, 2]}), + (1, 3, {"distance": 20, "path": [1, 2, 3]}), + (1, 4, {"distance": 22, "path": [1, 2, 7, 5, 4]}), + (1, 5, {"distance": 12, "path": [1, 2, 7, 5]}), + (1, 6, {"distance": 22, "path": [1, 2, 7, 5, 6]}), + (1, 7, {"distance": 11, "path": [1, 2, 7]}), + (2, 3, {"distance": 10, 
"path": [2, 3]}), + (2, 4, {"distance": 12, "path": [2, 7, 5, 4]}), + (2, 5, {"distance": 2, "path": [2, 7, 5]}), + (2, 6, {"distance": 12, "path": [2, 7, 5, 6]}), + (2, 7, {"distance": 1, "path": [2, 7]}), + (3, 4, {"distance": 10, "path": [3, 4]}), + (3, 5, {"distance": 12, "path": [3, 2, 7, 5]}), + (3, 6, {"distance": 22, "path": [3, 2, 7, 5, 6]}), + (3, 7, {"distance": 11, "path": [3, 2, 7]}), + (4, 5, {"distance": 10, "path": [4, 5]}), + (4, 6, {"distance": 20, "path": [4, 5, 6]}), + (4, 7, {"distance": 11, "path": [4, 5, 7]}), + (5, 6, {"distance": 10, "path": [5, 6]}), + (5, 7, {"distance": 1, "path": [5, 7]}), + (6, 7, {"distance": 11, "path": [6, 5, 7]}), + ] + assert edges_equal(list(M.edges(data=True)), mc) + + def test_steiner_tree(self): + valid_steiner_trees = [ + [ + [ + (1, 2, {"weight": 10}), + (2, 3, {"weight": 10}), + (2, 7, {"weight": 1}), + (3, 4, {"weight": 10}), + (5, 7, {"weight": 1}), + ], + [ + (1, 2, {"weight": 10}), + (2, 7, {"weight": 1}), + (3, 4, {"weight": 10}), + (4, 5, {"weight": 10}), + (5, 7, {"weight": 1}), + ], + [ + (1, 2, {"weight": 10}), + (2, 3, {"weight": 10}), + (2, 7, {"weight": 1}), + (4, 5, {"weight": 10}), + (5, 7, {"weight": 1}), + ], + ], + [ + [ + (0, 5, {"weight": 6}), + (1, 2, {"weight": 2}), + (1, 5, {"weight": 3}), + (3, 5, {"weight": 5}), + ], + [ + (0, 5, {"weight": 6}), + (4, 2, {"weight": 4}), + (4, 5, {"weight": 1}), + (3, 5, {"weight": 5}), + ], + ], + [ + [ + (1, 10, {"weight": 2}), + (3, 10, {"weight": 2}), + (3, 11, {"weight": 1}), + (5, 12, {"weight": 1}), + (6, 13, {"weight": 1}), + (8, 9, {"weight": 2}), + (9, 14, {"weight": 1}), + (10, 14, {"weight": 1}), + (11, 12, {"weight": 1}), + (12, 15, {"weight": 1}), + (13, 15, {"weight": 1}), + ] + ], + ] + for method in self.methods: + for G, term_nodes, valid_trees in zip( + [self.G1, self.G2, self.G3], + [self.G1_term_nodes, self.G2_term_nodes, self.G3_term_nodes], + valid_steiner_trees, + ): + S = steiner_tree(G, term_nodes, method=method) + assert any( + edges_equal(list(S.edges(data=True)), valid_tree) + for valid_tree in valid_trees + ) + + def test_multigraph_steiner_tree(self): + G = nx.MultiGraph() + G.add_edges_from( + [ + (1, 2, 0, {"weight": 1}), + (2, 3, 0, {"weight": 999}), + (2, 3, 1, {"weight": 1}), + (3, 4, 0, {"weight": 1}), + (3, 5, 0, {"weight": 1}), + ] + ) + terminal_nodes = [2, 4, 5] + expected_edges = [ + (2, 3, 1, {"weight": 1}), # edge with key 1 has lower weight + (3, 4, 0, {"weight": 1}), + (3, 5, 0, {"weight": 1}), + ] + for method in self.methods: + S = steiner_tree(G, terminal_nodes, method=method) + assert edges_equal(S.edges(data=True, keys=True), expected_edges) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py new file mode 100644 index 0000000000000000000000000000000000000000..ccb553e1cc7129837996dfe0d5e8fb2435e0b4fd --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py @@ -0,0 +1,963 @@ +"""Unit tests for the traveling_salesman module.""" +import random + +import pytest + +import networkx as nx +import networkx.algorithms.approximation as nx_app + +pairwise = nx.utils.pairwise + + +def test_christofides_hamiltonian(): + random.seed(42) + G = nx.complete_graph(20) + for u, v in G.edges(): + G[u][v]["weight"] = random.randint(0, 10) + + H = nx.Graph() + H.add_edges_from(pairwise(nx_app.christofides(G))) + 
H.remove_edges_from(nx.find_cycle(H)) + assert len(H.edges) == 0 + + tree = nx.minimum_spanning_tree(G, weight="weight") + H = nx.Graph() + H.add_edges_from(pairwise(nx_app.christofides(G, tree))) + H.remove_edges_from(nx.find_cycle(H)) + assert len(H.edges) == 0 + + +def test_christofides_incomplete_graph(): + G = nx.complete_graph(10) + G.remove_edge(0, 1) + pytest.raises(nx.NetworkXError, nx_app.christofides, G) + + +def test_christofides_ignore_selfloops(): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = nx_app.christofides(G) + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + +# set up graphs for other tests +class TestBase: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + cls.DG.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "A", 3), + ("B", "C", 12), + ("B", "D", 16), + ("C", "A", 13), + ("C", "B", 12), + ("C", "D", 4), + ("D", "A", 14), + ("D", "B", 15), + ("D", "C", 2), + } + ) + cls.DG_cycle = ["D", "C", "B", "A", "D"] + cls.DG_cost = 31.0 + + cls.DG2 = nx.DiGraph() + cls.DG2.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "A", 30), + ("B", "C", 2), + ("B", "D", 16), + ("C", "A", 33), + ("C", "B", 32), + ("C", "D", 34), + ("D", "A", 14), + ("D", "B", 15), + ("D", "C", 2), + } + ) + cls.DG2_cycle = ["D", "A", "B", "C", "D"] + cls.DG2_cost = 53.0 + + cls.unweightedUG = nx.complete_graph(5, nx.Graph()) + cls.unweightedDG = nx.complete_graph(5, nx.DiGraph()) + + cls.incompleteUG = nx.Graph() + cls.incompleteUG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)}) + cls.incompleteDG = nx.DiGraph() + cls.incompleteDG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)}) + + cls.UG = nx.Graph() + cls.UG.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "C", 12), + ("B", "D", 16), + ("C", "D", 4), + } + ) + cls.UG_cycle = ["D", "C", "B", "A", "D"] + cls.UG_cost = 33.0 + + cls.UG2 = nx.Graph() + cls.UG2.add_weighted_edges_from( + { + ("A", "B", 1), + ("A", "C", 15), + ("A", "D", 5), + ("B", "C", 16), + ("B", "D", 8), + ("C", "D", 3), + } + ) + cls.UG2_cycle = ["D", "C", "B", "A", "D"] + cls.UG2_cost = 25.0 + + +def validate_solution(soln, cost, exp_soln, exp_cost): + assert soln == exp_soln + assert cost == exp_cost + + +def validate_symmetric_solution(soln, cost, exp_soln, exp_cost): + assert soln == exp_soln or soln == exp_soln[::-1] + assert cost == exp_cost + + +class TestGreedyTSP(TestBase): + def test_greedy(self): + cycle = nx_app.greedy_tsp(self.DG, source="D") + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 31.0) + + cycle = nx_app.greedy_tsp(self.DG2, source="D") + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 78.0) + + cycle = nx_app.greedy_tsp(self.UG, source="D") + cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 33.0) + + cycle = nx_app.greedy_tsp(self.UG2, source="D") + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "A", "B", "D"], 27.0) + + def test_not_complete_graph(self): + pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteUG) + pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteDG) + + def test_not_weighted_graph(self): + nx_app.greedy_tsp(self.unweightedUG) + 
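+ # Editorial annotation, not part of the vendored source: on an unweighted
+ # complete graph every tour has equal cost, so greedy_tsp only needs to
+ # return a closed Hamiltonian cycle, e.g.:
+ #   cycle = nx_app.greedy_tsp(nx.complete_graph(5))
+ #   assert len(cycle) == 6 and cycle[0] == cycle[-1]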
nx_app.greedy_tsp(self.unweightedDG) + + def test_two_nodes(self): + G = nx.Graph() + G.add_weighted_edges_from({(1, 2, 1)}) + cycle = nx_app.greedy_tsp(G) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + def test_ignore_selfloops(self): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = nx_app.greedy_tsp(G) + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + +class TestSimulatedAnnealingTSP(TestBase): + tsp = staticmethod(nx_app.simulated_annealing_tsp) + + def test_simulated_annealing_directed(self): + cycle = self.tsp(self.DG, "greedy", source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + initial_sol = ["D", "B", "A", "C", "D"] + cycle = self.tsp(self.DG, initial_sol, source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + initial_sol = ["D", "A", "C", "B", "D"] + cycle = self.tsp(self.DG, initial_sol, move="1-0", source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + cycle = self.tsp(self.DG2, "greedy", source="D", seed=42) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost) + + cycle = self.tsp(self.DG2, "greedy", move="1-0", source="D", seed=42) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost) + + def test_simulated_annealing_undirected(self): + cycle = self.tsp(self.UG, "greedy", source="D", seed=42) + cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.UG_cycle, self.UG_cost) + + cycle = self.tsp(self.UG2, "greedy", source="D", seed=42) + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost) + + cycle = self.tsp(self.UG2, "greedy", move="1-0", source="D", seed=42) + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost) + + def test_error_on_input_order_mistake(self): + # see issue #4846 https://github.com/networkx/networkx/issues/4846 + pytest.raises(TypeError, self.tsp, self.UG, weight="weight") + pytest.raises(nx.NetworkXError, self.tsp, self.UG, "weight") + + def test_not_complete_graph(self): + pytest.raises(nx.NetworkXError, self.tsp, self.incompleteUG, "greedy", source=0) + pytest.raises(nx.NetworkXError, self.tsp, self.incompleteDG, "greedy", source=0) + + def test_ignore_selfloops(self): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = self.tsp(G, "greedy") + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + def test_not_weighted_graph(self): + self.tsp(self.unweightedUG, "greedy") + self.tsp(self.unweightedDG, "greedy") + + def test_two_nodes(self): + G = nx.Graph() + G.add_weighted_edges_from({(1, 2, 1)}) + + cycle = self.tsp(G, "greedy", source=1, seed=42) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + cycle = self.tsp(G, [1, 2, 1], source=1, seed=42) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + def 
test_failure_of_costs_too_high_when_iterations_low(self): + # Simulated Annealing Version: + # set number of moves low and alpha high + cycle = self.tsp( + self.DG2, "greedy", source="D", move="1-0", alpha=1, N_inner=1, seed=42 + ) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + print(cycle, cost) + assert cost > self.DG2_cost + + # Try with an incorrect initial guess + initial_sol = ["D", "A", "B", "C", "D"] + cycle = self.tsp( + self.DG, + initial_sol, + source="D", + move="1-0", + alpha=0.1, + N_inner=1, + max_iterations=1, + seed=42, + ) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + print(cycle, cost) + assert cost > self.DG_cost + + +class TestThresholdAcceptingTSP(TestSimulatedAnnealingTSP): + tsp = staticmethod(nx_app.threshold_accepting_tsp) + + def test_failure_of_costs_too_high_when_iterations_low(self): + # Threshold Version: + # set number of moves low and number of iterations low + cycle = self.tsp( + self.DG2, + "greedy", + source="D", + move="1-0", + N_inner=1, + max_iterations=1, + seed=4, + ) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG2_cost + + # set threshold too low + initial_sol = ["D", "A", "B", "C", "D"] + cycle = self.tsp( + self.DG, initial_sol, source="D", move="1-0", threshold=-3, seed=42 + ) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG_cost + + +# Tests for function traveling_salesman_problem +def test_TSP_method(): + G = nx.cycle_graph(9) + G[4][5]["weight"] = 10 + + def my_tsp_method(G, weight): + return nx_app.simulated_annealing_tsp(G, "greedy", weight, source=4, seed=1) + + path = nx_app.traveling_salesman_problem(G, method=my_tsp_method, cycle=False) + print(path) + assert path == [4, 3, 2, 1, 0, 8, 7, 6, 5] + + +def test_TSP_unweighted(): + G = nx.cycle_graph(9) + path = nx_app.traveling_salesman_problem(G, nodes=[3, 6], cycle=False) + assert path in ([3, 4, 5, 6], [6, 5, 4, 3]) + + cycle = nx_app.traveling_salesman_problem(G, nodes=[3, 6]) + assert cycle in ([3, 4, 5, 6, 5, 4, 3], [6, 5, 4, 3, 4, 5, 6]) + + +def test_TSP_weighted(): + G = nx.cycle_graph(9) + G[0][1]["weight"] = 2 + G[1][2]["weight"] = 2 + G[2][3]["weight"] = 2 + G[3][4]["weight"] = 4 + G[4][5]["weight"] = 5 + G[5][6]["weight"] = 4 + G[6][7]["weight"] = 2 + G[7][8]["weight"] = 2 + G[8][0]["weight"] = 2 + tsp = nx_app.traveling_salesman_problem + + # path between 3 and 6 + expected_paths = ([3, 2, 1, 0, 8, 7, 6], [6, 7, 8, 0, 1, 2, 3]) + # cycle between 3 and 6 + expected_cycles = ( + [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3], + [6, 7, 8, 0, 1, 2, 3, 2, 1, 0, 8, 7, 6], + ) + # path through all nodes + expected_tourpaths = ([5, 6, 7, 8, 0, 1, 2, 3, 4], [4, 3, 2, 1, 0, 8, 7, 6, 5]) + + # Check default method + cycle = tsp(G, nodes=[3, 6], weight="weight") + assert cycle in expected_cycles + + path = tsp(G, nodes=[3, 6], weight="weight", cycle=False) + assert path in expected_paths + + tourpath = tsp(G, weight="weight", cycle=False) + assert tourpath in expected_tourpaths + + # Check all methods + methods = [ + nx_app.christofides, + nx_app.greedy_tsp, + lambda G, wt: nx_app.simulated_annealing_tsp(G, "greedy", weight=wt), + lambda G, wt: nx_app.threshold_accepting_tsp(G, "greedy", weight=wt), + ] + for method in methods: + cycle = tsp(G, nodes=[3, 6], weight="weight", method=method) + assert cycle in expected_cycles + + path = tsp(G, nodes=[3, 6], weight="weight", method=method, cycle=False) + assert path in expected_paths + + tourpath = tsp(G, 
weight="weight", method=method, cycle=False) + assert tourpath in expected_tourpaths + + +def test_TSP_incomplete_graph_short_path(): + G = nx.cycle_graph(9) + G.add_edges_from([(4, 9), (9, 10), (10, 11), (11, 0)]) + G[4][5]["weight"] = 5 + + cycle = nx_app.traveling_salesman_problem(G) + print(cycle) + assert len(cycle) == 17 and len(set(cycle)) == 12 + + # make sure that cutting one edge out of complete graph formulation + # cuts out many edges out of the path of the TSP + path = nx_app.traveling_salesman_problem(G, cycle=False) + print(path) + assert len(path) == 13 and len(set(path)) == 12 + + +def test_held_karp_ascent(): + """ + Test the Held-Karp relaxation with the ascent method + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + # Adjacency matrix from page 1153 of the 1970 Held and Karp paper + # which have been edited to be directional, but also symmetric + G_array = np.array( + [ + [0, 97, 60, 73, 17, 52], + [97, 0, 41, 52, 90, 30], + [60, 41, 0, 21, 35, 41], + [73, 52, 21, 0, 95, 46], + [17, 90, 35, 95, 0, 81], + [52, 30, 41, 46, 81, 0], + ] + ) + + solution_edges = [(1, 3), (2, 4), (3, 2), (4, 0), (5, 1), (0, 5)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 207.00 + # Check that the z_stars are the same + solution = nx.DiGraph() + solution.add_edges_from(solution_edges) + assert nx.utils.edges_equal(z_star.edges, solution.edges) + + +def test_ascent_fractional_solution(): + """ + Test the ascent method using a modified version of Figure 2 on page 1140 + in 'The Traveling Salesman Problem and Minimum Spanning Trees' by Held and + Karp + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + # This version of Figure 2 has all of the edge weights multiplied by 100 + # and is a complete directed graph with infinite edge weights for the + # edges not listed in the original graph + G_array = np.array( + [ + [0, 100, 100, 100000, 100000, 1], + [100, 0, 100, 100000, 1, 100000], + [100, 100, 0, 1, 100000, 100000], + [100000, 100000, 1, 0, 100, 100], + [100000, 1, 100000, 100, 0, 100], + [1, 100000, 100000, 100, 100, 0], + ] + ) + + solution_z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 1 / 3, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 1 / 3, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 1 / 3, + (3, 5): 1 / 2, + (4, 1): 5 / 6, + (4, 3): 1 / 3, + (4, 5): 1 / 2, + (5, 0): 5 / 6, + (5, 3): 1 / 2, + (5, 4): 1 / 2, + } + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 303.00 + # Check that the z_stars are the same + assert {key: round(z_star[key], 4) for key in z_star} == { + key: round(solution_z_star[key], 4) for key in solution_z_star + } + + +def test_ascent_method_asymmetric(): + """ + Tests the ascent method using a truly asymmetric graph for which the + solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 26, 63, 59, 69, 31, 41], + [62, 0, 91, 53, 75, 87, 47], + [47, 82, 0, 90, 15, 9, 18], + [68, 19, 5, 0, 58, 34, 93], + [11, 58, 53, 
55, 0, 61, 79],
+            [88, 75, 13, 76, 98, 0, 40],
+            [41, 61, 55, 88, 46, 45, 0],
+        ]
+    )
+
+    solution_edges = [(0, 1), (1, 3), (3, 2), (2, 5), (5, 6), (4, 0), (6, 4)]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    # Check that the optimal weights are the same
+    assert round(opt_hk, 2) == 190.00
+    # Check that the z_stars match.
+    solution = nx.DiGraph()
+    solution.add_edges_from(solution_edges)
+    assert nx.utils.edges_equal(z_star.edges, solution.edges)
+
+
+def test_ascent_method_asymmetric_2():
+    """
+    Tests the ascent method using a truly asymmetric graph for which the
+    solution has been brute forced
+    """
+    import networkx.algorithms.approximation.traveling_salesman as tsp
+
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    G_array = np.array(
+        [
+            [0, 45, 39, 92, 29, 31],
+            [72, 0, 4, 12, 21, 60],
+            [81, 6, 0, 98, 70, 53],
+            [49, 71, 59, 0, 98, 94],
+            [74, 95, 24, 43, 0, 47],
+            [56, 43, 3, 65, 22, 0],
+        ]
+    )
+
+    solution_edges = [(0, 5), (5, 4), (1, 3), (3, 0), (2, 1), (4, 2)]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    # Check that the optimal weights are the same
+    assert round(opt_hk, 2) == 144.00
+    # Check that the z_stars match.
+    solution = nx.DiGraph()
+    solution.add_edges_from(solution_edges)
+    assert nx.utils.edges_equal(z_star.edges, solution.edges)
+
+
+def test_held_karp_ascent_asymmetric_3():
+    """
+    Tests the ascent method using a truly asymmetric graph for which the
+    solution has been brute forced.
+
+    In this graph there are two different optimal, integral solutions (which
+    are also the overall ATSP solutions) to the Held-Karp relaxation. However,
+    because this graph has two tours of optimal value and the candidate
+    solutions in the held_karp_ascent function are not stored in an ordered
+    data structure, either tour may be returned, so the test accepts both.
+ """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 1, 5, 2, 7, 4], + [7, 0, 7, 7, 1, 4], + [4, 7, 0, 9, 2, 1], + [7, 2, 7, 0, 4, 4], + [5, 5, 4, 4, 0, 3], + [3, 9, 1, 3, 4, 0], + ] + ) + + solution1_edges = [(0, 3), (1, 4), (2, 5), (3, 1), (4, 2), (5, 0)] + + solution2_edges = [(0, 3), (3, 1), (1, 4), (4, 5), (2, 0), (5, 2)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + assert round(opt_hk, 2) == 13.00 + # Check that the z_stars are the same + solution1 = nx.DiGraph() + solution1.add_edges_from(solution1_edges) + solution2 = nx.DiGraph() + solution2.add_edges_from(solution2_edges) + assert nx.utils.edges_equal(z_star.edges, solution1.edges) or nx.utils.edges_equal( + z_star.edges, solution2.edges + ) + + +def test_held_karp_ascent_fractional_asymmetric(): + """ + Tests the ascent method using a truly asymmetric graph with a fractional + solution for which the solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 100, 150, 100000, 100000, 1], + [150, 0, 100, 100000, 1, 100000], + [100, 150, 0, 1, 100000, 100000], + [100000, 100000, 1, 0, 150, 100], + [100000, 2, 100000, 100, 0, 150], + [2, 100000, 100000, 150, 100, 0], + ] + ) + + solution_z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 5 / 12, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 5 / 12, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 5 / 12, + (3, 5): 5 / 12, + (4, 1): 5 / 6, + (4, 3): 5 / 12, + (4, 5): 5 / 12, + (5, 0): 5 / 6, + (5, 3): 5 / 12, + (5, 4): 5 / 12, + } + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 304.00 + # Check that the z_stars are the same + assert {key: round(z_star[key], 4) for key in z_star} == { + key: round(solution_z_star[key], 4) for key in solution_z_star + } + + +def test_spanning_tree_distribution(): + """ + Test that we can create an exponential distribution of spanning trees such + that the probability of each tree is proportional to the product of edge + weights. + + Results of this test have been confirmed with hypothesis testing from the + created distribution. + + This test uses the symmetric, fractional Held Karp solution. 
+ """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + pytest.importorskip("numpy") + pytest.importorskip("scipy") + + z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 1 / 3, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 1 / 3, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 1 / 3, + (3, 5): 1 / 2, + (4, 1): 5 / 6, + (4, 3): 1 / 3, + (4, 5): 1 / 2, + (5, 0): 5 / 6, + (5, 3): 1 / 2, + (5, 4): 1 / 2, + } + + solution_gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of z_star + G = nx.MultiGraph() + for u, v in z_star: + if (u, v) in G.edges or (v, u) in G.edges: + continue + G.add_edge(u, v) + + gamma = tsp.spanning_tree_distribution(G, z_star) + + assert {key: round(gamma[key], 4) for key in gamma} == solution_gamma + + +def test_asadpour_tsp(): + """ + Test the complete asadpour tsp algorithm with the fractional, symmetric + Held Karp solution. This test also uses an incomplete graph as input. + """ + # This version of Figure 2 has all of the edge weights multiplied by 100 + # and the 0 weight edges have a weight of 1. + pytest.importorskip("numpy") + pytest.importorskip("scipy") + + edge_list = [ + (0, 1, 100), + (0, 2, 100), + (0, 5, 1), + (1, 2, 100), + (1, 4, 1), + (2, 3, 1), + (3, 4, 100), + (3, 5, 100), + (4, 5, 100), + (1, 0, 100), + (2, 0, 100), + (5, 0, 1), + (2, 1, 100), + (4, 1, 1), + (3, 2, 1), + (4, 3, 100), + (5, 3, 100), + (5, 4, 100), + ] + + G = nx.DiGraph() + G.add_weighted_edges_from(edge_list) + + def fixed_asadpour(G, weight): + return nx_app.asadpour_atsp(G, weight, 19) + + tour = nx_app.traveling_salesman_problem(G, weight="weight", method=fixed_asadpour) + + # Check that the returned list is a valid tour. Because this is an + # incomplete graph, the conditions are not as strict. We need the tour to + # + # Start and end at the same node + # Pass through every vertex at least once + # Have a total cost at most ln(6) / ln(ln(6)) = 3.0723 times the optimal + # + # For the second condition it is possible to have the tour pass through the + # same vertex more then. Imagine that the tour on the complete version takes + # an edge not in the original graph. In the output this is substituted with + # the shortest path between those vertices, allowing vertices to appear more + # than once. + # + # However, we are using a fixed random number generator so we know what the + # expected tour is. + expected_tours = [[1, 4, 5, 0, 2, 3, 2, 1], [3, 2, 0, 1, 4, 5, 3]] + + assert tour in expected_tours + + +def test_asadpour_real_world(): + """ + This test uses airline prices between the six largest cities in the US. + + * New York City -> JFK + * Los Angeles -> LAX + * Chicago -> ORD + * Houston -> IAH + * Phoenix -> PHX + * Philadelphia -> PHL + + Flight prices from August 2021 using Delta or American airlines to get + nonstop flight. The brute force solution found the optimal tour to cost $872 + + This test also uses the `source` keyword argument to ensure that the tour + always starts at city 0. 
+ """ + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + # JFK LAX ORD IAH PHX PHL + [0, 243, 199, 208, 169, 183], # JFK + [277, 0, 217, 123, 127, 252], # LAX + [297, 197, 0, 197, 123, 177], # ORD + [303, 169, 197, 0, 117, 117], # IAH + [257, 127, 160, 117, 0, 319], # PHX + [183, 332, 217, 117, 319, 0], # PHL + ] + ) + + node_map = {0: "JFK", 1: "LAX", 2: "ORD", 3: "IAH", 4: "PHX", 5: "PHL"} + + expected_tours = [ + ["JFK", "LAX", "PHX", "ORD", "IAH", "PHL", "JFK"], + ["JFK", "ORD", "PHX", "LAX", "IAH", "PHL", "JFK"], + ] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + nx.relabel_nodes(G, node_map, copy=False) + + def fixed_asadpour(G, weight): + return nx_app.asadpour_atsp(G, weight, 37, source="JFK") + + tour = nx_app.traveling_salesman_problem(G, weight="weight", method=fixed_asadpour) + + assert tour in expected_tours + + +def test_asadpour_real_world_path(): + """ + This test uses airline prices between the six largest cities in the US. This + time using a path, not a cycle. + + * New York City -> JFK + * Los Angeles -> LAX + * Chicago -> ORD + * Houston -> IAH + * Phoenix -> PHX + * Philadelphia -> PHL + + Flight prices from August 2021 using Delta or American airlines to get + nonstop flight. The brute force solution found the optimal tour to cost $872 + """ + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + # JFK LAX ORD IAH PHX PHL + [0, 243, 199, 208, 169, 183], # JFK + [277, 0, 217, 123, 127, 252], # LAX + [297, 197, 0, 197, 123, 177], # ORD + [303, 169, 197, 0, 117, 117], # IAH + [257, 127, 160, 117, 0, 319], # PHX + [183, 332, 217, 117, 319, 0], # PHL + ] + ) + + node_map = {0: "JFK", 1: "LAX", 2: "ORD", 3: "IAH", 4: "PHX", 5: "PHL"} + + expected_paths = [ + ["ORD", "PHX", "LAX", "IAH", "PHL", "JFK"], + ["JFK", "PHL", "IAH", "ORD", "PHX", "LAX"], + ] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + nx.relabel_nodes(G, node_map, copy=False) + + def fixed_asadpour(G, weight): + return nx_app.asadpour_atsp(G, weight, 56) + + path = nx_app.traveling_salesman_problem( + G, weight="weight", cycle=False, method=fixed_asadpour + ) + + assert path in expected_paths + + +def test_asadpour_disconnected_graph(): + """ + Test that the proper exception is raised when asadpour_atsp is given an + disconnected graph. + """ + + G = nx.complete_graph(4, create_using=nx.DiGraph) + # have to set edge weights so that if the exception is not raised, the + # function will complete and we will fail the test + nx.set_edge_attributes(G, 1, "weight") + G.add_node(5) + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +def test_asadpour_incomplete_graph(): + """ + Test that the proper exception is raised when asadpour_atsp is given an + incomplete graph + """ + + G = nx.complete_graph(4, create_using=nx.DiGraph) + # have to set edge weights so that if the exception is not raised, the + # function will complete and we will fail the test + nx.set_edge_attributes(G, 1, "weight") + G.remove_edge(0, 1) + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +def test_asadpour_empty_graph(): + """ + Test the asadpour_atsp function with an empty graph + """ + G = nx.DiGraph() + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +@pytest.mark.slow +def test_asadpour_integral_held_karp(): + """ + This test uses an integral held karp solution and the held karp function + will return a graph rather than a dict, bypassing most of the asadpour + algorithm. 
+
+    At first glance this test may not look like it ensures that we skip the
+    rest of the asadpour algorithm, but it does. We do not fix a seed for the
+    random number generator, so if any spanning trees were sampled, the
+    approximation would differ on essentially every run. It does not differ,
+    because held_karp_ascent is deterministic and we never reach the portion
+    of the code that depends on random numbers.
+    """
+    np = pytest.importorskip("numpy")
+
+    G_array = np.array(
+        [
+            [0, 26, 63, 59, 69, 31, 41],
+            [62, 0, 91, 53, 75, 87, 47],
+            [47, 82, 0, 90, 15, 9, 18],
+            [68, 19, 5, 0, 58, 34, 93],
+            [11, 58, 53, 55, 0, 61, 79],
+            [88, 75, 13, 76, 98, 0, 40],
+            [41, 61, 55, 88, 46, 45, 0],
+        ]
+    )
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+
+    for _ in range(2):
+        tour = nx_app.traveling_salesman_problem(G, method=nx_app.asadpour_atsp)
+
+        assert [1, 3, 2, 5, 2, 6, 4, 0, 1] == tour
+
+
+def test_directed_tsp_impossible():
+    """
+    Test the asadpour algorithm with a graph without a Hamiltonian circuit
+    """
+    pytest.importorskip("numpy")
+
+    # In this graph, once we leave node 0 we cannot return
+    edges = [
+        (0, 1, 10),
+        (0, 2, 11),
+        (0, 3, 12),
+        (1, 2, 4),
+        (1, 3, 6),
+        (2, 1, 3),
+        (2, 3, 2),
+        (3, 1, 5),
+        (3, 2, 1),
+    ]
+
+    G = nx.DiGraph()
+    G.add_weighted_edges_from(edges)
+
+    pytest.raises(nx.NetworkXError, nx_app.traveling_salesman_problem, G)
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py
new file mode 100644
index 0000000000000000000000000000000000000000..461b0f2ed2dd4d043902d054e10a5f39ffb069c9
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py
@@ -0,0 +1,280 @@
+import itertools
+
+import networkx as nx
+from networkx.algorithms.approximation import (
+    treewidth_min_degree,
+    treewidth_min_fill_in,
+)
+from networkx.algorithms.approximation.treewidth import (
+    MinDegreeHeuristic,
+    min_fill_in_heuristic,
+)
+
+
+def is_tree_decomp(graph, decomp):
+    """Check if the given tree decomposition is valid."""
+    for x in graph.nodes():
+        appear_once = False
+        for bag in decomp.nodes():
+            if x in bag:
+                appear_once = True
+                break
+        assert appear_once
+
+    # Check that each connected pair of nodes appears together in at least
+    # one bag
+    for x, y in graph.edges():
+        appear_together = False
+        for bag in decomp.nodes():
+            if x in bag and y in bag:
+                appear_together = True
+                break
+        assert appear_together
+
+    # Check that the bags containing each vertex v form a connected subgraph
+    # of the decomposition T
+    for v in graph.nodes():
+        subset = []
+        for bag in decomp.nodes():
+            if v in bag:
+                subset.append(bag)
+        sub_graph = decomp.subgraph(subset)
+        assert nx.is_connected(sub_graph)
+
+
+class TestTreewidthMinDegree:
+    """Unit tests for the min_degree function"""
+
+    @classmethod
+    def setup_class(cls):
+        """Setup for the different kinds of test graphs"""
+        cls.complete = nx.Graph()
+        cls.complete.add_edge(1, 2)
+        cls.complete.add_edge(2, 3)
+        cls.complete.add_edge(1, 3)
+
+        cls.small_tree = nx.Graph()
+        cls.small_tree.add_edge(1, 3)
+        cls.small_tree.add_edge(4, 3)
+        cls.small_tree.add_edge(2, 3)
+        cls.small_tree.add_edge(3, 5)
+        cls.small_tree.add_edge(5, 6)
+        cls.small_tree.add_edge(5, 7)
+        cls.small_tree.add_edge(6, 7)
+
+        cls.deterministic_graph = nx.Graph()
+        cls.deterministic_graph.add_edge(0, 1)  # deg(0) = 1
+
+        cls.deterministic_graph.add_edge(1, 2)  # deg(1) = 2
+
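+        # The inline deg(n) comments record each node's degree in the final
+        # graph; test_heuristic_first_steps expects the min-degree heuristic
+        # to eliminate nodes 0 through 4 first, in that order.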
cls.deterministic_graph.add_edge(2, 3) + cls.deterministic_graph.add_edge(2, 4) # deg(2) = 3 + + cls.deterministic_graph.add_edge(3, 4) + cls.deterministic_graph.add_edge(3, 5) + cls.deterministic_graph.add_edge(3, 6) # deg(3) = 4 + + cls.deterministic_graph.add_edge(4, 5) + cls.deterministic_graph.add_edge(4, 6) + cls.deterministic_graph.add_edge(4, 7) # deg(4) = 5 + + cls.deterministic_graph.add_edge(5, 6) + cls.deterministic_graph.add_edge(5, 7) + cls.deterministic_graph.add_edge(5, 8) + cls.deterministic_graph.add_edge(5, 9) # deg(5) = 6 + + cls.deterministic_graph.add_edge(6, 7) + cls.deterministic_graph.add_edge(6, 8) + cls.deterministic_graph.add_edge(6, 9) # deg(6) = 6 + + cls.deterministic_graph.add_edge(7, 8) + cls.deterministic_graph.add_edge(7, 9) # deg(7) = 5 + + cls.deterministic_graph.add_edge(8, 9) # deg(8) = 4 + + def test_petersen_graph(self): + """Test Petersen graph tree decomposition result""" + G = nx.petersen_graph() + _, decomp = treewidth_min_degree(G) + is_tree_decomp(G, decomp) + + def test_small_tree_treewidth(self): + """Test small tree + + Test if the computed treewidth of the known self.small_tree is 2. + As we know which value we can expect from our heuristic, values other + than two are regressions + """ + G = self.small_tree + # the order of removal should be [1,2,4]3[5,6,7] + # (with [] denoting any order of the containing nodes) + # resulting in treewidth 2 for the heuristic + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 2 + + def test_heuristic_abort(self): + """Test heuristic abort condition for fully connected graph""" + graph = {} + for u in self.complete: + graph[u] = set() + for v in self.complete[u]: + if u != v: # ignore self-loop + graph[u].add(v) + + deg_heuristic = MinDegreeHeuristic(graph) + node = deg_heuristic.best_node(graph) + if node is None: + pass + else: + assert False + + def test_empty_graph(self): + """Test empty graph""" + G = nx.Graph() + _, _ = treewidth_min_degree(G) + + def test_two_component_graph(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + treewidth, _ = treewidth_min_degree(G) + assert treewidth == 0 + + def test_not_sortable_nodes(self): + G = nx.Graph([(0, "a")]) + treewidth_min_degree(G) + + def test_heuristic_first_steps(self): + """Test first steps of min_degree heuristic""" + graph = { + n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph + } + deg_heuristic = MinDegreeHeuristic(graph) + elim_node = deg_heuristic.best_node(graph) + print(f"Graph {graph}:") + steps = [] + + while elim_node is not None: + print(f"Removing {elim_node}:") + steps.append(elim_node) + nbrs = graph[elim_node] + + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + for u in graph: + if elim_node in graph[u]: + graph[u].remove(elim_node) + + del graph[elim_node] + print(f"Graph {graph}:") + elim_node = deg_heuristic.best_node(graph) + + # check only the first 5 elements for equality + assert steps[:5] == [0, 1, 2, 3, 4] + + +class TestTreewidthMinFillIn: + """Unit tests for the treewidth_min_fill_in function.""" + + @classmethod + def setup_class(cls): + """Setup for different kinds of trees""" + cls.complete = nx.Graph() + cls.complete.add_edge(1, 2) + cls.complete.add_edge(2, 3) + cls.complete.add_edge(1, 3) + + cls.small_tree = nx.Graph() + cls.small_tree.add_edge(1, 2) + cls.small_tree.add_edge(2, 3) + cls.small_tree.add_edge(3, 4) + cls.small_tree.add_edge(1, 4) + cls.small_tree.add_edge(2, 4) + cls.small_tree.add_edge(4, 5) + 
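+        # The last three edges close a triangle on {5, 6, 7}; the heuristic
+        # should still report treewidth 2 (see test_small_tree_treewidth).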
cls.small_tree.add_edge(5, 6) + cls.small_tree.add_edge(5, 7) + cls.small_tree.add_edge(6, 7) + + cls.deterministic_graph = nx.Graph() + cls.deterministic_graph.add_edge(1, 2) + cls.deterministic_graph.add_edge(1, 3) + cls.deterministic_graph.add_edge(3, 4) + cls.deterministic_graph.add_edge(2, 4) + cls.deterministic_graph.add_edge(3, 5) + cls.deterministic_graph.add_edge(4, 5) + cls.deterministic_graph.add_edge(3, 6) + cls.deterministic_graph.add_edge(5, 6) + + def test_petersen_graph(self): + """Test Petersen graph tree decomposition result""" + G = nx.petersen_graph() + _, decomp = treewidth_min_fill_in(G) + is_tree_decomp(G, decomp) + + def test_small_tree_treewidth(self): + """Test if the computed treewidth of the known self.small_tree is 2""" + G = self.small_tree + # the order of removal should be [1,2,4]3[5,6,7] + # (with [] denoting any order of the containing nodes) + # resulting in treewidth 2 for the heuristic + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 2 + + def test_heuristic_abort(self): + """Test if min_fill_in returns None for fully connected graph""" + graph = {} + for u in self.complete: + graph[u] = set() + for v in self.complete[u]: + if u != v: # ignore self-loop + graph[u].add(v) + next_node = min_fill_in_heuristic(graph) + if next_node is None: + pass + else: + assert False + + def test_empty_graph(self): + """Test empty graph""" + G = nx.Graph() + _, _ = treewidth_min_fill_in(G) + + def test_two_component_graph(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 0 + + def test_not_sortable_nodes(self): + G = nx.Graph([(0, "a")]) + treewidth_min_fill_in(G) + + def test_heuristic_first_steps(self): + """Test first steps of min_fill_in heuristic""" + graph = { + n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph + } + print(f"Graph {graph}:") + elim_node = min_fill_in_heuristic(graph) + steps = [] + + while elim_node is not None: + print(f"Removing {elim_node}:") + steps.append(elim_node) + nbrs = graph[elim_node] + + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + for u in graph: + if elim_node in graph[u]: + graph[u].remove(elim_node) + + del graph[elim_node] + print(f"Graph {graph}:") + elim_node = min_fill_in_heuristic(graph) + + # check only the first 2 elements for equality + assert steps[:2] == [6, 5] diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py new file mode 100644 index 0000000000000000000000000000000000000000..5cc5a38df9a4139684005491e0183cd563487154 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py @@ -0,0 +1,68 @@ +import networkx as nx +from networkx.algorithms.approximation import min_weighted_vertex_cover + + +def is_cover(G, node_cover): + return all({u, v} & node_cover for u, v in G.edges()) + + +class TestMWVC: + """Unit tests for the approximate minimum weighted vertex cover + function, + :func:`~networkx.algorithms.approximation.vertex_cover.min_weighted_vertex_cover`. + + """ + + def test_unweighted_directed(self): + # Create a star graph in which half the nodes are directed in + # and half are directed out. 
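+        # Node 0 lies on every edge of this star, so {0} by itself is a
+        # valid cover of minimum size.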
+ G = nx.DiGraph() + G.add_edges_from((0, v) for v in range(1, 26)) + G.add_edges_from((v, 0) for v in range(26, 51)) + cover = min_weighted_vertex_cover(G) + assert 1 == len(cover) + assert is_cover(G, cover) + + def test_unweighted_undirected(self): + # create a simple star graph + size = 50 + sg = nx.star_graph(size) + cover = min_weighted_vertex_cover(sg) + assert 1 == len(cover) + assert is_cover(sg, cover) + + def test_weighted(self): + wg = nx.Graph() + wg.add_node(0, weight=10) + wg.add_node(1, weight=1) + wg.add_node(2, weight=1) + wg.add_node(3, weight=1) + wg.add_node(4, weight=1) + + wg.add_edge(0, 1) + wg.add_edge(0, 2) + wg.add_edge(0, 3) + wg.add_edge(0, 4) + + wg.add_edge(1, 2) + wg.add_edge(2, 3) + wg.add_edge(3, 4) + wg.add_edge(4, 1) + + cover = min_weighted_vertex_cover(wg, weight="weight") + csum = sum(wg.nodes[node]["weight"] for node in cover) + assert 4 == csum + assert is_cover(wg, cover) + + def test_unweighted_self_loop(self): + slg = nx.Graph() + slg.add_node(0) + slg.add_node(1) + slg.add_node(2) + + slg.add_edge(0, 1) + slg.add_edge(2, 2) + + cover = min_weighted_vertex_cover(slg) + assert 2 == len(cover) + assert is_cover(slg, cover) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/traveling_salesman.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/traveling_salesman.py new file mode 100644 index 0000000000000000000000000000000000000000..cbc320486e262883d989c4f3489ac8ea08db1137 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/traveling_salesman.py @@ -0,0 +1,1442 @@ +""" +================================= +Travelling Salesman Problem (TSP) +================================= + +Implementation of approximate algorithms +for solving and approximating the TSP problem. + +Categories of algorithms which are implemented: + +- Christofides (provides a 3/2-approximation of TSP) +- Greedy +- Simulated Annealing (SA) +- Threshold Accepting (TA) +- Asadpour Asymmetric Traveling Salesman Algorithm + +The Travelling Salesman Problem tries to find, given the weight +(distance) between all points where a salesman has to visit, the +route so that: + +- The total distance (cost) which the salesman travels is minimized. +- The salesman returns to the starting point. +- Note that for a complete graph, the salesman visits each point once. + +The function `travelling_salesman_problem` allows for incomplete +graphs by finding all-pairs shortest paths, effectively converting +the problem to a complete graph problem. It calls one of the +approximate methods on that problem and then converts the result +back to the original graph using the previously found shortest paths. + +TSP is an NP-hard problem in combinatorial optimization, +important in operations research and theoretical computer science. + +http://en.wikipedia.org/wiki/Travelling_salesman_problem +""" +import math + +import networkx as nx +from networkx.algorithms.tree.mst import random_spanning_tree +from networkx.utils import not_implemented_for, pairwise, py_random_state + +__all__ = [ + "traveling_salesman_problem", + "christofides", + "asadpour_atsp", + "greedy_tsp", + "simulated_annealing_tsp", + "threshold_accepting_tsp", +] + + +def swap_two_nodes(soln, seed): + """Swap two nodes in `soln` to give a neighbor solution. + + Parameters + ---------- + soln : list of nodes + Current cycle of nodes + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. 
+ + Returns + ------- + list + The solution after move is applied. (A neighbor solution.) + + Notes + ----- + This function assumes that the incoming list `soln` is a cycle + (that the first and last element are the same) and also that + we don't want any move to change the first node in the list + (and thus not the last node either). + + The input list is changed as well as returned. Make a copy if needed. + + See Also + -------- + move_one_node + """ + a, b = seed.sample(range(1, len(soln) - 1), k=2) + soln[a], soln[b] = soln[b], soln[a] + return soln + + +def move_one_node(soln, seed): + """Move one node to another position to give a neighbor solution. + + The node to move and the position to move to are chosen randomly. + The first and last nodes are left untouched as soln must be a cycle + starting at that node. + + Parameters + ---------- + soln : list of nodes + Current cycle of nodes + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + The solution after move is applied. (A neighbor solution.) + + Notes + ----- + This function assumes that the incoming list `soln` is a cycle + (that the first and last element are the same) and also that + we don't want any move to change the first node in the list + (and thus not the last node either). + + The input list is changed as well as returned. Make a copy if needed. + + See Also + -------- + swap_two_nodes + """ + a, b = seed.sample(range(1, len(soln) - 1), k=2) + soln.insert(b, soln.pop(a)) + return soln + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def christofides(G, weight="weight", tree=None): + """Approximate a solution of the traveling salesman problem + + Compute a 3/2-approximation of the traveling salesman problem + in a complete undirected graph using Christofides [1]_ algorithm. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted undirected graph. + The distance between all pairs of nodes should be included. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + tree : NetworkX graph or None (default: None) + A minimum spanning tree of G. Or, if None, the minimum spanning + tree is computed using :func:`networkx.minimum_spanning_tree` + + Returns + ------- + list + List of nodes in `G` along a cycle with a 3/2-approximation of + the minimal Hamiltonian cycle. + + References + ---------- + .. [1] Christofides, Nicos. "Worst-case analysis of a new heuristic for + the travelling salesman problem." No. RR-388. Carnegie-Mellon Univ + Pittsburgh Pa Management Sciences Research Group, 1976. + """ + # Remove selfloops if necessary + loop_nodes = nx.nodes_with_selfloops(G) + try: + node = next(loop_nodes) + except StopIteration: + pass + else: + G = G.copy() + G.remove_edge(node, node) + G.remove_edges_from((n, n) for n in loop_nodes) + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. 
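+    # After selfloop removal, every node of a complete graph has exactly
+    # N = len(G) - 1 neighbors; the check below enforces this.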
+ if any(len(nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if tree is None: + tree = nx.minimum_spanning_tree(G, weight=weight) + L = G.copy() + L.remove_nodes_from([v for v, degree in tree.degree if not (degree % 2)]) + MG = nx.MultiGraph() + MG.add_edges_from(tree.edges) + edges = nx.min_weight_matching(L, weight=weight) + MG.add_edges_from(edges) + return _shortcutting(nx.eulerian_circuit(MG)) + + +def _shortcutting(circuit): + """Remove duplicate nodes in the path""" + nodes = [] + for u, v in circuit: + if v in nodes: + continue + if not nodes: + nodes.append(u) + nodes.append(v) + nodes.append(nodes[0]) + return nodes + + +@nx._dispatch(edge_attrs="weight") +def traveling_salesman_problem(G, weight="weight", nodes=None, cycle=True, method=None): + """Find the shortest path in `G` connecting specified nodes + + This function allows approximate solution to the traveling salesman + problem on networks that are not complete graphs and/or where the + salesman does not need to visit all nodes. + + This function proceeds in two steps. First, it creates a complete + graph using the all-pairs shortest_paths between nodes in `nodes`. + Edge weights in the new graph are the lengths of the paths + between each pair of nodes in the original graph. + Second, an algorithm (default: `christofides` for undirected and + `asadpour_atsp` for directed) is used to approximate the minimal Hamiltonian + cycle on this new graph. The available algorithms are: + + - christofides + - greedy_tsp + - simulated_annealing_tsp + - threshold_accepting_tsp + - asadpour_atsp + + Once the Hamiltonian Cycle is found, this function post-processes to + accommodate the structure of the original graph. If `cycle` is ``False``, + the biggest weight edge is removed to make a Hamiltonian path. + Then each edge on the new complete graph used for that analysis is + replaced by the shortest_path between those nodes on the original graph. + + Parameters + ---------- + G : NetworkX graph + A possibly weighted graph + + nodes : collection of nodes (default=G.nodes) + collection (list, set, etc.) of nodes to visit + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + cycle : bool (default: True) + Indicates whether a cycle should be returned, or a path. + Note: the cycle is the approximate minimal cycle. + The path simply removes the biggest edge in that cycle. + + method : function (default: None) + A function that returns a cycle on all nodes and approximates + the solution to the traveling salesman problem on a complete + graph. The returned cycle is then used to find a corresponding + solution on `G`. `method` should be callable; take inputs + `G`, and `weight`; and return a list of nodes along the cycle. + + Provided options include :func:`christofides`, :func:`greedy_tsp`, + :func:`simulated_annealing_tsp` and :func:`threshold_accepting_tsp`. + + If `method is None`: use :func:`christofides` for undirected `G` and + :func:`threshold_accepting_tsp` for directed `G`. + + To specify parameters for these provided functions, construct lambda + functions that state the specific value. `method` must have 2 inputs. + (See examples). + + Returns + ------- + list + List of nodes in `G` along a path with an approximation of the minimal + path through `nodes`. 
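+        When `G` is not complete, nodes outside `nodes` can appear in the
+        returned list, because each edge of the internal complete graph is
+        expanded back into a shortest path of `G`.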
+ + + Raises + ------ + NetworkXError + If `G` is a directed graph it has to be strongly connected or the + complete version cannot be generated. + + Examples + -------- + >>> tsp = nx.approximation.traveling_salesman_problem + >>> G = nx.cycle_graph(9) + >>> G[4][5]["weight"] = 5 # all other weights are 1 + >>> tsp(G, nodes=[3, 6]) + [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3] + >>> path = tsp(G, cycle=False) + >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4]) + True + + Build (curry) your own function to provide parameter values to the methods. + + >>> SA_tsp = nx.approximation.simulated_annealing_tsp + >>> method = lambda G, wt: SA_tsp(G, "greedy", weight=wt, temp=500) + >>> path = tsp(G, cycle=False, method=method) + >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4]) + True + + """ + if method is None: + if G.is_directed(): + method = asadpour_atsp + else: + method = christofides + if nodes is None: + nodes = list(G.nodes) + + dist = {} + path = {} + for n, (d, p) in nx.all_pairs_dijkstra(G, weight=weight): + dist[n] = d + path[n] = p + + if G.is_directed(): + # If the graph is not strongly connected, raise an exception + if not nx.is_strongly_connected(G): + raise nx.NetworkXError("G is not strongly connected") + GG = nx.DiGraph() + else: + GG = nx.Graph() + for u in nodes: + for v in nodes: + if u == v: + continue + GG.add_edge(u, v, weight=dist[u][v]) + best_GG = method(GG, weight) + + if not cycle: + # find and remove the biggest edge + (u, v) = max(pairwise(best_GG), key=lambda x: dist[x[0]][x[1]]) + pos = best_GG.index(u) + 1 + while best_GG[pos] != v: + pos = best_GG[pos:].index(u) + 1 + best_GG = best_GG[pos:-1] + best_GG[:pos] + + best_path = [] + for u, v in pairwise(best_GG): + best_path.extend(path[u][v][:-1]) + best_path.append(v) + return best_path + + +@not_implemented_for("undirected") +@py_random_state(2) +@nx._dispatch(edge_attrs="weight") +def asadpour_atsp(G, weight="weight", seed=None, source=None): + """ + Returns an approximate solution to the traveling salesman problem. + + This approximate solution is one of the best known approximations for the + asymmetric traveling salesman problem developed by Asadpour et al, + [1]_. The algorithm first solves the Held-Karp relaxation to find a lower + bound for the weight of the cycle. Next, it constructs an exponential + distribution of undirected spanning trees where the probability of an + edge being in the tree corresponds to the weight of that edge using a + maximum entropy rounding scheme. Next we sample that distribution + $2 \\lceil \\ln n \\rceil$ times and save the minimum sampled tree once the + direction of the arcs is added back to the edges. Finally, we augment + then short circuit that graph to find the approximate tour for the + salesman. + + Parameters + ---------- + G : nx.DiGraph + The graph should be a complete weighted directed graph. The + distance between all paris of nodes should be included and the triangle + inequality should hold. That is, the direct edge between any two nodes + should be the path of least cost. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + source : node label (default=`None`) + If given, return the cycle starting and ending at the given node. 
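+        Otherwise the starting node of the returned cycle is arbitrary.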
+ + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman can follow to minimize + the total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete or has less than two nodes, the algorithm raises + an exception. + + NetworkXError + If `source` is not `None` and is not a node in `G`, the algorithm raises + an exception. + + NetworkXNotImplemented + If `G` is an undirected graph. + + References + ---------- + .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi, + An o(log n/log log n)-approximation algorithm for the asymmetric + traveling salesman problem, Operations research, 65 (2017), + pp. 1043–1061 + + Examples + -------- + >>> import networkx as nx + >>> import networkx.algorithms.approximation as approx + >>> G = nx.complete_graph(3, create_using=nx.DiGraph) + >>> nx.set_edge_attributes(G, {(0, 1): 2, (1, 2): 2, (2, 0): 2, (0, 2): 1, (2, 1): 1, (1, 0): 1}, "weight") + >>> tour = approx.asadpour_atsp(G,source=0) + >>> tour + [0, 2, 1, 0] + """ + from math import ceil, exp + from math import log as ln + + # Check that G is a complete graph + N = len(G) - 1 + if N < 2: + raise nx.NetworkXError("G must have at least two nodes") + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G is not a complete DiGraph") + # Check that the source vertex, if given, is in the graph + if source is not None and source not in G.nodes: + raise nx.NetworkXError("Given source node not in G.") + + opt_hk, z_star = held_karp_ascent(G, weight) + + # Test to see if the ascent method found an integer solution or a fractional + # solution. If it is integral then z_star is a nx.Graph, otherwise it is + # a dict + if not isinstance(z_star, dict): + # Here we are using the shortcutting method to go from the list of edges + # returned from eulerian_circuit to a list of nodes + return _shortcutting(nx.eulerian_circuit(z_star, source=source)) + + # Create the undirected support of z_star + z_support = nx.MultiGraph() + for u, v in z_star: + if (u, v) not in z_support.edges: + edge_weight = min(G[u][v][weight], G[v][u][weight]) + z_support.add_edge(u, v, **{weight: edge_weight}) + + # Create the exponential distribution of spanning trees + gamma = spanning_tree_distribution(z_support, z_star) + + # Write the lambda values to the edges of z_support + z_support = nx.Graph(z_support) + lambda_dict = {(u, v): exp(gamma[(u, v)]) for u, v in z_support.edges()} + nx.set_edge_attributes(z_support, lambda_dict, "weight") + del gamma, lambda_dict + + # Sample 2 * ceil( ln(n) ) spanning trees and record the minimum one + minimum_sampled_tree = None + minimum_sampled_tree_weight = math.inf + for _ in range(2 * ceil(ln(G.number_of_nodes()))): + sampled_tree = random_spanning_tree(z_support, "weight", seed=seed) + sampled_tree_weight = sampled_tree.size(weight) + if sampled_tree_weight < minimum_sampled_tree_weight: + minimum_sampled_tree = sampled_tree.copy() + minimum_sampled_tree_weight = sampled_tree_weight + + # Orient the edges in that tree to keep the cost of the tree the same. 
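+    # Each sampled (undirected) tree edge is mapped back to a directed arc
+    # of G: keep (u, v) when the stored tree weight matches G[u][v];
+    # otherwise use the reverse arc (v, u).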
+ t_star = nx.MultiDiGraph() + for u, v, d in minimum_sampled_tree.edges(data=weight): + if d == G[u][v][weight]: + t_star.add_edge(u, v, **{weight: d}) + else: + t_star.add_edge(v, u, **{weight: d}) + + # Find the node demands needed to neutralize the flow of t_star in G + node_demands = {n: t_star.out_degree(n) - t_star.in_degree(n) for n in t_star} + nx.set_node_attributes(G, node_demands, "demand") + + # Find the min_cost_flow + flow_dict = nx.min_cost_flow(G, "demand") + + # Build the flow into t_star + for source, values in flow_dict.items(): + for target in values: + if (source, target) not in t_star.edges and values[target] > 0: + # IF values[target] > 0 we have to add that many edges + for _ in range(values[target]): + t_star.add_edge(source, target) + + # Return the shortcut eulerian circuit + circuit = nx.eulerian_circuit(t_star, source=source) + return _shortcutting(circuit) + + +@nx._dispatch(edge_attrs="weight") +def held_karp_ascent(G, weight="weight"): + """ + Minimizes the Held-Karp relaxation of the TSP for `G` + + Solves the Held-Karp relaxation of the input complete digraph and scales + the output solution for use in the Asadpour [1]_ ASTP algorithm. + + The Held-Karp relaxation defines the lower bound for solutions to the + ATSP, although it does return a fractional solution. This is used in the + Asadpour algorithm as an initial solution which is later rounded to a + integral tree within the spanning tree polytopes. This function solves + the relaxation with the branch and bound method in [2]_. + + Parameters + ---------- + G : nx.DiGraph + The graph should be a complete weighted directed graph. + The distance between all paris of nodes should be included. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + Returns + ------- + OPT : float + The cost for the optimal solution to the Held-Karp relaxation + z : dict or nx.Graph + A symmetrized and scaled version of the optimal solution to the + Held-Karp relaxation for use in the Asadpour algorithm. + + If an integral solution is found, then that is an optimal solution for + the ATSP problem and that is returned instead. + + References + ---------- + .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi, + An o(log n/log log n)-approximation algorithm for the asymmetric + traveling salesman problem, Operations research, 65 (2017), + pp. 1043–1061 + + .. [2] M. Held, R. M. Karp, The traveling-salesman problem and minimum + spanning trees, Operations Research, 1970-11-01, Vol. 18 (6), + pp.1138-1162 + """ + import numpy as np + from scipy import optimize + + def k_pi(): + """ + Find the set of minimum 1-Arborescences for G at point pi. + + Returns + ------- + Set + The set of minimum 1-Arborescences + """ + # Create a copy of G without vertex 1. + G_1 = G.copy() + minimum_1_arborescences = set() + minimum_1_arborescence_weight = math.inf + + # node is node '1' in the Held and Karp paper + n = next(G.__iter__()) + G_1.remove_node(n) + + # Iterate over the spanning arborescences of the graph until we know + # that we have found the minimum 1-arborescences. My proposed strategy + # is to find the most extensive root to connect to from 'node 1' and + # the least expensive one. 
We then iterate over arborescences until + # the cost of the basic arborescence is the cost of the minimum one + # plus the difference between the most and least expensive roots, + # that way the cost of connecting 'node 1' will by definition not by + # minimum + min_root = {"node": None, weight: math.inf} + max_root = {"node": None, weight: -math.inf} + for u, v, d in G.edges(n, data=True): + if d[weight] < min_root[weight]: + min_root = {"node": v, weight: d[weight]} + if d[weight] > max_root[weight]: + max_root = {"node": v, weight: d[weight]} + + min_in_edge = min(G.in_edges(n, data=True), key=lambda x: x[2][weight]) + min_root[weight] = min_root[weight] + min_in_edge[2][weight] + max_root[weight] = max_root[weight] + min_in_edge[2][weight] + + min_arb_weight = math.inf + for arb in nx.ArborescenceIterator(G_1): + arb_weight = arb.size(weight) + if min_arb_weight == math.inf: + min_arb_weight = arb_weight + elif arb_weight > min_arb_weight + max_root[weight] - min_root[weight]: + break + # We have to pick the root node of the arborescence for the out + # edge of the first vertex as that is the only node without an + # edge directed into it. + for N, deg in arb.in_degree: + if deg == 0: + # root found + arb.add_edge(n, N, **{weight: G[n][N][weight]}) + arb_weight += G[n][N][weight] + break + + # We can pick the minimum weight in-edge for the vertex with + # a cycle. If there are multiple edges with the same, minimum + # weight, We need to add all of them. + # + # Delete the edge (N, v) so that we cannot pick it. + edge_data = G[N][n] + G.remove_edge(N, n) + min_weight = min(G.in_edges(n, data=weight), key=lambda x: x[2])[2] + min_edges = [ + (u, v, d) for u, v, d in G.in_edges(n, data=weight) if d == min_weight + ] + for u, v, d in min_edges: + new_arb = arb.copy() + new_arb.add_edge(u, v, **{weight: d}) + new_arb_weight = arb_weight + d + # Check to see the weight of the arborescence, if it is a + # new minimum, clear all of the old potential minimum + # 1-arborescences and add this is the only one. If its + # weight is above the known minimum, do not add it. + if new_arb_weight < minimum_1_arborescence_weight: + minimum_1_arborescences.clear() + minimum_1_arborescence_weight = new_arb_weight + # We have a 1-arborescence, add it to the set + if new_arb_weight == minimum_1_arborescence_weight: + minimum_1_arborescences.add(new_arb) + G.add_edge(N, n, **edge_data) + + return minimum_1_arborescences + + def direction_of_ascent(): + """ + Find the direction of ascent at point pi. + + See [1]_ for more information. + + Returns + ------- + dict + A mapping from the nodes of the graph which represents the direction + of ascent. + + References + ---------- + .. [1] M. Held, R. M. Karp, The traveling-salesman problem and minimum + spanning trees, Operations Research, 1970-11-01, Vol. 18 (6), + pp.1138-1162 + """ + # 1. Set d equal to the zero n-vector. + d = {} + for n in G: + d[n] = 0 + del n + # 2. Find a 1-Arborescence T^k such that k is in K(pi, d). + minimum_1_arborescences = k_pi() + while True: + # Reduce K(pi) to K(pi, d) + # Find the arborescence in K(pi) which increases the lest in + # direction d + min_k_d_weight = math.inf + min_k_d = None + for arborescence in minimum_1_arborescences: + weighted_cost = 0 + for n, deg in arborescence.degree: + weighted_cost += d[n] * (deg - 2) + if weighted_cost < min_k_d_weight: + min_k_d_weight = weighted_cost + min_k_d = arborescence + + # 3. 
If sum of d_i * v_{i, k} is greater than zero, terminate
+            if min_k_d_weight > 0:
+                return d, min_k_d
+            # 4. d_i = d_i + v_{i, k}
+            for n, deg in min_k_d.degree:
+                d[n] += deg - 2
+            # Check that we do not need to terminate because the direction
+            # of ascent does not exist. This is done with linear
+            # programming.
+            c = np.full(len(minimum_1_arborescences), -1, dtype=int)
+            a_eq = np.empty((len(G) + 1, len(minimum_1_arborescences)), dtype=int)
+            b_eq = np.zeros(len(G) + 1, dtype=int)
+            b_eq[len(G)] = 1
+            for arb_count, arborescence in enumerate(minimum_1_arborescences):
+                n_count = len(G) - 1
+                for n, deg in arborescence.degree:
+                    a_eq[n_count][arb_count] = deg - 2
+                    n_count -= 1
+                a_eq[len(G)][arb_count] = 1
+            program_result = optimize.linprog(c, A_eq=a_eq, b_eq=b_eq)
+            # If such constants exist, then no direction of ascent exists
+            if program_result.success:
+                # There is no direction of ascent
+                return None, minimum_1_arborescences
+
+            # 5. GO TO 2
+
+    def find_epsilon(k, d):
+        """
+        Given the direction of ascent at pi, find the maximum distance we can go
+        in that direction.
+
+        Parameters
+        ----------
+        k : nx.DiGraph
+            The 1-arborescence with the minimum rate of increase in the
+            direction of ascent
+
+        d : dict
+            The direction of ascent
+
+        Returns
+        -------
+        float
+            The distance we can travel in direction `d`
+        """
+        min_epsilon = math.inf
+        for e_u, e_v, e_w in G.edges(data=weight):
+            if (e_u, e_v) in k.edges:
+                continue
+            # A condition which MUST hold for an edge to be a valid
+            # substitute: the substitute edge within k is the one with the
+            # same terminal end. This can be checked rather simply.
+            #
+            # Find the edge within k which is the substitute. Because k is a
+            # 1-arborescence, we know that there is only one such edge
+            # leading into every vertex.
+            if len(k.in_edges(e_v, data=weight)) > 1:
+                raise Exception
+            sub_u, sub_v, sub_w = next(k.in_edges(e_v, data=weight).__iter__())
+            k.add_edge(e_u, e_v, **{weight: e_w})
+            k.remove_edge(sub_u, sub_v)
+            if (
+                max(d for n, d in k.in_degree()) <= 1
+                and len(G) == k.number_of_edges()
+                and nx.is_weakly_connected(k)
+            ):
+                # Ascent method calculation
+                if d[sub_u] == d[e_u] or sub_w == e_w:
+                    # Revert to the original graph
+                    k.remove_edge(e_u, e_v)
+                    k.add_edge(sub_u, sub_v, **{weight: sub_w})
+                    continue
+                epsilon = (sub_w - e_w) / (d[e_u] - d[sub_u])
+                if 0 < epsilon < min_epsilon:
+                    min_epsilon = epsilon
+            # Revert to the original graph
+            k.remove_edge(e_u, e_v)
+            k.add_edge(sub_u, sub_v, **{weight: sub_w})
+
+        return min_epsilon
+
+    # We have to know that the elements in pi correspond to the correct
+    # elements in the direction of ascent, even if the node labels are not
+    # integers. Thus, we use dictionaries to make that mapping.
+    pi_dict = {}
+    for n in G:
+        pi_dict[n] = 0
+    del n
+    original_edge_weights = {}
+    for u, v, d in G.edges(data=True):
+        original_edge_weights[(u, v)] = d[weight]
+    dir_ascent, k_d = direction_of_ascent()
+    while dir_ascent is not None:
+        max_distance = find_epsilon(k_d, dir_ascent)
+        for n, v in dir_ascent.items():
+            pi_dict[n] += max_distance * v
+        for u, v, d in G.edges(data=True):
+            d[weight] = original_edge_weights[(u, v)] + pi_dict[u]
+        dir_ascent, k_d = direction_of_ascent()
+    # k_d is no longer an individual 1-arborescence but rather the set of
+    # minimal 1-arborescences at the maximum point of the polytope, and should
+    # be treated as such
+    k_max = k_d
+
+    # Search for a cycle within k_max.
+    # If a cycle exists, return it as the solution
+    for k in k_max:
+        if len([n for n in k if k.degree(n) == 2]) == G.order():
+            # Tour found
+            return k.size(weight), k
+
+    # Write the original edge weights back to G and to every member of k_max
+    # at the maximum point. Also compute the fraction of the minimal
+    # 1-arborescences in which each edge appears.
+    x_star = {}
+    size_k_max = len(k_max)
+    for u, v, d in G.edges(data=True):
+        edge_count = 0
+        d[weight] = original_edge_weights[(u, v)]
+        for k in k_max:
+            if (u, v) in k.edges():
+                edge_count += 1
+                k[u][v][weight] = original_edge_weights[(u, v)]
+        x_star[(u, v)] = edge_count / size_k_max
+    # Now symmetrize the edges in x_star and scale them according to (5) in
+    # reference [1]
+    z_star = {}
+    scale_factor = (G.order() - 1) / G.order()
+    for u, v in x_star:
+        frequency = x_star[(u, v)] + x_star[(v, u)]
+        if frequency > 0:
+            z_star[(u, v)] = scale_factor * frequency
+    del x_star
+    # Return the optimal weight and the z dict
+    return next(k_max.__iter__()).size(weight), z_star
+
+
+@nx._dispatch
+def spanning_tree_distribution(G, z):
+    """
+    Find the Asadpour exponential distribution of spanning trees.
+
+    Solves the Maximum Entropy Convex Program in the Asadpour algorithm [1]_
+    using the approach in section 7 to build an exponential distribution of
+    undirected spanning trees.
+
+    This algorithm ensures that the probability of any edge in a spanning
+    tree is proportional to the sum of the probabilities of the trees
+    containing that edge over the sum of the probabilities of all spanning
+    trees of the graph.
+
+    Parameters
+    ----------
+    G : nx.MultiGraph
+        The undirected support graph for the Held Karp relaxation
+
+    z : dict
+        The output of `held_karp_ascent()`, a scaled version of the Held-Karp
+        solution.
+
+    Returns
+    -------
+    gamma : dict
+        The probability distribution which approximately preserves the marginal
+        probabilities of `z`.
+    """
+    from math import exp
+    from math import log as ln
+
+    def q(e):
+        """
+        The value of q(e), as described in the Asadpour paper, is "the
+        probability that edge e will be included in a spanning tree T that is
+        chosen with probability proportional to exp(gamma(T))", which
+        basically means that it is the total probability of the edge appearing
+        across the whole distribution.
+
+        Parameters
+        ----------
+        e : tuple
+            The `(u, v)` tuple describing the edge we are interested in
+
+        Returns
+        -------
+        float
+            The probability that a spanning tree chosen according to the
+            current values of gamma will include edge `e`.
+        """
+        # Create the Laplacian matrices
+        for u, v, d in G.edges(data=True):
+            d[lambda_key] = exp(gamma[(u, v)])
+        G_Kirchhoff = nx.total_spanning_tree_weight(G, lambda_key)
+        G_e = nx.contracted_edge(G, e, self_loops=False)
+        G_e_Kirchhoff = nx.total_spanning_tree_weight(G_e, lambda_key)
+
+        # Multiply by the weight of the contracted edge since it is not included
+        # in the total weight of the contracted graph.
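+        # By the matrix tree theorem, this evaluates
+        #     q(e) = lambda_e * T(G/e) / T(G),
+        # where T(.) is the lambda-weighted spanning tree count and G/e is G
+        # with edge e contracted.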
+ return exp(gamma[(e[0], e[1])]) * G_e_Kirchhoff / G_Kirchhoff + + # initialize gamma to the zero dict + gamma = {} + for u, v, _ in G.edges: + gamma[(u, v)] = 0 + + # set epsilon + EPSILON = 0.2 + + # pick an edge attribute name that is unlikely to be in the graph + lambda_key = "spanning_tree_distribution's secret attribute name for lambda" + + while True: + # We need to know that know that no values of q_e are greater than + # (1 + epsilon) * z_e, however changing one gamma value can increase the + # value of a different q_e, so we have to complete the for loop without + # changing anything for the condition to be meet + in_range_count = 0 + # Search for an edge with q_e > (1 + epsilon) * z_e + for u, v in gamma: + e = (u, v) + q_e = q(e) + z_e = z[e] + if q_e > (1 + EPSILON) * z_e: + delta = ln( + (q_e * (1 - (1 + EPSILON / 2) * z_e)) + / ((1 - q_e) * (1 + EPSILON / 2) * z_e) + ) + gamma[e] -= delta + # Check that delta had the desired effect + new_q_e = q(e) + desired_q_e = (1 + EPSILON / 2) * z_e + if round(new_q_e, 8) != round(desired_q_e, 8): + raise nx.NetworkXError( + f"Unable to modify probability for edge ({u}, {v})" + ) + else: + in_range_count += 1 + # Check if the for loop terminated without changing any gamma + if in_range_count == len(gamma): + break + + # Remove the new edge attributes + for _, _, d in G.edges(data=True): + if lambda_key in d: + del d[lambda_key] + + return gamma + + +@nx._dispatch(edge_attrs="weight") +def greedy_tsp(G, weight="weight", source=None): + """Return a low cost cycle starting at `source` and its cost. + + This approximates a solution to the traveling salesman problem. + It finds a cycle of all the nodes that a salesman can visit in order + to visit many nodes while minimizing total distance. + It uses a simple greedy algorithm. + In essence, this function returns a large cycle given a source point + for which the total cost of the cycle is minimized. + + Parameters + ---------- + G : Graph + The Graph should be a complete weighted undirected graph. + The distance between all pairs of nodes should be included. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + source : node, optional (default: first node in list(G)) + Starting node. If None, defaults to ``next(iter(G))`` + + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman + can follow to minimize total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete, the algorithm raises an exception. + + Examples + -------- + >>> from networkx.algorithms import approximation as approx + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from({ + ... ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3), + ... ("B", "C", 12), ("B", "D", 16), ("C", "A", 13),("C", "B", 12), + ... ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2) + ... }) + >>> cycle = approx.greedy_tsp(G, source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + + Notes + ----- + This implementation of a greedy algorithm is based on the following: + + - The algorithm adds a node to the solution at every iteration. + - The algorithm selects a node not already in the cycle whose connection + to the previous node adds the least cost to the cycle. + + A greedy algorithm does not always give the best solution. 
+ However, it can construct a first feasible solution which can + be passed as a parameter to an iterative improvement algorithm such + as Simulated Annealing, or Threshold Accepting. + + Time complexity: It has a running time $O(|V|^2)$ + """ + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if source is None: + source = nx.utils.arbitrary_element(G) + + if G.number_of_nodes() == 2: + neighbor = next(G.neighbors(source)) + return [source, neighbor, source] + + nodeset = set(G) + nodeset.remove(source) + cycle = [source] + next_node = source + while nodeset: + nbrdict = G[next_node] + next_node = min(nodeset, key=lambda n: nbrdict[n].get(weight, 1)) + cycle.append(next_node) + nodeset.remove(next_node) + cycle.append(cycle[0]) + return cycle + + +@py_random_state(9) +@nx._dispatch(edge_attrs="weight") +def simulated_annealing_tsp( + G, + init_cycle, + weight="weight", + source=None, + temp=100, + move="1-1", + max_iterations=10, + N_inner=100, + alpha=0.01, + seed=None, +): + """Returns an approximate solution to the traveling salesman problem. + + This function uses simulated annealing to approximate the minimal cost + cycle through the nodes. Starting from a suboptimal solution, simulated + annealing perturbs that solution, occasionally accepting changes that make + the solution worse to escape from a locally optimal solution. The chance + of accepting such changes decreases over the iterations to encourage + an optimal result. In summary, the function returns a cycle starting + at `source` for which the total cost is minimized. It also returns the cost. + + The chance of accepting a proposed change is related to a parameter called + the temperature (annealing has a physical analogue of steel hardening + as it cools). As the temperature is reduced, the chance of moves that + increase cost goes down. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted graph. + The distance between all pairs of nodes should be included. + + init_cycle : list of all nodes or "greedy" + The initial solution (a cycle through all nodes returning to the start). + This argument has no default to make you think about it. + If "greedy", use `greedy_tsp(G, weight)`. + Other common starting cycles are `list(G) + [next(iter(G))]` or the final + result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + source : node, optional (default: first node in list(G)) + Starting node. If None, defaults to ``next(iter(G))`` + + temp : int, optional (default=100) + The algorithm's temperature parameter. It represents the initial + value of temperature + + move : "1-1" or "1-0" or function, optional (default="1-1") + Indicator of what move to use when finding new trial solutions. + Strings indicate two special built-in moves: + + - "1-1": 1-1 exchange which transposes the position + of two elements of the current solution. + The function called is :func:`swap_two_nodes`. + For example if we apply 1-1 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can get the following by the transposition of 1 and 4 elements: + ``A' = [3, 2, 4, 1, 3]`` + - "1-0": 1-0 exchange which moves an node in the solution + to a new position. 
+ The function called is :func:`move_one_node`. + For example if we apply 1-0 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can transfer the fourth element to the second position: + ``A' = [3, 4, 2, 1, 3]`` + + You may provide your own functions to enact a move from + one solution to a neighbor solution. The function must take + the solution as input along with a `seed` input to control + random number generation (see the `seed` input here). + Your function should maintain the solution as a cycle with + equal first and last node and all others appearing once. + Your function should return the new solution. + + max_iterations : int, optional (default=10) + Declared done when this number of consecutive iterations of + the outer loop occurs without any change in the best cost solution. + + N_inner : int, optional (default=100) + The number of iterations of the inner loop. + + alpha : float between (0, 1), optional (default=0.01) + Percentage of temperature decrease in each iteration + of outer loop + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman + can follow to minimize total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete the algorithm raises an exception. + + Examples + -------- + >>> from networkx.algorithms import approximation as approx + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from({ + ... ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3), + ... ("B", "C", 12), ("B", "D", 16), ("C", "A", 13),("C", "B", 12), + ... ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2) + ... }) + >>> cycle = approx.simulated_annealing_tsp(G, "greedy", source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + >>> incycle = ["D", "B", "A", "C", "D"] + >>> cycle = approx.simulated_annealing_tsp(G, incycle, source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + + Notes + ----- + Simulated Annealing is a metaheuristic local search algorithm. + The main characteristic of this algorithm is that it accepts + even solutions which lead to the increase of the cost in order + to escape from low quality local optimal solutions. + + This algorithm needs an initial solution. If not provided, it is + constructed by a simple greedy algorithm. At every iteration, the + algorithm selects thoughtfully a neighbor solution. + Consider $c(x)$ cost of current solution and $c(x')$ cost of a + neighbor solution. + If $c(x') - c(x) <= 0$ then the neighbor solution becomes the current + solution for the next iteration. Otherwise, the algorithm accepts + the neighbor solution with probability $p = exp - ([c(x') - c(x)] / temp)$. + Otherwise the current solution is retained. + + `temp` is a parameter of the algorithm and represents temperature. + + Time complexity: + For $N_i$ iterations of the inner loop and $N_o$ iterations of the + outer loop, this algorithm has running time $O(N_i * N_o * |V|)$. + + For more information and how the algorithm is inspired see: + http://en.wikipedia.org/wiki/Simulated_annealing + """ + if move == "1-1": + move = swap_two_nodes + elif move == "1-0": + move = move_one_node + if init_cycle == "greedy": + # Construct an initial solution using a greedy algorithm. 
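+        # greedy_tsp returns a closed cycle starting and ending at `source`;
+        # for a two-node graph that cycle is already optimal, so it is
+        # returned immediately without any annealing.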
+ cycle = greedy_tsp(G, weight=weight, source=source) + if G.number_of_nodes() == 2: + return cycle + + else: + cycle = list(init_cycle) + if source is None: + source = cycle[0] + elif source != cycle[0]: + raise nx.NetworkXError("source must be first node in init_cycle") + if cycle[0] != cycle[-1]: + raise nx.NetworkXError("init_cycle must be a cycle. (return to start)") + + if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G): + raise nx.NetworkXError("init_cycle should be a cycle over all nodes in G.") + + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if G.number_of_nodes() == 2: + neighbor = next(G.neighbors(source)) + return [source, neighbor, source] + + # Find the cost of initial solution + cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle)) + + count = 0 + best_cycle = cycle.copy() + best_cost = cost + while count <= max_iterations and temp > 0: + count += 1 + for i in range(N_inner): + adj_sol = move(cycle, seed) + adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol)) + delta = adj_cost - cost + if delta <= 0: + # Set current solution the adjacent solution. + cycle = adj_sol + cost = adj_cost + + if cost < best_cost: + count = 0 + best_cycle = cycle.copy() + best_cost = cost + else: + # Accept even a worse solution with probability p. + p = math.exp(-delta / temp) + if p >= seed.random(): + cycle = adj_sol + cost = adj_cost + temp -= temp * alpha + + return best_cycle + + +@py_random_state(9) +@nx._dispatch(edge_attrs="weight") +def threshold_accepting_tsp( + G, + init_cycle, + weight="weight", + source=None, + threshold=1, + move="1-1", + max_iterations=10, + N_inner=100, + alpha=0.1, + seed=None, +): + """Returns an approximate solution to the traveling salesman problem. + + This function uses threshold accepting methods to approximate the minimal cost + cycle through the nodes. Starting from a suboptimal solution, threshold + accepting methods perturb that solution, accepting any changes that make + the solution no worse than increasing by a threshold amount. Improvements + in cost are accepted, but so are changes leading to small increases in cost. + This allows the solution to leave suboptimal local minima in solution space. + The threshold is decreased slowly as iterations proceed helping to ensure + an optimum. In summary, the function returns a cycle starting at `source` + for which the total cost is minimized. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted graph. + The distance between all pairs of nodes should be included. + + init_cycle : list or "greedy" + The initial solution (a cycle through all nodes returning to the start). + This argument has no default to make you think about it. + If "greedy", use `greedy_tsp(G, weight)`. + Other common starting cycles are `list(G) + [next(iter(G))]` or the final + result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + source : node, optional (default: first node in list(G)) + Starting node. If None, defaults to ``next(iter(G))`` + + threshold : int, optional (default=1) + The algorithm's threshold parameter. 
It represents the initial + threshold's value + + move : "1-1" or "1-0" or function, optional (default="1-1") + Indicator of what move to use when finding new trial solutions. + Strings indicate two special built-in moves: + + - "1-1": 1-1 exchange which transposes the position + of two elements of the current solution. + The function called is :func:`swap_two_nodes`. + For example if we apply 1-1 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can get the following by the transposition of 1 and 4 elements: + ``A' = [3, 2, 4, 1, 3]`` + - "1-0": 1-0 exchange which moves an node in the solution + to a new position. + The function called is :func:`move_one_node`. + For example if we apply 1-0 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can transfer the fourth element to the second position: + ``A' = [3, 4, 2, 1, 3]`` + + You may provide your own functions to enact a move from + one solution to a neighbor solution. The function must take + the solution as input along with a `seed` input to control + random number generation (see the `seed` input here). + Your function should maintain the solution as a cycle with + equal first and last node and all others appearing once. + Your function should return the new solution. + + max_iterations : int, optional (default=10) + Declared done when this number of consecutive iterations of + the outer loop occurs without any change in the best cost solution. + + N_inner : int, optional (default=100) + The number of iterations of the inner loop. + + alpha : float between (0, 1), optional (default=0.1) + Percentage of threshold decrease when there is at + least one acceptance of a neighbor solution. + If no inner loop moves are accepted the threshold remains unchanged. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman + can follow to minimize total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete the algorithm raises an exception. + + Examples + -------- + >>> from networkx.algorithms import approximation as approx + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from({ + ... ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3), + ... ("B", "C", 12), ("B", "D", 16), ("C", "A", 13),("C", "B", 12), + ... ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2) + ... }) + >>> cycle = approx.threshold_accepting_tsp(G, "greedy", source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + >>> incycle = ["D", "B", "A", "C", "D"] + >>> cycle = approx.threshold_accepting_tsp(G, incycle, source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + + Notes + ----- + Threshold Accepting is a metaheuristic local search algorithm. + The main characteristic of this algorithm is that it accepts + even solutions which lead to the increase of the cost in order + to escape from low quality local optimal solutions. + + This algorithm needs an initial solution. This solution can be + constructed by a simple greedy algorithm. At every iteration, it + selects thoughtfully a neighbor solution. + Consider $c(x)$ cost of current solution and $c(x')$ cost of + neighbor solution. 
+ If $c(x') - c(x) <= threshold$ then the neighbor solution becomes the current + solution for the next iteration, where the threshold is named threshold. + + In comparison to the Simulated Annealing algorithm, the Threshold + Accepting algorithm does not accept very low quality solutions + (due to the presence of the threshold value). In the case of + Simulated Annealing, even a very low quality solution can + be accepted with probability $p$. + + Time complexity: + It has a running time $O(m * n * |V|)$ where $m$ and $n$ are the number + of times the outer and inner loop run respectively. + + For more information and how algorithm is inspired see: + https://doi.org/10.1016/0021-9991(90)90201-B + + See Also + -------- + simulated_annealing_tsp + + """ + if move == "1-1": + move = swap_two_nodes + elif move == "1-0": + move = move_one_node + if init_cycle == "greedy": + # Construct an initial solution using a greedy algorithm. + cycle = greedy_tsp(G, weight=weight, source=source) + if G.number_of_nodes() == 2: + return cycle + + else: + cycle = list(init_cycle) + if source is None: + source = cycle[0] + elif source != cycle[0]: + raise nx.NetworkXError("source must be first node in init_cycle") + if cycle[0] != cycle[-1]: + raise nx.NetworkXError("init_cycle must be a cycle. (return to start)") + + if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G): + raise nx.NetworkXError("init_cycle is not all and only nodes.") + + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if G.number_of_nodes() == 2: + neighbor = list(G.neighbors(source))[0] + return [source, neighbor, source] + + # Find the cost of initial solution + cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle)) + + count = 0 + best_cycle = cycle.copy() + best_cost = cost + while count <= max_iterations: + count += 1 + accepted = False + for i in range(N_inner): + adj_sol = move(cycle, seed) + adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol)) + delta = adj_cost - cost + if delta <= threshold: + accepted = True + + # Set current solution the adjacent solution. + cycle = adj_sol + cost = adj_cost + + if cost < best_cost: + count = 0 + best_cycle = cycle.copy() + best_cost = cost + if accepted: + threshold -= threshold * alpha + + return best_cycle diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/treewidth.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/treewidth.py new file mode 100644 index 0000000000000000000000000000000000000000..ce673b6eda43e5f1fd0ab862f96e7cf8e08b62f3 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/treewidth.py @@ -0,0 +1,252 @@ +"""Functions for computing treewidth decomposition. + +Treewidth of an undirected graph is a number associated with the graph. +It can be defined as the size of the largest vertex set (bag) in a tree +decomposition of the graph minus one. + +`Wikipedia: Treewidth `_ + +The notions of treewidth and tree decomposition have gained their +attractiveness partly because many graph and network problems that are +intractable (e.g., NP-hard) on arbitrary graphs become efficiently +solvable (e.g., with a linear time algorithm) when the treewidth of the +input graphs is bounded by a constant [1]_ [2]_. 
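+
+For example (only the reported width is shown here; the exact bags returned
+depend on the heuristic's tie-breaking), a chordless cycle has treewidth 2:
+
+    >>> import networkx as nx
+    >>> from networkx.algorithms.approximation import treewidth_min_degree
+    >>> width, decomposition = treewidth_min_degree(nx.cycle_graph(5))
+    >>> width
+    2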
+
+There are two different functions for computing a tree decomposition:
+:func:`treewidth_min_degree` and :func:`treewidth_min_fill_in`.
+
+.. [1] Hans L. Bodlaender and Arie M. C. A. Koster. 2010. "Treewidth
+      computations I. Upper bounds". Inf. Comput. 208, 3 (March 2010), 259-275.
+      http://dx.doi.org/10.1016/j.ic.2009.03.008
+
+.. [2] Hans L. Bodlaender. "Discovering Treewidth". Institute of Information
+      and Computing Sciences, Utrecht University.
+      Technical Report UU-CS-2005-018.
+      http://www.cs.uu.nl
+
+.. [3] K. Wang, Z. Lu, and J. Hicks *Treewidth*.
+      https://web.archive.org/web/20210507025929/http://web.eecs.utk.edu/~cphill25/cs594_spring2015_projects/treewidth.pdf
+
+"""
+
+import itertools
+import sys
+from heapq import heapify, heappop, heappush
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["treewidth_min_degree", "treewidth_min_fill_in"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def treewidth_min_degree(G):
+    """Returns a treewidth decomposition using the Minimum Degree heuristic.
+
+    The heuristic chooses the nodes according to their degree, i.e., first
+    the node with the lowest degree is chosen, then the graph is updated
+    and the corresponding node is removed. Next, a new node with the lowest
+    degree is chosen, and so on.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    Treewidth decomposition : (int, Graph) tuple
+        2-tuple with treewidth and the corresponding decomposed tree.
+    """
+    deg_heuristic = MinDegreeHeuristic(G)
+    return treewidth_decomp(G, lambda graph: deg_heuristic.best_node(graph))
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def treewidth_min_fill_in(G):
+    """Returns a treewidth decomposition using the Minimum Fill-in heuristic.
+
+    The heuristic chooses a node from the graph, where the number of edges
+    added when turning the neighbourhood of the chosen node into a clique
+    is as small as possible.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    Treewidth decomposition : (int, Graph) tuple
+        2-tuple with treewidth and the corresponding decomposed tree.
+    """
+    return treewidth_decomp(G, min_fill_in_heuristic)
+
+
+class MinDegreeHeuristic:
+    """Implements the Minimum Degree heuristic.
+
+    The heuristic chooses the nodes according to their degree
+    (number of neighbours), i.e., first the node with the lowest degree is
+    chosen, then the graph is updated and the corresponding node is
+    removed. Next, a new node with the lowest degree is chosen, and so on.
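+
+    The heap entries are 3-tuples ``(degree, unique_id, node)``; the unique
+    id comes from a counter and breaks ties, so nodes never need to be
+    comparable with each other, and outdated entries are skipped when popped.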
+    """
+
+    def __init__(self, graph):
+        self._graph = graph
+
+        # nodes that have to be updated in the heap before each iteration
+        self._update_nodes = []
+
+        self._degreeq = []  # a heapq with 3-tuples (degree, unique_id, node)
+        self.count = itertools.count()
+
+        # build heap with initial degrees
+        for n in graph:
+            self._degreeq.append((len(graph[n]), next(self.count), n))
+        heapify(self._degreeq)
+
+    def best_node(self, graph):
+        # update nodes in self._update_nodes
+        for n in self._update_nodes:
+            # insert changed degrees into degreeq
+            heappush(self._degreeq, (len(graph[n]), next(self.count), n))
+
+        # get the next valid (minimum degree) node
+        while self._degreeq:
+            (min_degree, _, elim_node) = heappop(self._degreeq)
+            if elim_node not in graph or len(graph[elim_node]) != min_degree:
+                # outdated entry in degreeq
+                continue
+            elif min_degree == len(graph) - 1:
+                # fully connected: abort condition
+                return None
+
+            # remember to update nodes in the heap before getting the next node
+            self._update_nodes = graph[elim_node]
+            return elim_node
+
+        # the heap is empty: abort
+        return None
+
+
+def min_fill_in_heuristic(graph):
+    """Implements the Minimum Fill-in heuristic.
+
+    Returns the node from the graph, where the number of edges added when
+    turning the neighbourhood of the chosen node into a clique is as small as
+    possible. This algorithm chooses the nodes using the Minimum Fill-In
+    heuristic. The running time of the algorithm is :math:`O(V^3)` and it uses
+    additional constant memory."""
+
+    if len(graph) == 0:
+        return None
+
+    min_fill_in_node = None
+
+    min_fill_in = sys.maxsize
+
+    # sort nodes by degree
+    nodes_by_degree = sorted(graph, key=lambda x: len(graph[x]))
+    min_degree = len(graph[nodes_by_degree[0]])
+
+    # abort condition (handle complete graph)
+    if min_degree == len(graph) - 1:
+        return None
+
+    for node in nodes_by_degree:
+        num_fill_in = 0
+        nbrs = graph[node]
+        for nbr in nbrs:
+            # count how many nodes in nbrs current nbr is not connected to
+            # subtract 1 for the node itself
+            num_fill_in += len(nbrs - graph[nbr]) - 1
+            if num_fill_in >= 2 * min_fill_in:
+                break
+
+        num_fill_in /= 2  # divide by 2 because of double counting
+
+        if num_fill_in < min_fill_in:  # update min-fill-in node
+            if num_fill_in == 0:
+                return node
+            min_fill_in = num_fill_in
+            min_fill_in_node = node
+
+    return min_fill_in_node
+
+
+@nx._dispatch
+def treewidth_decomp(G, heuristic=min_fill_in_heuristic):
+    """Returns a treewidth decomposition using the passed heuristic.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    heuristic : heuristic function
+
+    Returns
+    -------
+    Treewidth decomposition : (int, Graph) tuple
+        2-tuple with treewidth and the corresponding decomposed tree.
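+
+    Examples
+    --------
+    A minimal sketch; on a complete graph the heuristic aborts immediately,
+    so all nodes are placed into one bag:
+
+    >>> import networkx as nx
+    >>> width, decomp = treewidth_decomp(nx.complete_graph(4))
+    >>> width
+    3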
+ """ + + # make dict-of-sets structure + graph = {n: set(G[n]) - {n} for n in G} + + # stack containing nodes and neighbors in the order from the heuristic + node_stack = [] + + # get first node from heuristic + elim_node = heuristic(graph) + while elim_node is not None: + # connect all neighbours with each other + nbrs = graph[elim_node] + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + # push node and its current neighbors on stack + node_stack.append((elim_node, nbrs)) + + # remove node from graph + for u in graph[elim_node]: + graph[u].remove(elim_node) + + del graph[elim_node] + elim_node = heuristic(graph) + + # the abort condition is met; put all remaining nodes into one bag + decomp = nx.Graph() + first_bag = frozenset(graph.keys()) + decomp.add_node(first_bag) + + treewidth = len(first_bag) - 1 + + while node_stack: + # get node and its neighbors from the stack + (curr_node, nbrs) = node_stack.pop() + + # find a bag all neighbors are in + old_bag = None + for bag in decomp.nodes: + if nbrs <= bag: + old_bag = bag + break + + if old_bag is None: + # no old_bag was found: just connect to the first_bag + old_bag = first_bag + + # create new node for decomposition + nbrs.add(curr_node) + new_bag = frozenset(nbrs) + + # update treewidth + treewidth = max(treewidth, len(new_bag) - 1) + + # add edge to decomposition (implicitly also adds the new node) + decomp.add_edge(old_bag, new_bag) + + return treewidth, decomp diff --git a/phivenv/Lib/site-packages/networkx/algorithms/approximation/vertex_cover.py b/phivenv/Lib/site-packages/networkx/algorithms/approximation/vertex_cover.py new file mode 100644 index 0000000000000000000000000000000000000000..dbd7a123d02009e9fb825c512289b762614494f6 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/approximation/vertex_cover.py @@ -0,0 +1,82 @@ +"""Functions for computing an approximate minimum weight vertex cover. + +A |vertex cover|_ is a subset of nodes such that each edge in the graph +is incident to at least one node in the subset. + +.. _vertex cover: https://en.wikipedia.org/wiki/Vertex_cover +.. |vertex cover| replace:: *vertex cover* + +""" +import networkx as nx + +__all__ = ["min_weighted_vertex_cover"] + + +@nx._dispatch(node_attrs="weight") +def min_weighted_vertex_cover(G, weight=None): + r"""Returns an approximate minimum weighted vertex cover. + + The set of nodes returned by this function is guaranteed to be a + vertex cover, and the total weight of the set is guaranteed to be at + most twice the total weight of the minimum weight vertex cover. In + other words, + + .. math:: + + w(S) \leq 2 * w(S^*), + + where $S$ is the vertex cover returned by this function, + $S^*$ is the vertex cover of minimum weight out of all vertex + covers of the graph, and $w$ is the function that computes the + sum of the weights of each node in that given set. + + Parameters + ---------- + G : NetworkX graph + + weight : string, optional (default = None) + If None, every node has weight 1. If a string, use this node + attribute as the node weight. A node without this attribute is + assumed to have weight 1. + + Returns + ------- + min_weighted_cover : set + Returns a set of nodes whose weight sum is no more than twice + the weight sum of the minimum weight vertex cover. + + Notes + ----- + For a directed graph, a vertex cover has the same definition: a set + of nodes such that each edge in the graph is incident to at least + one node in the set. 
Whether the node is the head or tail of the + directed edge is ignored. + + This is the local-ratio algorithm for computing an approximate + vertex cover. The algorithm greedily reduces the costs over edges, + iteratively building a cover. The worst-case runtime of this + implementation is $O(m \log n)$, where $n$ is the number + of nodes and $m$ the number of edges in the graph. + + References + ---------- + .. [1] Bar-Yehuda, R., and Even, S. (1985). "A local-ratio theorem for + approximating the weighted vertex cover problem." + *Annals of Discrete Mathematics*, 25, 27–46 + + + """ + cost = dict(G.nodes(data=weight, default=1)) + # While there are uncovered edges, choose an uncovered and update + # the cost of the remaining edges. + cover = set() + for u, v in G.edges(): + if u in cover or v in cover: + continue + if cost[u] <= cost[v]: + cover.add(u) + cost[v] -= cost[u] + else: + cover.add(v) + cost[u] -= cost[v] + return cover diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4d9888609cbc43d4ba2121fcd0feda0985d1aebd --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__init__.py @@ -0,0 +1,5 @@ +from networkx.algorithms.assortativity.connectivity import * +from networkx.algorithms.assortativity.correlation import * +from networkx.algorithms.assortativity.mixing import * +from networkx.algorithms.assortativity.neighbor_degree import * +from networkx.algorithms.assortativity.pairs import * diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e69749386740256b5e637447ea734878046f9b9b Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ee4e4f7e72a97351183d7d7981c7b7a7281cf3e Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0438991e93543dc34975c158701bf2d40eccb6bc Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a182849edfde4407b7b226e6b092ea6911544447 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-39.pyc 
b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83d5c38c85e908ffc2d8d7e080311616cde97072 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99f53994939c22b1db9ee282f56ec7b2fbf7bb29 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/connectivity.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..bd433ded595711ed526b99f598a9663223bf2555 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/connectivity.py @@ -0,0 +1,122 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["average_degree_connectivity"] + + +@nx._dispatch(edge_attrs="weight") +def average_degree_connectivity( + G, source="in+out", target="in+out", nodes=None, weight=None +): + r"""Compute the average degree connectivity of graph. + + The average degree connectivity is the average nearest neighbor degree of + nodes with degree k. For weighted graphs, an analogous measure can + be computed using the weighted average neighbors degree defined in + [1]_, for a node `i`, as + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, + `w_{ij}` is the weight of the edge that links `i` and `j`, + and `N(i)` are the neighbors of node `i`. + + Parameters + ---------- + G : NetworkX graph + + source : "in"|"out"|"in+out" (default:"in+out") + Directed graphs only. Use "in"- or "out"-degree for source node. + + target : "in"|"out"|"in+out" (default:"in+out" + Directed graphs only. Use "in"- or "out"-degree for target node. + + nodes : list or iterable (optional) + Compute neighbor connectivity for these nodes. The default is all + nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + Returns + ------- + d : dict + A dictionary keyed by degree k with the value of average connectivity. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', + 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[1, 2]["weight"] = 3 + >>> nx.average_degree_connectivity(G) + {1: 2.0, 2: 1.5} + >>> nx.average_degree_connectivity(G, weight="weight") + {1: 2.0, 2: 1.75} + + See Also + -------- + average_neighbor_degree + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". + PNAS 101 (11): 3747–3752 (2004). + """ + # First, determine the type of neighbors and the type of degree to use. 
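+    # For directed graphs, "in", "out" and "in+out" select G.in_degree,
+    # G.out_degree and G.degree respectively, and the matching neighbor
+    # iterator (predecessors, successors, or G.neighbors) is chosen below.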
+ if G.is_directed(): + if source not in ("in", "out", "in+out"): + raise nx.NetworkXError('source must be one of "in", "out", or "in+out"') + if target not in ("in", "out", "in+out"): + raise nx.NetworkXError('target must be one of "in", "out", or "in+out"') + direction = {"out": G.out_degree, "in": G.in_degree, "in+out": G.degree} + neighbor_funcs = { + "out": G.successors, + "in": G.predecessors, + "in+out": G.neighbors, + } + source_degree = direction[source] + target_degree = direction[target] + neighbors = neighbor_funcs[source] + # `reverse` indicates whether to look at the in-edge when + # computing the weight of an edge. + reverse = source == "in" + else: + if source != "in+out" or target != "in+out": + raise nx.NetworkXError( + f"source and target arguments are only supported for directed graphs" + ) + source_degree = G.degree + target_degree = G.degree + neighbors = G.neighbors + reverse = False + dsum = defaultdict(int) + dnorm = defaultdict(int) + # Check if `source_nodes` is actually a single node in the graph. + source_nodes = source_degree(nodes) + if nodes in G: + source_nodes = [(nodes, source_degree(nodes))] + for n, k in source_nodes: + nbrdeg = target_degree(neighbors(n)) + if weight is None: + s = sum(d for n, d in nbrdeg) + else: # weight nbr degree by weight of (n,nbr) edge + if reverse: + s = sum(G[nbr][n].get(weight, 1) * d for nbr, d in nbrdeg) + else: + s = sum(G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg) + dnorm[k] += source_degree(n, weight=weight) + dsum[k] += s + + # normalize + return {k: avg if dnorm[k] == 0 else avg / dnorm[k] for k, avg in dsum.items()} diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/correlation.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/correlation.py new file mode 100644 index 0000000000000000000000000000000000000000..35ea78d6d523a38937c8fcfa6e8f0951f8194a23 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/correlation.py @@ -0,0 +1,302 @@ +"""Node assortativity coefficients and correlation measures. +""" +import networkx as nx +from networkx.algorithms.assortativity.mixing import ( + attribute_mixing_matrix, + degree_mixing_matrix, +) +from networkx.algorithms.assortativity.pairs import node_degree_xy + +__all__ = [ + "degree_pearson_correlation_coefficient", + "degree_assortativity_coefficient", + "attribute_assortativity_coefficient", + "numeric_assortativity_coefficient", +] + + +@nx._dispatch(edge_attrs="weight") +def degree_assortativity_coefficient(G, x="out", y="in", weight=None, nodes=None): + """Compute degree assortativity of graph. + + Assortativity measures the similarity of connections + in the graph with respect to the node degree. + + Parameters + ---------- + G : NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Compute degree assortativity only for nodes in container. + The default is all nodes. + + Returns + ------- + r : float + Assortativity of graph by degree. 
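+        The value lies in the interval [-1, 1]; positive values indicate
+        that nodes tend to connect to other nodes of similar degree.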
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> r = nx.degree_assortativity_coefficient(G) + >>> print(f"{r:3.1f}") + -0.5 + + See Also + -------- + attribute_assortativity_coefficient + numeric_assortativity_coefficient + degree_mixing_dict + degree_mixing_matrix + + Notes + ----- + This computes Eq. (21) in Ref. [1]_ , where e is the joint + probability distribution (mixing matrix) of the degrees. If G is + directed than the matrix e is the joint probability of the + user-specified degree type for the source and target. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. + Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). + """ + if nodes is None: + nodes = G.nodes + + degrees = None + + if G.is_directed(): + indeg = ( + {d for _, d in G.in_degree(nodes, weight=weight)} + if "in" in (x, y) + else set() + ) + outdeg = ( + {d for _, d in G.out_degree(nodes, weight=weight)} + if "out" in (x, y) + else set() + ) + degrees = set.union(indeg, outdeg) + else: + degrees = {d for _, d in G.degree(nodes, weight=weight)} + + mapping = {d: i for i, d, in enumerate(degrees)} + M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping) + + return _numeric_ac(M, mapping=mapping) + + +@nx._dispatch(edge_attrs="weight") +def degree_pearson_correlation_coefficient(G, x="out", y="in", weight=None, nodes=None): + """Compute degree assortativity of graph. + + Assortativity measures the similarity of connections + in the graph with respect to the node degree. + + This is the same as degree_assortativity_coefficient but uses the + potentially faster scipy.stats.pearsonr function. + + Parameters + ---------- + G : NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Compute pearson correlation of degrees only for specified nodes. + The default is all nodes. + + Returns + ------- + r : float + Assortativity of graph by degree. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> r = nx.degree_pearson_correlation_coefficient(G) + >>> print(f"{r:3.1f}") + -0.5 + + Notes + ----- + This calls scipy.stats.pearsonr. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. + Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). + """ + import scipy as sp + + xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) + x, y = zip(*xy) + return sp.stats.pearsonr(x, y)[0] + + +@nx._dispatch(node_attrs="attribute") +def attribute_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key + + nodes: list or iterable (optional) + Compute attribute assortativity for nodes in container. + The default is all nodes. 
+ + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], color="red") + >>> G.add_nodes_from([2, 3], color="blue") + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.attribute_assortativity_coefficient(G, "color")) + 1.0 + + Notes + ----- + This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)), + where M is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + M = attribute_mixing_matrix(G, attribute, nodes) + return attribute_ac(M) + + +@nx._dispatch(node_attrs="attribute") +def numeric_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for numerical node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given numeric attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Compute numeric assortativity only for attributes of nodes in + container. The default is all nodes. + + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], size=2) + >>> G.add_nodes_from([2, 3], size=3) + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.numeric_assortativity_coefficient(G, "size")) + 1.0 + + Notes + ----- + This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation + coefficient of the specified (scalar valued) attribute across edges. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + """ + if nodes is None: + nodes = G.nodes + vals = {G.nodes[n][attribute] for n in nodes} + mapping = {d: i for i, d, in enumerate(vals)} + M = attribute_mixing_matrix(G, attribute, nodes, mapping) + return _numeric_ac(M, mapping) + + +def attribute_ac(M): + """Compute assortativity for attribute matrix M. + + Parameters + ---------- + M : numpy.ndarray + 2D ndarray representing the attribute mixing matrix. + + Notes + ----- + This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)), + where e is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. 
Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + if M.sum() != 1.0: + M = M / M.sum() + s = (M @ M).sum() + t = M.trace() + r = (t - s) / (1 - s) + return r + + +def _numeric_ac(M, mapping): + # M is a 2D numpy array + # numeric assortativity coefficient, pearsonr + import numpy as np + + if M.sum() != 1.0: + M = M / M.sum() + x = np.array(list(mapping.keys())) + y = x # x and y have the same support + idx = list(mapping.values()) + a = M.sum(axis=0) + b = M.sum(axis=1) + vara = (a[idx] * x**2).sum() - ((a[idx] * x).sum()) ** 2 + varb = (b[idx] * y**2).sum() - ((b[idx] * y).sum()) ** 2 + xy = np.outer(x, y) + ab = np.outer(a[idx], b[idx]) + return (xy * (M - ab)).sum() / np.sqrt(vara * varb) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/mixing.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/mixing.py new file mode 100644 index 0000000000000000000000000000000000000000..66b98797e69f473507347ea412cea01501426889 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/mixing.py @@ -0,0 +1,250 @@ +""" +Mixing matrices for node attributes and degree. +""" +import networkx as nx +from networkx.algorithms.assortativity.pairs import node_attribute_xy, node_degree_xy +from networkx.utils import dict_to_numpy_array + +__all__ = [ + "attribute_mixing_matrix", + "attribute_mixing_dict", + "degree_mixing_matrix", + "degree_mixing_dict", + "mixing_dict", +] + + +@nx._dispatch(node_attrs="attribute") +def attribute_mixing_dict(G, attribute, nodes=None, normalized=False): + """Returns dictionary representation of mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Unse nodes in container to build the dict. The default is all nodes. + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], color="red") + >>> G.add_nodes_from([2, 3], color="blue") + >>> G.add_edge(1, 3) + >>> d = nx.attribute_mixing_dict(G, "color") + >>> print(d["red"]["blue"]) + 1 + >>> print(d["blue"]["red"]) # d symmetric for undirected graphs + 1 + + Returns + ------- + d : dictionary + Counts or joint probability of occurrence of attribute pairs. + """ + xy_iter = node_attribute_xy(G, attribute, nodes) + return mixing_dict(xy_iter, normalized=normalized) + + +@nx._dispatch(node_attrs="attribute") +def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None, normalized=True): + """Returns mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Use only nodes in container to build the matrix. The default is + all nodes. + + mapping : dictionary, optional + Mapping from node attribute to integer index in matrix. + If not specified, an arbitrary ordering will be used. + + normalized : bool (default=True) + Return counts if False or probabilities if True. + + Returns + ------- + m: numpy array + Counts or joint probability of occurrence of attribute pairs. + + Notes + ----- + If each node has a unique attribute value, the unnormalized mixing matrix + will be equal to the adjacency matrix. To get a denser mixing matrix, + the rounding can be performed to form groups of nodes with equal values. 
+ For example, the exact height of persons in cm (180.79155222, 163.9080892, + 163.30095355, 167.99016217, 168.21590163, ...) can be rounded to (180, 163, + 163, 168, 168, ...). + + Definitions of attribute mixing matrix vary on whether the matrix + should include rows for attribute values that don't arise. Here we + do not include such empty-rows. But you can force them to appear + by inputting a `mapping` that includes those values. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> gender = {0: 'male', 1: 'female', 2: 'female'} + >>> nx.set_node_attributes(G, gender, 'gender') + >>> mapping = {'male': 0, 'female': 1} + >>> mix_mat = nx.attribute_mixing_matrix(G, 'gender', mapping=mapping) + >>> # mixing from male nodes to female nodes + >>> mix_mat[mapping['male'], mapping['female']] + 0.25 + """ + d = attribute_mixing_dict(G, attribute, nodes) + a = dict_to_numpy_array(d, mapping=mapping) + if normalized: + a = a / a.sum() + return a + + +@nx._dispatch(edge_attrs="weight") +def degree_mixing_dict(G, x="out", y="in", weight=None, nodes=None, normalized=False): + """Returns dictionary representation of mixing matrix for degree. + + Parameters + ---------- + G : graph + NetworkX graph object. + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Returns + ------- + d: dictionary + Counts or joint probability of occurrence of degree pairs. + """ + xy_iter = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) + return mixing_dict(xy_iter, normalized=normalized) + + +@nx._dispatch(edge_attrs="weight") +def degree_mixing_matrix( + G, x="out", y="in", weight=None, nodes=None, normalized=True, mapping=None +): + """Returns mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + nodes: list or iterable (optional) + Build the matrix using only nodes in container. + The default is all nodes. + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + normalized : bool (default=True) + Return counts if False or probabilities if True. + + mapping : dictionary, optional + Mapping from node degree to integer index in matrix. + If not specified, an arbitrary ordering will be used. + + Returns + ------- + m: numpy array + Counts, or joint probability, of occurrence of node degree. + + Notes + ----- + Definitions of degree mixing matrix vary on whether the matrix + should include rows for degree values that don't arise. Here we + do not include such empty-rows. But you can force them to appear + by inputting a `mapping` that includes those values. See examples. 
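+
+    With `normalized=True` the matrix is divided by its grand total, so all
+    entries sum to 1 jointly; it is a joint distribution, not a
+    row-stochastic matrix.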
+ + Examples + -------- + >>> G = nx.star_graph(3) + >>> mix_mat = nx.degree_mixing_matrix(G) + >>> mix_mat[0, 1] # mixing from node degree 1 to node degree 3 + 0.5 + + If you want every possible degree to appear as a row, even if no nodes + have that degree, use `mapping` as follows, + + >>> max_degree = max(deg for n, deg in G.degree) + >>> mapping = {x: x for x in range(max_degree + 1)} # identity mapping + >>> mix_mat = nx.degree_mixing_matrix(G, mapping=mapping) + >>> mix_mat[3, 1] # mixing from node degree 3 to node degree 1 + 0.5 + """ + d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight) + a = dict_to_numpy_array(d, mapping=mapping) + if normalized: + a = a / a.sum() + return a + + +def mixing_dict(xy, normalized=False): + """Returns a dictionary representation of mixing matrix. + + Parameters + ---------- + xy : list or container of two-tuples + Pairs of (x,y) items. + + attribute : string + Node attribute key + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Returns + ------- + d: dictionary + Counts or Joint probability of occurrence of values in xy. + """ + d = {} + psum = 0.0 + for x, y in xy: + if x not in d: + d[x] = {} + if y not in d: + d[y] = {} + v = d[x].get(y, 0) + d[x][y] = v + 1 + psum += 1 + + if normalized: + for _, jdict in d.items(): + for j in jdict: + jdict[j] /= psum + return d diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py new file mode 100644 index 0000000000000000000000000000000000000000..a8980da766f1e63e06990b35a3b403df5486cd50 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py @@ -0,0 +1,160 @@ +import networkx as nx + +__all__ = ["average_neighbor_degree"] + + +@nx._dispatch(edge_attrs="weight") +def average_neighbor_degree(G, source="out", target="out", nodes=None, weight=None): + r"""Returns the average degree of the neighborhood of each node. + + In an undirected graph, the neighborhood `N(i)` of node `i` contains the + nodes that are connected to `i` by an edge. + + For directed graphs, `N(i)` is defined according to the parameter `source`: + + - if source is 'in', then `N(i)` consists of predecessors of node `i`. + - if source is 'out', then `N(i)` consists of successors of node `i`. + - if source is 'in+out', then `N(i)` is both predecessors and successors. + + The average neighborhood degree of a node `i` is + + .. math:: + + k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j + + where `N(i)` are the neighbors of node `i` and `k_j` is + the degree of node `j` which belongs to `N(i)`. For weighted + graphs, an analogous measure can be defined [1]_, + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, `w_{ij}` + is the weight of the edge that links `i` and `j` and + `N(i)` are the neighbors of node `i`. + + + Parameters + ---------- + G : NetworkX graph + + source : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-neighbors of source node. + + target : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-degree for target node. + + nodes : list or iterable, optional (default=G.nodes) + Compute neighbor degree only for specified nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. 
+ If None, then each edge has weight 1. + + Returns + ------- + d: dict + A dictionary keyed by node to the average degree of its neighbors. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[0, 1]["weight"] = 5 + >>> G.edges[2, 3]["weight"] = 3 + + >>> nx.average_neighbor_degree(G) + {0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0} + >>> nx.average_neighbor_degree(G, weight="weight") + {0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0} + + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> nx.average_neighbor_degree(G, source="in", target="in") + {0: 0.0, 1: 0.0, 2: 1.0, 3: 1.0} + + >>> nx.average_neighbor_degree(G, source="out", target="out") + {0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0} + + See Also + -------- + average_degree_connectivity + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". + PNAS 101 (11): 3747–3752 (2004). + """ + if G.is_directed(): + if source == "in": + source_degree = G.in_degree + elif source == "out": + source_degree = G.out_degree + elif source == "in+out": + source_degree = G.degree + else: + raise nx.NetworkXError( + f"source argument {source} must be 'in', 'out' or 'in+out'" + ) + + if target == "in": + target_degree = G.in_degree + elif target == "out": + target_degree = G.out_degree + elif target == "in+out": + target_degree = G.degree + else: + raise nx.NetworkXError( + f"target argument {target} must be 'in', 'out' or 'in+out'" + ) + else: + if source != "out" or target != "out": + raise nx.NetworkXError( + f"source and target arguments are only supported for directed graphs" + ) + source_degree = target_degree = G.degree + + # precompute target degrees -- should *not* be weighted degree + t_deg = dict(target_degree()) + + # Set up both predecessor and successor neighbor dicts leaving empty if not needed + G_P = G_S = {n: {} for n in G} + if G.is_directed(): + # "in" or "in+out" cases: G_P contains predecessors + if "in" in source: + G_P = G.pred + # "out" or "in+out" cases: G_S contains successors + if "out" in source: + G_S = G.succ + else: + # undirected leave G_P empty but G_S is the adjacency + G_S = G.adj + + # Main loop: Compute average degree of neighbors + avg = {} + for n, deg in source_degree(nodes, weight=weight): + # handle degree zero average + if deg == 0: + avg[n] = 0.0 + continue + + # we sum over both G_P and G_S, but one of the two is usually empty. 
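+        # (with directed "in+out", both dicts are populated; a node that is
+        # both a predecessor and a successor is counted once per direction,
+        # matching the "in+out" degree in `deg`)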
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/pairs.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/pairs.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3580d40324619c506146011f07e61f6f9b67ea0
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/pairs.py
@@ -0,0 +1,118 @@
+"""Generators of x-y pairs of node data."""
+import networkx as nx
+
+__all__ = ["node_attribute_xy", "node_degree_xy"]
+
+
+@nx._dispatch(node_attrs="attribute")
+def node_attribute_xy(G, attribute, nodes=None):
+    """Returns iterator of node-attribute pairs for all edges in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    attribute : key
+        The node attribute key.
+
+    nodes : list or iterable (optional)
+        Use only edges that are incident to specified nodes.
+        The default is all nodes.
+
+    Returns
+    -------
+    (x, y) : 2-tuple
+        Generates 2-tuple of (attribute, attribute) values.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_node(1, color="red")
+    >>> G.add_node(2, color="blue")
+    >>> G.add_edge(1, 2)
+    >>> list(nx.node_attribute_xy(G, "color"))
+    [('red', 'blue')]
+
+    Notes
+    -----
+    For undirected graphs each edge is produced twice, once for each edge
+    representation (u, v) and (v, u), with the exception of self-loop edges
+    which only appear once.
+    """
+    if nodes is None:
+        nodes = set(G)
+    else:
+        nodes = set(nodes)
+    Gnodes = G.nodes
+    for u, nbrsdict in G.adjacency():
+        if u not in nodes:
+            continue
+        uattr = Gnodes[u].get(attribute, None)
+        if G.is_multigraph():
+            for v, keys in nbrsdict.items():
+                vattr = Gnodes[v].get(attribute, None)
+                for _ in keys:
+                    yield (uattr, vattr)
+        else:
+            for v in nbrsdict:
+                vattr = Gnodes[v].get(attribute, None)
+                yield (uattr, vattr)
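The Notes above are easy to miss, so here is a quick sketch (not part of the diff; the `color` attribute name is just an example) of the double-counting behavior on an undirected graph:

```python
import networkx as nx

G = nx.Graph()
G.add_node("a", color="red")
G.add_node("b", color="blue")
G.add_edge("a", "b")
G.add_edge("a", "a")  # self-loop

# The a-b edge is reported once per orientation; the self-loop only once.
print(sorted(nx.node_attribute_xy(G, "color")))
# [('blue', 'red'), ('red', 'blue'), ('red', 'red')]
```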
+ """ + nodes = set(G) if nodes is None else set(nodes) + if G.is_directed(): + direction = {"out": G.out_degree, "in": G.in_degree} + xdeg = direction[x] + ydeg = direction[y] + else: + xdeg = ydeg = G.degree + + for u, degu in xdeg(nodes, weight=weight): + # use G.edges to treat multigraphs correctly + neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes) + for _, degv in ydeg(neighbors, weight=weight): + yield degu, degv diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94af528fdd21bbe0b7f2eaf70b37fb4872d27bbe Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55860643b7b7595069d0ac021711ff90e75c505c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c644217480231a73474f3bbab24c1d19b59599b Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..719ccaf7730202fd28d0343ca69f8de6d71eed02 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bd7d35500ca2ca9a5b928c7cdb4decde8c42491 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23c3ac32782afae807dbeeac7469ea31bc2a21b2 Binary files /dev/null and 
b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8cdc349972c8c7d2197f0f3ffb9d8ca1271855c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/base_test.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/base_test.py new file mode 100644 index 0000000000000000000000000000000000000000..46d6300649d3b4658a7263cad04354988b4da312 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/base_test.py @@ -0,0 +1,81 @@ +import networkx as nx + + +class BaseTestAttributeMixing: + @classmethod + def setup_class(cls): + G = nx.Graph() + G.add_nodes_from([0, 1], fish="one") + G.add_nodes_from([2, 3], fish="two") + G.add_nodes_from([4], fish="red") + G.add_nodes_from([5], fish="blue") + G.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.G = G + + D = nx.DiGraph() + D.add_nodes_from([0, 1], fish="one") + D.add_nodes_from([2, 3], fish="two") + D.add_nodes_from([4], fish="red") + D.add_nodes_from([5], fish="blue") + D.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.D = D + + M = nx.MultiGraph() + M.add_nodes_from([0, 1], fish="one") + M.add_nodes_from([2, 3], fish="two") + M.add_nodes_from([4], fish="red") + M.add_nodes_from([5], fish="blue") + M.add_edges_from([(0, 1), (0, 1), (2, 3)]) + cls.M = M + + S = nx.Graph() + S.add_nodes_from([0, 1], fish="one") + S.add_nodes_from([2, 3], fish="two") + S.add_nodes_from([4], fish="red") + S.add_nodes_from([5], fish="blue") + S.add_edge(0, 0) + S.add_edge(2, 2) + cls.S = S + + N = nx.Graph() + N.add_nodes_from([0, 1], margin=-2) + N.add_nodes_from([2, 3], margin=-2) + N.add_nodes_from([4], margin=-3) + N.add_nodes_from([5], margin=-4) + N.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.N = N + + F = nx.Graph() + F.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + F.add_edge(0, 2, weight=1) + nx.set_node_attributes(F, dict(F.degree(weight="weight")), "margin") + cls.F = F + + K = nx.Graph() + K.add_nodes_from([1, 2], margin=-1) + K.add_nodes_from([3], margin=1) + K.add_nodes_from([4], margin=2) + K.add_edges_from([(3, 4), (1, 2), (1, 3)]) + cls.K = K + + +class BaseTestDegreeMixing: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.D = nx.DiGraph() + cls.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)]) + cls.D2 = nx.DiGraph() + cls.D2.add_edges_from([(0, 3), (1, 0), (1, 2), (2, 4), (4, 1), (4, 3), (4, 2)]) + cls.M = nx.MultiGraph() + nx.add_path(cls.M, range(4)) + cls.M.add_edge(0, 1) + cls.S = nx.Graph() + cls.S.add_edges_from([(0, 0), (1, 1)]) + cls.W = nx.Graph() + cls.W.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + cls.W.add_edge(0, 2, weight=1) + S1 = nx.star_graph(4) + S2 = nx.star_graph(4) + cls.DS = nx.disjoint_union(S1, S2) + cls.DS.add_edge(4, 5) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py new file mode 100644 index 
0000000000000000000000000000000000000000..21c6287bbe6b0bfc9aa41201b593f342b2d3976e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py @@ -0,0 +1,143 @@ +from itertools import permutations + +import pytest + +import networkx as nx + + +class TestNeighborConnectivity: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.5} + nd = nx.average_degree_connectivity(D) + assert nd == answer + + answer = {1: 2.0, 2: 1.5} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, weight="weight") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="weight") + assert nd == answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, weight="weight", source="in", target="in" + ) + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, source="in", target="out", weight="weight" + ) + assert nd == answer + + def test_weight_keyword(self): + G = nx.path_graph(4) + G[1][2]["other"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, weight="other") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G, weight=None) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="other") + assert nd == answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_degree_connectivity(G)[5] + assert nd == 1.8 + nd = nx.average_degree_connectivity(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_zero_deg(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(1, 4) + c = nx.average_degree_connectivity(G) + assert c == {1: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="in", target="in") + assert c == {0: 0, 1: 0} + c = nx.average_degree_connectivity(G, source="in", target="out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="in", target="in+out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="out", target="out") + assert c == {0: 0, 3: 0} + c = nx.average_degree_connectivity(G, source="out", target="in") + assert c == {0: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="out", target="in+out") + assert c == {0: 0, 3: 1} + + def test_in_out_weight(self): + G = nx.DiGraph() + G.add_edge(1, 2, weight=1) + G.add_edge(1, 3, weight=1) + G.add_edge(3, 1, weight=1) + for s, t in permutations(["in", "out", "in+out"], 2): + c = nx.average_degree_connectivity(G, 
source=s, target=t) + cw = nx.average_degree_connectivity(G, source=s, target=t, weight="weight") + assert c == cw + + def test_invalid_source(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.average_degree_connectivity(G, source="bogus") + + def test_invalid_target(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.average_degree_connectivity(G, target="bogus") + + def test_invalid_undirected_graph(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, target="bogus") + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, source="bogus") + + def test_single_node(self): + # TODO Is this really the intended behavior for providing a + # single node as the argument `nodes`? Shouldn't the function + # just return the connectivity value itself? + G = nx.trivial_graph() + conn = nx.average_degree_connectivity(G, nodes=0) + assert conn == {0: 0} diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py new file mode 100644 index 0000000000000000000000000000000000000000..5203f9449fd022525b97a19cbe78498e33fb09a3 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py @@ -0,0 +1,123 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +import networkx as nx +from networkx.algorithms.assortativity.correlation import attribute_ac + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestDegreeMixingCorrelation(BaseTestDegreeMixing): + def test_degree_assortativity_undirected(self): + r = nx.degree_assortativity_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_assortativity_node_kwargs(self): + G = nx.Graph() + edges = [(0, 1), (0, 3), (1, 2), (1, 3), (1, 4), (5, 9), (9, 0)] + G.add_edges_from(edges) + r = nx.degree_assortativity_coefficient(G, nodes=[1, 2, 4]) + np.testing.assert_almost_equal(r, -1.0, decimal=4) + + def test_degree_assortativity_directed(self): + r = nx.degree_assortativity_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_assortativity_directed2(self): + """Test degree assortativity for a directed graph where the set of + in/out degree does not equal the total degree.""" + r = nx.degree_assortativity_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_assortativity_multigraph(self): + r = nx.degree_assortativity_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) + + def test_degree_pearson_assortativity_undirected(self): + r = nx.degree_pearson_correlation_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_pearson_assortativity_directed(self): + r = nx.degree_pearson_correlation_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_pearson_assortativity_directed2(self): + """Test degree assortativity with Pearson for a directed graph where + the set of in/out degree does not equal the total degree.""" + r = nx.degree_pearson_correlation_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_pearson_assortativity_multigraph(self): + r = nx.degree_pearson_correlation_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) 
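The Pearson-based tests above mirror `degree_assortativity_coefficient`; as a sanity check, the P4 value of -0.5 asserted in these tests can be reproduced directly from the degree pairs with `scipy.stats.pearsonr` (a sketch, assuming SciPy is available):

```python
import networkx as nx
from scipy import stats

G = nx.path_graph(4)
# Degree pairs for every edge in both orientations, as the tests use them.
x, y = zip(*nx.node_degree_xy(G))
r, _ = stats.pearsonr(x, y)
print(round(r, 4))  # -0.5, matching degree_assortativity_coefficient(G)
```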
+ + def test_degree_assortativity_weighted(self): + r = nx.degree_assortativity_coefficient(self.W, weight="weight") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def test_degree_assortativity_double_star(self): + r = nx.degree_assortativity_coefficient(self.DS) + np.testing.assert_almost_equal(r, -0.9339, decimal=4) + + +class TestAttributeMixingCorrelation(BaseTestAttributeMixing): + def test_attribute_assortativity_undirected(self): + r = nx.attribute_assortativity_coefficient(self.G, "fish") + assert r == 6.0 / 22.0 + + def test_attribute_assortativity_directed(self): + r = nx.attribute_assortativity_coefficient(self.D, "fish") + assert r == 1.0 / 3.0 + + def test_attribute_assortativity_multigraph(self): + r = nx.attribute_assortativity_coefficient(self.M, "fish") + assert r == 1.0 + + def test_attribute_assortativity_coefficient(self): + # from "Mixing patterns in networks" + # fmt: off + a = np.array([[0.258, 0.016, 0.035, 0.013], + [0.012, 0.157, 0.058, 0.019], + [0.013, 0.023, 0.306, 0.035], + [0.005, 0.007, 0.024, 0.016]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.623, decimal=3) + + def test_attribute_assortativity_coefficient2(self): + # fmt: off + a = np.array([[0.18, 0.02, 0.01, 0.03], + [0.02, 0.20, 0.03, 0.02], + [0.01, 0.03, 0.16, 0.01], + [0.03, 0.02, 0.01, 0.22]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.68, decimal=2) + + def test_attribute_assortativity(self): + a = np.array([[50, 50, 0], [50, 50, 0], [0, 0, 2]]) + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.029, decimal=3) + + def test_attribute_assortativity_negative(self): + r = nx.numeric_assortativity_coefficient(self.N, "margin") + np.testing.assert_almost_equal(r, -0.2903, decimal=4) + + def test_assortativity_node_kwargs(self): + G = nx.Graph() + G.add_nodes_from([0, 1], size=2) + G.add_nodes_from([2, 3], size=3) + G.add_edges_from([(0, 1), (2, 3)]) + r = nx.numeric_assortativity_coefficient(G, "size", nodes=[0, 3]) + np.testing.assert_almost_equal(r, 1.0, decimal=4) + + def test_attribute_assortativity_float(self): + r = nx.numeric_assortativity_coefficient(self.F, "margin") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def test_attribute_assortativity_mixed(self): + r = nx.numeric_assortativity_coefficient(self.K, "margin") + np.testing.assert_almost_equal(r, 0.4340, decimal=4) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py new file mode 100644 index 0000000000000000000000000000000000000000..9af09867235b9092837b517ca542e8a85eb602ac --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py @@ -0,0 +1,176 @@ +import pytest + +np = pytest.importorskip("numpy") + + +import networkx as nx + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestDegreeMixingDict(BaseTestDegreeMixing): + def test_degree_mixing_dict_undirected(self): + d = nx.degree_mixing_dict(self.P4) + d_result = {1: {2: 2}, 2: {1: 2, 2: 2}} + assert d == d_result + + def test_degree_mixing_dict_undirected_normalized(self): + d = nx.degree_mixing_dict(self.P4, normalized=True) + d_result = {1: {2: 1.0 / 3}, 2: {1: 1.0 / 3, 2: 1.0 / 3}} + assert d == d_result + + def test_degree_mixing_dict_directed(self): + d = nx.degree_mixing_dict(self.D) + print(d) + d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}} + assert d == d_result + + def 
test_degree_mixing_dict_multigraph(self): + d = nx.degree_mixing_dict(self.M) + d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}} + assert d == d_result + + def test_degree_mixing_dict_weighted(self): + d = nx.degree_mixing_dict(self.W, weight="weight") + d_result = {0.5: {1.5: 1}, 1.5: {1.5: 6, 0.5: 1}} + assert d == d_result + + +class TestDegreeMixingMatrix(BaseTestDegreeMixing): + def test_degree_mixing_matrix_undirected(self): + # fmt: off + a_result = np.array([[0, 2], + [2, 2]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.P4, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.P4) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_directed(self): + # fmt: off + a_result = np.array([[0, 0, 2], + [1, 0, 1], + [0, 0, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.D, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.D) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_multigraph(self): + # fmt: off + a_result = np.array([[0, 1, 0], + [1, 0, 3], + [0, 3, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.M, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.M) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_selfloop(self): + # fmt: off + a_result = np.array([[2]]) + # fmt: on + a = nx.degree_mixing_matrix(self.S, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.S) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_weighted(self): + a_result = np.array([[0.0, 1.0], [1.0, 6.0]]) + a = nx.degree_mixing_matrix(self.W, weight="weight", normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.W, weight="weight") + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_degree_mixing_matrix_mapping(self): + a_result = np.array([[6.0, 1.0], [1.0, 0.0]]) + mapping = {0.5: 1, 1.5: 0} + a = nx.degree_mixing_matrix( + self.W, weight="weight", normalized=False, mapping=mapping + ) + np.testing.assert_equal(a, a_result) + + +class TestAttributeMixingDict(BaseTestAttributeMixing): + def test_attribute_mixing_dict_undirected(self): + d = nx.attribute_mixing_dict(self.G, "fish") + d_result = { + "one": {"one": 2, "red": 1}, + "two": {"two": 2, "blue": 1}, + "red": {"one": 1}, + "blue": {"two": 1}, + } + assert d == d_result + + def test_attribute_mixing_dict_directed(self): + d = nx.attribute_mixing_dict(self.D, "fish") + d_result = { + "one": {"one": 1, "red": 1}, + "two": {"two": 1, "blue": 1}, + "red": {}, + "blue": {}, + } + assert d == d_result + + def test_attribute_mixing_dict_multigraph(self): + d = nx.attribute_mixing_dict(self.M, "fish") + d_result = {"one": {"one": 4}, "two": {"two": 2}} + assert d == d_result + + +class TestAttributeMixingMatrix(BaseTestAttributeMixing): + def test_attribute_mixing_matrix_undirected(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[2, 0, 1, 0], [0, 2, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.G, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.G, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_directed(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 
3} + a_result = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.D, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.D, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_multigraph(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[4, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.M, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.M, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_negative(self): + mapping = {-2: 0, -3: 1, -4: 2} + a_result = np.array([[4.0, 1.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]) + a = nx.attribute_mixing_matrix( + self.N, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.N, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_attribute_mixing_matrix_float(self): + mapping = {0.5: 1, 1.5: 0} + a_result = np.array([[6.0, 1.0], [1.0, 0.0]]) + a = nx.attribute_mixing_matrix( + self.F, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.F, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py new file mode 100644 index 0000000000000000000000000000000000000000..bf1252d532079d4de6de4659943ce008eb9018b3 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py @@ -0,0 +1,108 @@ +import pytest + +import networkx as nx + + +class TestAverageNeighbor: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {0: 2, 1: 1.5, 2: 1.5, 3: 2} + nd = nx.average_neighbor_degree(G) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + nd = nx.average_neighbor_degree(D) + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "out") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in") + assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {0: 2, 1: 1.8, 2: 1.8, 3: 2} + nd = nx.average_neighbor_degree(G, weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + print(D.edges(data=True)) + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "out", "out", weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in", weight="weight") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "in", "out", weight="weight") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in", weight="weight") 
+ assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + nd = nx.average_neighbor_degree(D, source="in+out", weight="weight") + assert nd == {0: 1.0, 1: 1.0, 2: 0.8, 3: 1.0} + nd = nx.average_neighbor_degree(D, target="in+out", weight="weight") + assert nd == {0: 2.0, 1: 2.0, 2: 1.0, 3: 0.0} + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + nd = nx.average_neighbor_degree(D, source="out", target="out", weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in", weight="weight") + assert nd == answer + + def test_degree_k4(self): + G = nx.complete_graph(4) + answer = {0: 3, 1: 3, 2: 3, 3: 3} + nd = nx.average_neighbor_degree(G) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in") + assert nd == answer + + def test_degree_k4_nodes(self): + G = nx.complete_graph(4) + answer = {1: 3.0, 2: 3.0} + nd = nx.average_neighbor_degree(G, nodes=[1, 2]) + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_neighbor_degree(G)[5] + assert nd == 1.8 + nd = nx.average_neighbor_degree(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_error_invalid_source_target(self): + G = nx.path_graph(4) + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") + G = G.to_directed() + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") diff --git a/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py new file mode 100644 index 0000000000000000000000000000000000000000..3984292be84dd7b306066809fb3c50a7cf0424f4 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py @@ -0,0 +1,87 @@ +import networkx as nx + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestAttributeMixingXY(BaseTestAttributeMixing): + def test_node_attribute_xy_undirected(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish")) + attrxy_result = sorted( + [ + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ("one", "red"), + ("red", "one"), + ("blue", "two"), + ("two", "blue"), + ] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_undirected_nodes(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish", nodes=["one", "yellow"])) + attrxy_result = sorted([]) + assert attrxy == attrxy_result + + def test_node_attribute_xy_directed(self): + attrxy = sorted(nx.node_attribute_xy(self.D, "fish")) + attrxy_result = sorted( + [("one", "one"), ("two", "two"), ("one", "red"), ("two", "blue")] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_multigraph(self): + attrxy = sorted(nx.node_attribute_xy(self.M, "fish")) + attrxy_result = [ + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ] + assert attrxy == attrxy_result + + def test_node_attribute_xy_selfloop(self): + 
attrxy = sorted(nx.node_attribute_xy(self.S, "fish"))
+        attrxy_result = [("one", "one"), ("two", "two")]
+        assert attrxy == attrxy_result
+
+
+class TestDegreeMixingXY(BaseTestDegreeMixing):
+    def test_node_degree_xy_undirected(self):
+        xy = sorted(nx.node_degree_xy(self.P4))
+        xy_result = sorted([(1, 2), (2, 1), (2, 2), (2, 2), (1, 2), (2, 1)])
+        assert xy == xy_result
+
+    def test_node_degree_xy_undirected_nodes(self):
+        xy = sorted(nx.node_degree_xy(self.P4, nodes=[0, 1, -1]))
+        xy_result = sorted([(1, 2), (2, 1)])
+        assert xy == xy_result
+
+    def test_node_degree_xy_directed(self):
+        xy = sorted(nx.node_degree_xy(self.D))
+        xy_result = sorted([(2, 1), (2, 3), (1, 3), (1, 3)])
+        assert xy == xy_result
+
+    def test_node_degree_xy_multigraph(self):
+        xy = sorted(nx.node_degree_xy(self.M))
+        xy_result = sorted(
+            [(2, 3), (2, 3), (3, 2), (3, 2), (2, 3), (3, 2), (1, 2), (2, 1)]
+        )
+        assert xy == xy_result
+
+    def test_node_degree_xy_selfloop(self):
+        xy = sorted(nx.node_degree_xy(self.S))
+        xy_result = sorted([(2, 2), (2, 2)])
+        assert xy == xy_result
+
+    def test_node_degree_xy_weighted(self):
+        G = nx.Graph()
+        G.add_edge(1, 2, weight=7)
+        G.add_edge(2, 3, weight=10)
+        xy = sorted(nx.node_degree_xy(G, weight="weight"))
+        xy_result = sorted([(7, 17), (17, 10), (17, 7), (10, 17)])
+        assert xy == xy_result
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/asteroidal.py b/phivenv/Lib/site-packages/networkx/algorithms/asteroidal.py
new file mode 100644
index 0000000000000000000000000000000000000000..65355fe625338e532cfc3dd3baa59c7340358e59
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/asteroidal.py
@@ -0,0 +1,170 @@
+"""
+Algorithms for asteroidal triples and asteroidal numbers in graphs.
+
+An asteroidal triple in a graph G is a set of three non-adjacent vertices
+u, v and w such that there exists a path between any two of them that avoids
+the closed neighborhood of the third. More formally, v_j, v_k belong to the
+same connected component of G - N[v_i], where N[v_i] denotes the closed
+neighborhood of v_i. A graph which does not contain any asteroidal triples is
+called an AT-free graph. The class of AT-free graphs is a graph class for
+which many NP-complete problems are solvable in polynomial time. Amongst
+them, independent set and coloring.
+"""
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["is_at_free", "find_asteroidal_triple"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def find_asteroidal_triple(G):
+    r"""Find an asteroidal triple in the given graph.
+
+    An asteroidal triple is a triple of non-adjacent vertices such that
+    there exists a path between any two of them which avoids the closed
+    neighborhood of the third. The function checks all independent triples
+    of vertices and whether they are an asteroidal triple or not. This is
+    done with the help of a data structure called a component structure.
+    A component structure encodes information about which vertices belong to
+    the same connected component when the closed neighborhood of a given vertex
+    is removed from the graph. The algorithm used to check is the trivial
+    one, outlined in [1]_, which has a runtime of
+    :math:`O(|V||\overline{E}| + |V||E|)`, where the second term is the
+    creation of the component structure.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        The graph to check for asteroidal triples.
+
+    Returns
+    -------
+    list or None
+        An asteroidal triple, returned as a list of three nodes, or None if
+        no asteroidal triple exists, i.e. the graph is AT-free.
+
+    Notes
+    -----
+    The component structure and the algorithm are described in [1]_. The
+    current implementation implements the trivial algorithm for simple graphs.
+
+    References
+    ----------
+    .. [1] Ekkehard Köhler,
+       "Recognizing Graphs without asteroidal triples",
+       Journal of Discrete Algorithms 2, pages 439-452, 2004.
+       https://www.sciencedirect.com/science/article/pii/S157086670400019X
+    """
+    V = set(G.nodes)
+
+    if len(V) < 6:
+        # An asteroidal triple cannot exist in a graph with 5 or fewer vertices.
+        return None
+
+    component_structure = create_component_structure(G)
+    E_complement = set(nx.complement(G).edges)
+
+    for e in E_complement:
+        u = e[0]
+        v = e[1]
+        u_neighborhood = set(G[u]).union([u])
+        v_neighborhood = set(G[v]).union([v])
+        union_of_neighborhoods = u_neighborhood.union(v_neighborhood)
+        for w in V - union_of_neighborhoods:
+            # Check for each pair of vertices whether they belong to the
+            # same connected component when the closed neighborhood of the
+            # third is removed.
+            if (
+                component_structure[u][v] == component_structure[u][w]
+                and component_structure[v][u] == component_structure[v][w]
+                and component_structure[w][u] == component_structure[w][v]
+            ):
+                return [u, v, w]
+    return None
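A short usage sketch for the function above: the 6-cycle contains an asteroidal triple (its two triples of pairwise non-adjacent vertices, e.g. {0, 2, 4}, work, since the paths along the cycle avoid the third vertex's closed neighborhood). Which triple is returned depends on iteration order:

```python
import networkx as nx

G = nx.cycle_graph(6)
triple = nx.find_asteroidal_triple(G)
print(triple)            # e.g. [0, 2, 4]; None would mean the graph is AT-free
print(nx.is_at_free(G))  # False
```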
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def is_at_free(G):
+    """Check if a graph is AT-free.
+
+    The method uses the `find_asteroidal_triple` method to recognize
+    an AT-free graph. If no asteroidal triple is found the graph is
+    AT-free and True is returned. If at least one asteroidal triple is
+    found the graph is not AT-free and False is returned.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        The graph to check whether it is AT-free or not.
+
+    Returns
+    -------
+    bool
+        True if G is AT-free and False otherwise.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
+    >>> nx.is_at_free(G)
+    True
+
+    >>> G = nx.cycle_graph(6)
+    >>> nx.is_at_free(G)
+    False
+    """
+    return find_asteroidal_triple(G) is None
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def create_component_structure(G):
+    r"""Create component structure for G.
+
+    A *component structure* is an :math:`n \times n` array, denoted `c`,
+    where `n` is the number of vertices and each row and column corresponds
+    to a vertex.
+
+    .. math::
+
+        c_{uv} = \begin{cases}
+            0, & \text{if } v \in N[u] \\
+            k, & \text{if } v \text{ is in component } k \text{ of } G \setminus N[u]
+        \end{cases}
+
+    where `k` is an arbitrary label for each component. The structure is used
+    to simplify the detection of asteroidal triples.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        Undirected, simple graph.
+
+    Returns
+    -------
+    component_structure : dictionary
+        A dictionary of dictionaries, keyed by pairs of vertices.
+    """
+    V = set(G.nodes)
+    component_structure = {}
+    for v in V:
+        label = 0
+        closed_neighborhood = set(G[v]).union({v})
+        row_dict = {}
+        for u in closed_neighborhood:
+            row_dict[u] = 0
+
+        G_reduced = G.subgraph(set(G.nodes) - closed_neighborhood)
+        for cc in nx.connected_components(G_reduced):
+            label += 1
+            for u in cc:
+                row_dict[u] = label
+
+        component_structure[v] = row_dict
+
+    return component_structure
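What `create_component_structure` returns is easiest to see on a small path graph (a sketch; the component labels are arbitrary positive integers assigned in iteration order):

```python
import networkx as nx
from networkx.algorithms.asteroidal import create_component_structure

P = nx.path_graph(5)  # 0-1-2-3-4
cs = create_component_structure(P)
# Removing N[2] = {1, 2, 3} splits the path into components {0} and {4}:
# vertices inside N[2] map to 0, the rest to their component's label.
print(cs[2])  # e.g. {1: 0, 2: 0, 3: 0, 0: 1, 4: 2}
```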
+ + """ + V = set(G.nodes) + component_structure = {} + for v in V: + label = 0 + closed_neighborhood = set(G[v]).union({v}) + row_dict = {} + for u in closed_neighborhood: + row_dict[u] = 0 + + G_reduced = G.subgraph(set(G.nodes) - closed_neighborhood) + for cc in nx.connected_components(G_reduced): + label += 1 + for u in cc: + row_dict[u] = label + + component_structure[v] = row_dict + + return component_structure diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bd60020b122d38b8d13569d8f636ca45d771fb31 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__init__.py @@ -0,0 +1,87 @@ +r""" This module provides functions and operations for bipartite +graphs. Bipartite graphs `B = (U, V, E)` have two node sets `U,V` and edges in +`E` that only connect nodes from opposite sets. It is common in the literature +to use an spatial analogy referring to the two node sets as top and bottom nodes. + +The bipartite algorithms are not imported into the networkx namespace +at the top level so the easiest way to use them is with: + +>>> from networkx.algorithms import bipartite + +NetworkX does not have a custom bipartite graph class but the Graph() +or DiGraph() classes can be used to represent bipartite graphs. However, +you have to keep track of which set each node belongs to, and make +sure that there is no edge between nodes of the same set. The convention used +in NetworkX is to use a node attribute named `bipartite` with values 0 or 1 to +identify the sets each node belongs to. This convention is not enforced in +the source code of bipartite functions, it's only a recommendation. + +For example: + +>>> B = nx.Graph() +>>> # Add nodes with the node attribute "bipartite" +>>> B.add_nodes_from([1, 2, 3, 4], bipartite=0) +>>> B.add_nodes_from(["a", "b", "c"], bipartite=1) +>>> # Add edges only between nodes of opposite node sets +>>> B.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + +Many algorithms of the bipartite module of NetworkX require, as an argument, a +container with all the nodes that belong to one set, in addition to the bipartite +graph `B`. The functions in the bipartite package do not check that the node set +is actually correct nor that the input graph is actually bipartite. +If `B` is connected, you can find the two node sets using a two-coloring +algorithm: + +>>> nx.is_connected(B) +True +>>> bottom_nodes, top_nodes = bipartite.sets(B) + +However, if the input graph is not connected, there are more than one possible +colorations. This is the reason why we require the user to pass a container +with all nodes of one bipartite node set as an argument to most bipartite +functions. In the face of ambiguity, we refuse the temptation to guess and +raise an :exc:`AmbiguousSolution ` +Exception if the input graph for +:func:`bipartite.sets ` +is disconnected. 
+ +Using the `bipartite` node attribute, you can easily get the two node sets: + +>>> top_nodes = {n for n, d in B.nodes(data=True) if d["bipartite"] == 0} +>>> bottom_nodes = set(B) - top_nodes + +So you can easily use the bipartite algorithms that require, as an argument, a +container with all nodes that belong to one node set: + +>>> print(round(bipartite.density(B, bottom_nodes), 2)) +0.5 +>>> G = bipartite.projected_graph(B, top_nodes) + +All bipartite graph generators in NetworkX build bipartite graphs with the +`bipartite` node attribute. Thus, you can use the same approach: + +>>> RB = bipartite.random_graph(5, 7, 0.2) +>>> RB_top = {n for n, d in RB.nodes(data=True) if d["bipartite"] == 0} +>>> RB_bottom = set(RB) - RB_top +>>> list(RB_top) +[0, 1, 2, 3, 4] +>>> list(RB_bottom) +[5, 6, 7, 8, 9, 10, 11] + +For other bipartite graph generators see +:mod:`Generators `. + +""" + +from networkx.algorithms.bipartite.basic import * +from networkx.algorithms.bipartite.centrality import * +from networkx.algorithms.bipartite.cluster import * +from networkx.algorithms.bipartite.covering import * +from networkx.algorithms.bipartite.edgelist import * +from networkx.algorithms.bipartite.matching import * +from networkx.algorithms.bipartite.matrix import * +from networkx.algorithms.bipartite.projection import * +from networkx.algorithms.bipartite.redundancy import * +from networkx.algorithms.bipartite.spectral import * +from networkx.algorithms.bipartite.generators import * +from networkx.algorithms.bipartite.extendability import * diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fbf567aef161b716a830007ca06c8573e6b58d7 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f50757c25e08b115388ffcc281897ace98323e25 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55a2f500d5447f8838c8df5c92d8bad802c0b378 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad14d4ccffd1a948f53ad33a16ea995d6165e80f Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c4ad985152da5140f39b76e5115c4ebc001d6a00 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..182bf56e2f226fda1e91215bcdfab9d32600bcea Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2255622ea09a1b0c4bd3593e255283c0da3d1743 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb67e011f7dc063ddefbaf351a04cba55355ce2d Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1ac3f70cba80542e9f14cd9f3fdd6c8376f6caa Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..635d9d5850d08019a7c9952337c363e71fa514d6 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..956376287697980b140d4b6a3bd9073a38a54c44 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9250b0dff48c5c21a136dbf37949b9eb843f9e33 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1810f9b94488e832fa8216681185f4b228a63d8c
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/basic.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b9120e27aa9663e557680a8372465be5ed433e2
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/basic.py
@@ -0,0 +1,321 @@
+"""
+==========================
+Bipartite Graph Algorithms
+==========================
+"""
+import networkx as nx
+from networkx.algorithms.components import connected_components
+from networkx.exception import AmbiguousSolution
+
+__all__ = [
+    "is_bipartite",
+    "is_bipartite_node_set",
+    "color",
+    "sets",
+    "density",
+    "degrees",
+]
+
+
+@nx._dispatch
+def color(G):
+    """Returns a two-coloring of the graph.
+
+    Raises an exception if the graph is not bipartite.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    color : dictionary
+        A dictionary keyed by node with a 1 or 0 as data for each node color.
+
+    Raises
+    ------
+    NetworkXError
+        If the graph is not two-colorable.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.path_graph(4)
+    >>> c = bipartite.color(G)
+    >>> print(c)
+    {0: 1, 1: 0, 2: 1, 3: 0}
+
+    You can use this to set a node attribute indicating the bipartite set:
+
+    >>> nx.set_node_attributes(G, c, "bipartite")
+    >>> print(G.nodes[0]["bipartite"])
+    1
+    >>> print(G.nodes[1]["bipartite"])
+    0
+    """
+    if G.is_directed():
+        import itertools
+
+        def neighbors(v):
+            return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)])
+
+    else:
+        neighbors = G.neighbors
+
+    color = {}
+    for n in G:  # handle disconnected graphs
+        if n in color or len(G[n]) == 0:  # skip isolates
+            continue
+        queue = [n]
+        color[n] = 1  # nodes seen with color (1 or 0)
+        while queue:
+            v = queue.pop()
+            c = 1 - color[v]  # opposite color of node v
+            for w in neighbors(v):
+                if w in color:
+                    if color[w] == color[v]:
+                        raise nx.NetworkXError("Graph is not bipartite.")
+                else:
+                    color[w] = c
+                    queue.append(w)
+    # color isolates with 0
+    color.update(dict.fromkeys(nx.isolates(G), 0))
+    return color
+
+
+@nx._dispatch
+def is_bipartite(G):
+    """Returns True if graph G is bipartite, False if not.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.path_graph(4)
+    >>> print(bipartite.is_bipartite(G))
+    True
+
+    See Also
+    --------
+    color, is_bipartite_node_set
+    """
+    try:
+        color(G)
+        return True
+    except nx.NetworkXError:
+        return False
+
+
+@nx._dispatch
+def is_bipartite_node_set(G, nodes):
+    """Returns True if nodes and G/nodes are a bipartition of G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    nodes : list or container
+        Check if nodes are one of the two bipartite node sets.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.path_graph(4)
+    >>> X = set([1, 3])
+    >>> bipartite.is_bipartite_node_set(G, X)
+    True
+
+    Notes
+    -----
+    An exception is raised if the input nodes are not distinct, because in this
+    case some bipartite algorithms will yield incorrect results.
+    For connected graphs the bipartite sets are unique. This function handles
+    disconnected graphs.
+    """
+    S = set(nodes)
+
+    if len(S) < len(nodes):
+        # this should maybe just return False?
+        raise AmbiguousSolution(
+            "The input node set contains duplicates.\n"
+            "This may lead to incorrect results when using it in bipartite algorithms.\n"
+            "Consider using set(nodes) as the input"
+        )
+
+    for CC in (G.subgraph(c).copy() for c in connected_components(G)):
+        X, Y = sets(CC)
+        if not (
+            (X.issubset(S) and Y.isdisjoint(S)) or (Y.issubset(S) and X.isdisjoint(S))
+        ):
+            return False
+    return True
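Because `is_bipartite_node_set` checks each connected component separately (per the Notes above), a node set can be valid even when the graph is disconnected; a quick sketch:

```python
import networkx as nx
from networkx.algorithms import bipartite

G = nx.Graph([(0, 1), (2, 3)])  # two disconnected edges
print(bipartite.is_bipartite_node_set(G, {0, 2}))  # True
print(bipartite.is_bipartite_node_set(G, {0, 3}))  # True (the other valid mix)
print(bipartite.is_bipartite_node_set(G, {0, 1}))  # False: 0 and 1 share an edge
```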
+
+
+@nx._dispatch
+def sets(G, top_nodes=None):
+    """Returns bipartite node sets of graph G.
+
+    Raises an exception if the graph is not bipartite or if the input
+    graph is disconnected and thus more than one valid solution exists.
+    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+    for further details on how bipartite graphs are handled in NetworkX.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    top_nodes : container, optional
+        Container with all nodes in one bipartite node set. If not supplied
+        it will be computed. But if more than one solution exists an exception
+        will be raised.
+
+    Returns
+    -------
+    X : set
+        Nodes from one side of the bipartite graph.
+    Y : set
+        Nodes from the other side.
+
+    Raises
+    ------
+    AmbiguousSolution
+        Raised if the input bipartite graph is disconnected and no container
+        with all nodes in one bipartite set is provided. When determining
+        the nodes in each bipartite set more than one valid solution is
+        possible if the input graph is disconnected.
+    NetworkXError
+        Raised if the input graph is not bipartite.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.path_graph(4)
+    >>> X, Y = bipartite.sets(G)
+    >>> list(X)
+    [0, 2]
+    >>> list(Y)
+    [1, 3]
+
+    See Also
+    --------
+    color
+
+    """
+    if G.is_directed():
+        is_connected = nx.is_weakly_connected
+    else:
+        is_connected = nx.is_connected
+    if top_nodes is not None:
+        X = set(top_nodes)
+        Y = set(G) - X
+    else:
+        if not is_connected(G):
+            msg = "Disconnected graph: Ambiguous solution for bipartite sets."
+            raise nx.AmbiguousSolution(msg)
+        c = color(G)
+        X = {n for n, is_top in c.items() if is_top}
+        Y = {n for n, is_top in c.items() if not is_top}
+    return (X, Y)
+
+
+@nx._dispatch(graphs="B")
+def density(B, nodes):
+    """Returns density of bipartite graph B.
+
+    Parameters
+    ----------
+    B : NetworkX graph
+
+    nodes : list or container
+        Nodes in one node set of the bipartite graph.
+
+    Returns
+    -------
+    d : float
+        The bipartite density
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.complete_bipartite_graph(3, 2)
+    >>> X = set([0, 1, 2])
+    >>> bipartite.density(G, X)
+    1.0
+    >>> Y = set([3, 4])
+    >>> bipartite.density(G, Y)
+    1.0
+
+    Notes
+    -----
+    The container of nodes passed as argument must contain all nodes
+    in one of the two bipartite node sets to avoid ambiguity in the
+    case of disconnected graphs.
+    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+    for further details on how bipartite graphs are handled in NetworkX.
+
+    See Also
+    --------
+    color
+    """
+    n = len(B)
+    m = nx.number_of_edges(B)
+    nb = len(nodes)
+    nt = n - nb
+    if m == 0:  # includes cases n==0 and n==1
+        d = 0.0
+    else:
+        if B.is_directed():
+            d = m / (2 * nb * nt)
+        else:
+            d = m / (nb * nt)
+    return d
+
+
+@nx._dispatch(graphs="B", edge_attrs="weight")
+def degrees(B, nodes, weight=None):
+    """Returns the degrees of the two node sets in the bipartite graph B.
+
+    Parameters
+    ----------
+    B : NetworkX graph
+
+    nodes : list or container
+        Nodes in one node set of the bipartite graph.
+ + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + (degX,degY) : tuple of dictionaries + The degrees of the two bipartite sets as dictionaries keyed by node. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.complete_bipartite_graph(3, 2) + >>> Y = set([3, 4]) + >>> degX, degY = bipartite.degrees(G, Y) + >>> dict(degX) + {0: 2, 1: 2, 2: 2} + + Notes + ----- + The container of nodes passed as argument must contain all nodes + in one of the two bipartite node sets to avoid ambiguity in the + case of disconnected graphs. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + color, density + """ + bottom = set(nodes) + top = set(B) - bottom + return (B.degree(top, weight), B.degree(bottom, weight)) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..a904da3528f208387e6b845dd18bb8b6253cd799 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/centrality.py @@ -0,0 +1,290 @@ +import networkx as nx + +__all__ = ["degree_centrality", "betweenness_centrality", "closeness_centrality"] + + +@nx._dispatch(name="bipartite_degree_centrality") +def degree_centrality(G, nodes): + r"""Compute the degree centrality for nodes in a bipartite network. + + The degree centrality for a node `v` is the fraction of nodes + connected to it. + + Parameters + ---------- + G : graph + A bipartite network + + nodes : list or container + Container with all nodes in one bipartite node set. + + Returns + ------- + centrality : dictionary + Dictionary keyed by node with bipartite degree centrality as the value. + + Examples + -------- + >>> G = nx.wheel_graph(5) + >>> top_nodes = {0, 1, 2} + >>> nx.bipartite.degree_centrality(G, nodes=top_nodes) + {0: 2.0, 1: 1.5, 2: 1.5, 3: 1.0, 4: 1.0} + + See Also + -------- + betweenness_centrality + closeness_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both bipartite node + sets. See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + For unipartite networks, the degree centrality values are + normalized by dividing by the maximum possible degree (which is + `n-1` where `n` is the number of nodes in G). + + In the bipartite case, the maximum possible degree of a node in a + bipartite node set is the number of nodes in the opposite node set + [1]_. The degree centrality for a node `v` in the bipartite + sets `U` with `n` nodes and `V` with `m` nodes is + + .. math:: + + d_{v} = \frac{deg(v)}{m}, \mbox{for} v \in U , + + d_{v} = \frac{deg(v)}{n}, \mbox{for} v \in V , + + + where `deg(v)` is the degree of node `v`. + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. 
+ https://dx.doi.org/10.4135/9781446294413.n28 + """ + top = set(nodes) + bottom = set(G) - top + s = 1.0 / len(bottom) + centrality = {n: d * s for n, d in G.degree(top)} + s = 1.0 / len(top) + centrality.update({n: d * s for n, d in G.degree(bottom)}) + return centrality + + +@nx._dispatch(name="bipartite_betweenness_centrality") +def betweenness_centrality(G, nodes): + r"""Compute betweenness centrality for nodes in a bipartite network. + + Betweenness centrality of a node `v` is the sum of the + fraction of all-pairs shortest paths that pass through `v`. + + Values of betweenness are normalized by the maximum possible + value which for bipartite graphs is limited by the relative size + of the two node sets [1]_. + + Let `n` be the number of nodes in the node set `U` and + `m` be the number of nodes in the node set `V`, then + nodes in `U` are normalized by dividing by + + .. math:: + + \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] , + + where + + .. math:: + + s = (n - 1) \div m , t = (n - 1) \mod m , + + and nodes in `V` are normalized by dividing by + + .. math:: + + \frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] , + + where, + + .. math:: + + p = (m - 1) \div n , r = (m - 1) \mod n . + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or container + Container with all nodes in one bipartite node set. + + Returns + ------- + betweenness : dictionary + Dictionary keyed by node with bipartite betweenness centrality + as the value. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> top_nodes = {1, 2} + >>> nx.bipartite.betweenness_centrality(G, nodes=top_nodes) + {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + + See Also + -------- + degree_centrality + closeness_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both node sets. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + https://dx.doi.org/10.4135/9781446294413.n28 + """ + top = set(nodes) + bottom = set(G) - top + n = len(top) + m = len(bottom) + s, t = divmod(n - 1, m) + bet_max_top = ( + ((m**2) * ((s + 1) ** 2)) + + (m * (s + 1) * (2 * t - s - 1)) + - (t * ((2 * s) - t + 3)) + ) / 2.0 + p, r = divmod(m - 1, n) + bet_max_bot = ( + ((n**2) * ((p + 1) ** 2)) + + (n * (p + 1) * (2 * r - p - 1)) + - (r * ((2 * p) - r + 3)) + ) / 2.0 + betweenness = nx.betweenness_centrality(G, normalized=False, weight=None) + for node in top: + betweenness[node] /= bet_max_top + for node in bottom: + betweenness[node] /= bet_max_bot + return betweenness + + +@nx._dispatch(name="bipartite_closeness_centrality") +def closeness_centrality(G, nodes, normalized=True): + r"""Compute the closeness centrality for nodes in a bipartite network. + + The closeness of a node is the distance to all other nodes in the + graph or in the case that the graph is not connected to all other nodes + in the connected component containing that node. + + Parameters + ---------- + G : graph + A bipartite network + + nodes : list or container + Container with all nodes in one bipartite node set. 
+ + normalized : bool, optional + If True (default) normalize by connected component size. + + Returns + ------- + closeness : dictionary + Dictionary keyed by node with bipartite closeness centrality + as the value. + + Examples + -------- + >>> G = nx.wheel_graph(5) + >>> top_nodes = {0, 1, 2} + >>> nx.bipartite.closeness_centrality(G, nodes=top_nodes) + {0: 1.5, 1: 1.2, 2: 1.2, 3: 1.0, 4: 1.0} + + See Also + -------- + betweenness_centrality + degree_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both node sets. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + + Closeness centrality is normalized by the minimum distance possible. + In the bipartite case the minimum distance for a node in one bipartite + node set is 1 from all nodes in the other node set and 2 from all + other nodes in its own set [1]_. Thus the closeness centrality + for node `v` in the two bipartite sets `U` with + `n` nodes and `V` with `m` nodes is + + .. math:: + + c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for} v \in U, + + c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for} v \in V, + + where `d` is the sum of the distances from `v` to all + other nodes. + + Higher values of closeness indicate higher centrality. + + As in the unipartite case, setting normalized=True causes the + values to normalized further to n-1 / size(G)-1 where n is the + number of nodes in the connected part of graph containing the + node. If the graph is not completely connected, this algorithm + computes the closeness centrality for each connected part + separately. + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. 
+ https://dx.doi.org/10.4135/9781446294413.n28 + """ + closeness = {} + path_length = nx.single_source_shortest_path_length + top = set(nodes) + bottom = set(G) - top + n = len(top) + m = len(bottom) + for node in top: + sp = dict(path_length(G, node)) + totsp = sum(sp.values()) + if totsp > 0.0 and len(G) > 1: + closeness[node] = (m + 2 * (n - 1)) / totsp + if normalized: + s = (len(sp) - 1) / (len(G) - 1) + closeness[node] *= s + else: + closeness[node] = 0.0 + for node in bottom: + sp = dict(path_length(G, node)) + totsp = sum(sp.values()) + if totsp > 0.0 and len(G) > 1: + closeness[node] = (n + 2 * (m - 1)) / totsp + if normalized: + s = (len(sp) - 1) / (len(G) - 1) + closeness[node] *= s + else: + closeness[node] = 0.0 + return closeness diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/cluster.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..f10d7efd117c724bd3435dd0aab49f083347d672 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/cluster.py @@ -0,0 +1,280 @@ +"""Functions for computing clustering of pairs + +""" + +import itertools + +import networkx as nx + +__all__ = [ + "clustering", + "average_clustering", + "latapy_clustering", + "robins_alexander_clustering", +] + + +def cc_dot(nu, nv): + return len(nu & nv) / len(nu | nv) + + +def cc_max(nu, nv): + return len(nu & nv) / max(len(nu), len(nv)) + + +def cc_min(nu, nv): + return len(nu & nv) / min(len(nu), len(nv)) + + +modes = {"dot": cc_dot, "min": cc_min, "max": cc_max} + + +@nx._dispatch +def latapy_clustering(G, nodes=None, mode="dot"): + r"""Compute a bipartite clustering coefficient for nodes. + + The bipartite clustering coefficient is a measure of local density + of connections defined as [1]_: + + .. math:: + + c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|} + + where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, + and `c_{uv}` is the pairwise clustering coefficient between nodes + `u` and `v`. + + The mode selects the function for `c_{uv}` which can be: + + `dot`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|} + + `min`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{min(|N(u)|,|N(v)|)} + + `max`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{max(|N(u)|,|N(v)|)} + + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or iterable (optional) + Compute bipartite clustering for these nodes. The default + is all nodes in G. + + mode : string + The pairwise bipartite clustering method to be used in the computation. + It must be "dot", "max", or "min". + + Returns + ------- + clustering : dictionary + A dictionary keyed by node with the clustering coefficient value. + + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) # path graphs are bipartite + >>> c = bipartite.clustering(G) + >>> c[0] + 0.5 + >>> c = bipartite.clustering(G, mode="min") + >>> c[0] + 1.0 + + See Also + -------- + robins_alexander_clustering + average_clustering + networkx.algorithms.cluster.square_clustering + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. 
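+
+    For instance, if `N(u) = {1, 2}` and `N(v) = {2, 3}`, the `dot` mode
+    gives `c_uv = 1/3` (one shared neighbor out of three nodes in the
+    union), while the `min` and `max` modes both give `c_uv = 1/2`.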
+ """ + if not nx.algorithms.bipartite.is_bipartite(G): + raise nx.NetworkXError("Graph is not bipartite") + + try: + cc_func = modes[mode] + except KeyError as err: + raise nx.NetworkXError( + "Mode for bipartite clustering must be: dot, min or max" + ) from err + + if nodes is None: + nodes = G + ccs = {} + for v in nodes: + cc = 0.0 + nbrs2 = {u for nbr in G[v] for u in G[nbr]} - {v} + for u in nbrs2: + cc += cc_func(set(G[u]), set(G[v])) + if cc > 0.0: # len(nbrs2)>0 + cc /= len(nbrs2) + ccs[v] = cc + return ccs + + +clustering = latapy_clustering + + +@nx._dispatch(name="bipartite_average_clustering") +def average_clustering(G, nodes=None, mode="dot"): + r"""Compute the average bipartite clustering coefficient. + + A clustering coefficient for the whole graph is the average, + + .. math:: + + C = \frac{1}{n}\sum_{v \in G} c_v, + + where `n` is the number of nodes in `G`. + + Similar measures for the two bipartite sets can be defined [1]_ + + .. math:: + + C_X = \frac{1}{|X|}\sum_{v \in X} c_v, + + where `X` is a bipartite set of `G`. + + Parameters + ---------- + G : graph + a bipartite graph + + nodes : list or iterable, optional + A container of nodes to use in computing the average. + The nodes should be either the entire graph (the default) or one of the + bipartite sets. + + mode : string + The pairwise bipartite clustering method. + It must be "dot", "max", or "min" + + Returns + ------- + clustering : float + The average bipartite clustering for the given set of nodes or the + entire graph if no nodes are specified. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.star_graph(3) # star graphs are bipartite + >>> bipartite.average_clustering(G) + 0.75 + >>> X, Y = bipartite.sets(G) + >>> bipartite.average_clustering(G, X) + 0.0 + >>> bipartite.average_clustering(G, Y) + 1.0 + + See Also + -------- + clustering + + Notes + ----- + The container of nodes passed to this function must contain all of the nodes + in one of the bipartite sets ("top" or "bottom") in order to compute + the correct average bipartite clustering coefficients. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. + """ + if nodes is None: + nodes = G + ccs = latapy_clustering(G, nodes=nodes, mode=mode) + return sum(ccs[v] for v in nodes) / len(nodes) + + +@nx._dispatch +def robins_alexander_clustering(G): + r"""Compute the bipartite clustering of G. + + Robins and Alexander [1]_ defined bipartite clustering coefficient as + four times the number of four cycles `C_4` divided by the number of + three paths `L_3` in a bipartite graph: + + .. math:: + + CC_4 = \frac{4 * C_4}{L_3} + + Parameters + ---------- + G : graph + a bipartite graph + + Returns + ------- + clustering : float + The Robins and Alexander bipartite clustering for the input graph. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.davis_southern_women_graph() + >>> print(round(bipartite.robins_alexander_clustering(G), 3)) + 0.468 + + See Also + -------- + latapy_clustering + networkx.algorithms.cluster.square_clustering + + References + ---------- + .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking + directors: Network structure and distance in bipartite graphs. 
+ Computational & Mathematical Organization Theory 10(1), 69–94. + + """ + if G.order() < 4 or G.size() < 3: + return 0 + L_3 = _threepaths(G) + if L_3 == 0: + return 0 + C_4 = _four_cycles(G) + return (4.0 * C_4) / L_3 + + +def _four_cycles(G): + cycles = 0 + for v in G: + for u, w in itertools.combinations(G[v], 2): + cycles += len((set(G[u]) & set(G[w])) - {v}) + return cycles / 4 + + +def _threepaths(G): + paths = 0 + for v in G: + for u in G[v]: + for w in set(G[u]) - {v}: + paths += len(set(G[w]) - {v, u}) + # Divide by two because we count each three path twice + # one for each possible starting point + return paths / 2 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/covering.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/covering.py new file mode 100644 index 0000000000000000000000000000000000000000..8669b4b1681805d8599686f7fd2fedb1f01839b7 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/covering.py @@ -0,0 +1,57 @@ +""" Functions related to graph covers.""" + +import networkx as nx +from networkx.algorithms.bipartite.matching import hopcroft_karp_matching +from networkx.algorithms.covering import min_edge_cover as _min_edge_cover +from networkx.utils import not_implemented_for + +__all__ = ["min_edge_cover"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(name="bipartite_min_edge_cover") +def min_edge_cover(G, matching_algorithm=None): + """Returns a set of edges which constitutes + the minimum edge cover of the graph. + + The smallest edge cover can be found in polynomial time by finding + a maximum matching and extending it greedily so that all nodes + are covered. + + Parameters + ---------- + G : NetworkX graph + An undirected bipartite graph. + + matching_algorithm : function + A function that returns a maximum cardinality matching in a + given bipartite graph. The function must take one input, the + graph ``G``, and return a dictionary mapping each node to its + mate. If not specified, + :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching` + will be used. Other possibilities include + :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`, + + Returns + ------- + set + A set of the edges in a minimum edge cover of the graph, given as + pairs of nodes. It contains both the edges `(u, v)` and `(v, u)` + for given nodes `u` and `v` among the edges of minimum edge cover. + + Notes + ----- + An edge cover of a graph is a set of edges such that every node of + the graph is incident to at least one edge of the set. + A minimum edge cover is an edge covering of smallest cardinality. + + Due to its implementation, the worst-case running time of this algorithm + is bounded by the worst-case running time of the function + ``matching_algorithm``. + """ + if G.order() == 0: # Special case for the empty graph + return set() + if matching_algorithm is None: + matching_algorithm = hopcroft_karp_matching + return _min_edge_cover(G, matching_algorithm=matching_algorithm) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/edgelist.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/edgelist.py new file mode 100644 index 0000000000000000000000000000000000000000..5305aca3bdbe0af10a86689a8caccf315e1426bb --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/edgelist.py @@ -0,0 +1,359 @@ +""" +******************** +Bipartite Edge Lists +******************** +Read and write NetworkX graphs as bipartite edge lists. 
+
+Format
+------
+You can read or write three formats of edge lists with these functions.
+
+Node pairs with no data::
+
+ 1 2
+
+Python dictionary as data::
+
+ 1 2 {'weight':7, 'color':'green'}
+
+Arbitrary data::
+
+ 1 2 7 green
+
+For each edge (u, v) the node u is assigned to part 0 and the node v to part 1.
+"""
+__all__ = ["generate_edgelist", "write_edgelist", "parse_edgelist", "read_edgelist"]
+
+import networkx as nx
+from networkx.utils import not_implemented_for, open_file
+
+
+@open_file(1, mode="wb")
+def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"):
+    """Write a bipartite graph as a list of edges.
+
+    Parameters
+    ----------
+    G : Graph
+       A NetworkX bipartite graph
+    path : file or string
+       File or filename to write. If a file is provided, it must be
+       opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed.
+    comments : string, optional
+       The character used to indicate the start of a comment
+    delimiter : string, optional
+       The string used to separate values. The default is whitespace.
+    data : bool or list, optional
+       If False write no edge data.
+       If True write a string representation of the edge data dictionary.
+       If a list (or other iterable) is provided, write the keys specified
+       in the list.
+    encoding: string, optional
+       Specify which encoding to use when writing file.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> G.add_nodes_from([0, 2], bipartite=0)
+    >>> G.add_nodes_from([1, 3], bipartite=1)
+    >>> nx.write_edgelist(G, "test.edgelist")
+    >>> fh = open("test.edgelist", "wb")
+    >>> nx.write_edgelist(G, fh)
+    >>> nx.write_edgelist(G, "test.edgelist.gz")
+    >>> nx.write_edgelist(G, "test.edgelist.gz", data=False)
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(1, 2, weight=7, color="red")
+    >>> nx.write_edgelist(G, "test.edgelist", data=False)
+    >>> nx.write_edgelist(G, "test.edgelist", data=["color"])
+    >>> nx.write_edgelist(G, "test.edgelist", data=["color", "weight"])
+
+    See Also
+    --------
+    read_edgelist
+    generate_edgelist
+    """
+    for line in generate_edgelist(G, delimiter, data):
+        line += "\n"
+        path.write(line.encode(encoding))
+
+
+@not_implemented_for("directed")
+def generate_edgelist(G, delimiter=" ", data=True):
+    """Generate lines of the bipartite graph G in edge list format.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+       The graph is assumed to have node attribute `bipartite` set to 0,1
+       representing the two graph parts
+
+    delimiter : string, optional
+       Separator for node labels
+
+    data : bool or list of keys
+       If False generate no edge data. If True use a dictionary
+       representation of edge data. If a list of keys use a list of data
+       values corresponding to the keys.
+
+    Returns
+    -------
+    lines : string
+        Lines of data in edge list format.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> G = nx.path_graph(4)
+    >>> G.add_nodes_from([0, 2], bipartite=0)
+    >>> G.add_nodes_from([1, 3], bipartite=1)
+    >>> G[1][2]["weight"] = 3
+    >>> G[2][3]["capacity"] = 12
+    >>> for line in bipartite.generate_edgelist(G, data=False):
+    ...     print(line)
+    0 1
+    2 1
+    2 3
+
+    >>> for line in bipartite.generate_edgelist(G):
+    ...     print(line)
+    0 1 {}
+    2 1 {'weight': 3}
+    2 3 {'capacity': 12}
+
+    >>> for line in bipartite.generate_edgelist(G, data=["weight"]):
+    ...
print(line) + 0 1 + 2 1 3 + 2 3 + """ + try: + part0 = [n for n, d in G.nodes.items() if d["bipartite"] == 0] + except BaseException as err: + raise AttributeError("Missing node attribute `bipartite`") from err + if data is True or data is False: + for n in part0: + for edge in G.edges(n, data=data): + yield delimiter.join(map(str, edge)) + else: + for n in part0: + for u, v, d in G.edges(n, data=True): + edge = [u, v] + try: + edge.extend(d[k] for k in data) + except KeyError: + pass # missing data for this edge, should warn? + yield delimiter.join(map(str, edge)) + + +@nx._dispatch(name="bipartite_parse_edgelist", graphs=None) +def parse_edgelist( + lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True +): + """Parse lines of an edge list representation of a bipartite graph. + + Parameters + ---------- + lines : list or iterator of strings + Input data in edgelist format + comments : string, optional + Marker for comment lines + delimiter : string, optional + Separator for node labels + create_using: NetworkX graph container, optional + Use given NetworkX graph for holding nodes or edges. + nodetype : Python type, optional + Convert nodes to this type. + data : bool or list of (label,type) tuples + If False generate no edge data or if True use a dictionary + representation of edge data or a list tuples specifying dictionary + key names and types for edge data. + + Returns + ------- + G: NetworkX Graph + The bipartite graph corresponding to lines + + Examples + -------- + Edgelist with no data: + + >>> from networkx.algorithms import bipartite + >>> lines = ["1 2", "2 3", "3 4"] + >>> G = bipartite.parse_edgelist(lines, nodetype=int) + >>> sorted(G.nodes()) + [1, 2, 3, 4] + >>> sorted(G.nodes(data=True)) + [(1, {'bipartite': 0}), (2, {'bipartite': 0}), (3, {'bipartite': 0}), (4, {'bipartite': 1})] + >>> sorted(G.edges()) + [(1, 2), (2, 3), (3, 4)] + + Edgelist with data in Python dictionary representation: + + >>> lines = ["1 2 {'weight':3}", "2 3 {'weight':27}", "3 4 {'weight':3.0}"] + >>> G = bipartite.parse_edgelist(lines, nodetype=int) + >>> sorted(G.nodes()) + [1, 2, 3, 4] + >>> sorted(G.edges(data=True)) + [(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})] + + Edgelist with data in a list: + + >>> lines = ["1 2 3", "2 3 27", "3 4 3.0"] + >>> G = bipartite.parse_edgelist(lines, nodetype=int, data=(("weight", float),)) + >>> sorted(G.nodes()) + [1, 2, 3, 4] + >>> sorted(G.edges(data=True)) + [(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})] + + See Also + -------- + """ + from ast import literal_eval + + G = nx.empty_graph(0, create_using) + for line in lines: + p = line.find(comments) + if p >= 0: + line = line[:p] + if not len(line): + continue + # split line, should have 2 or more + s = line.strip().split(delimiter) + if len(s) < 2: + continue + u = s.pop(0) + v = s.pop(0) + d = s + if nodetype is not None: + try: + u = nodetype(u) + v = nodetype(v) + except BaseException as err: + raise TypeError( + f"Failed to convert nodes {u},{v} " f"to type {nodetype}." + ) from err + + if len(d) == 0 or data is False: + # no data or data type specified + edgedata = {} + elif data is True: + # no edge types specified + try: # try to evaluate as dictionary + edgedata = dict(literal_eval(" ".join(d))) + except BaseException as err: + raise TypeError( + f"Failed to convert edge data ({d})" f"to dictionary." 
+ ) from err + else: + # convert edge data to dictionary with specified keys and type + if len(d) != len(data): + raise IndexError( + f"Edge data {d} and data_keys {data} are not the same length" + ) + edgedata = {} + for (edge_key, edge_type), edge_value in zip(data, d): + try: + edge_value = edge_type(edge_value) + except BaseException as err: + raise TypeError( + f"Failed to convert {edge_key} data " + f"{edge_value} to type {edge_type}." + ) from err + edgedata.update({edge_key: edge_value}) + G.add_node(u, bipartite=0) + G.add_node(v, bipartite=1) + G.add_edge(u, v, **edgedata) + return G + + +@open_file(0, mode="rb") +@nx._dispatch(name="bipartite_read_edgelist", graphs=None) +def read_edgelist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + data=True, + edgetype=None, + encoding="utf-8", +): + """Read a bipartite graph from a list of edges. + + Parameters + ---------- + path : file or string + File or filename to read. If a file is provided, it must be + opened in 'rb' mode. + Filenames ending in .gz or .bz2 will be uncompressed. + comments : string, optional + The character used to indicate the start of a comment. + delimiter : string, optional + The string used to separate values. The default is whitespace. + create_using : Graph container, optional, + Use specified container to build graph. The default is networkx.Graph, + an undirected graph. + nodetype : int, float, str, Python type, optional + Convert node data from strings to specified type + data : bool or list of (label,type) tuples + Tuples specifying dictionary key names and types for edge data + edgetype : int, float, str, Python type, optional OBSOLETE + Convert edge data from strings to specified type and use as 'weight' + encoding: string, optional + Specify which encoding to use when reading file. + + Returns + ------- + G : graph + A networkx Graph or other type specified with create_using + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> G.add_nodes_from([0, 2], bipartite=0) + >>> G.add_nodes_from([1, 3], bipartite=1) + >>> bipartite.write_edgelist(G, "test.edgelist") + >>> G = bipartite.read_edgelist("test.edgelist") + + >>> fh = open("test.edgelist", "rb") + >>> G = bipartite.read_edgelist(fh) + >>> fh.close() + + >>> G = bipartite.read_edgelist("test.edgelist", nodetype=int) + + Edgelist with data in a list: + + >>> textline = "1 2 3" + >>> fh = open("test.edgelist", "w") + >>> d = fh.write(textline) + >>> fh.close() + >>> G = bipartite.read_edgelist( + ... "test.edgelist", nodetype=int, data=(("weight", float),) + ... ) + >>> list(G) + [1, 2] + >>> list(G.edges(data=True)) + [(1, 2, {'weight': 3.0})] + + See parse_edgelist() for more examples of formatting. + + See Also + -------- + parse_edgelist + + Notes + ----- + Since nodes must be hashable, the function nodetype must return hashable + types (e.g. int, float, str, frozenset - or tuples of those, etc.) 
+ """ + lines = (line.decode(encoding) for line in path) + return parse_edgelist( + lines, + comments=comments, + delimiter=delimiter, + create_using=create_using, + nodetype=nodetype, + data=data, + ) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/extendability.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/extendability.py new file mode 100644 index 0000000000000000000000000000000000000000..10dd5473b5cf5f4490bcb65086371ba58751a270 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/extendability.py @@ -0,0 +1,105 @@ +""" Provides a function for computing the extendability of a graph which is +undirected, simple, connected and bipartite and contains at least one perfect matching.""" + + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["maximal_extendability"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def maximal_extendability(G): + """Computes the extendability of a graph. + + The extendability of a graph is defined as the maximum $k$ for which `G` + is $k$-extendable. Graph `G` is $k$-extendable if and only if `G` has a + perfect matching and every set of $k$ independent edges can be extended + to a perfect matching in `G`. + + Parameters + ---------- + G : NetworkX Graph + A fully-connected bipartite graph without self-loops + + Returns + ------- + extendability : int + + Raises + ------ + NetworkXError + If the graph `G` is disconnected. + If the graph `G` is not bipartite. + If the graph `G` does not contain a perfect matching. + If the residual graph of `G` is not strongly connected. + + Notes + ----- + Definition: + Let `G` be a simple, connected, undirected and bipartite graph with a perfect + matching M and bipartition (U,V). The residual graph of `G`, denoted by $G_M$, + is the graph obtained from G by directing the edges of M from V to U and the + edges that do not belong to M from U to V. + + Lemma [1]_ : + Let M be a perfect matching of `G`. `G` is $k$-extendable if and only if its residual + graph $G_M$ is strongly connected and there are $k$ vertex-disjoint directed + paths between every vertex of U and every vertex of V. + + Assuming that input graph `G` is undirected, simple, connected, bipartite and contains + a perfect matching M, this function constructs the residual graph $G_M$ of G and + returns the minimum value among the maximum vertex-disjoint directed paths between + every vertex of U and every vertex of V in $G_M$. By combining the definitions + and the lemma, this value represents the extendability of the graph `G`. + + Time complexity O($n^3$ $m^2$)) where $n$ is the number of vertices + and $m$ is the number of edges. + + References + ---------- + .. [1] "A polynomial algorithm for the extendability problem in bipartite graphs", + J. Lakhal, L. Litzler, Information Processing Letters, 1998. + .. [2] "On n-extendible graphs", M. D. 
Plummer, Discrete Mathematics, 31:201–210, 1980 + https://doi.org/10.1016/0012-365X(80)90037-0 + + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph G is not connected") + + if not nx.bipartite.is_bipartite(G): + raise nx.NetworkXError("Graph G is not bipartite") + + U, V = nx.bipartite.sets(G) + + maximum_matching = nx.bipartite.hopcroft_karp_matching(G) + + if not nx.is_perfect_matching(G, maximum_matching): + raise nx.NetworkXError("Graph G does not contain a perfect matching") + + # list of edges in perfect matching, directed from V to U + pm = [(node, maximum_matching[node]) for node in V & maximum_matching.keys()] + + # Direct all the edges of G, from V to U if in matching, else from U to V + directed_edges = [ + (x, y) if (x in V and (x, y) in pm) or (x in U and (y, x) not in pm) else (y, x) + for x, y in G.edges + ] + + # Construct the residual graph of G + residual_G = nx.DiGraph() + residual_G.add_nodes_from(G) + residual_G.add_edges_from(directed_edges) + + if not nx.is_strongly_connected(residual_G): + raise nx.NetworkXError("The residual graph of G is not strongly connected") + + # For node-pairs between V & U, keep min of max number of node-disjoint paths + # Variable $k$ stands for the extendability of graph G + k = float("Inf") + for u in U: + for v in V: + num_paths = sum(1 for _ in nx.node_disjoint_paths(residual_G, u, v)) + k = k if k < num_paths else num_paths + return k diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/generators.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/generators.py new file mode 100644 index 0000000000000000000000000000000000000000..9cea597875b324688a92e321d17b13cc1cfb8878 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/generators.py @@ -0,0 +1,603 @@ +""" +Generators and functions for bipartite graphs. +""" +import math +import numbers +from functools import reduce + +import networkx as nx +from networkx.utils import nodes_or_number, py_random_state + +__all__ = [ + "configuration_model", + "havel_hakimi_graph", + "reverse_havel_hakimi_graph", + "alternating_havel_hakimi_graph", + "preferential_attachment_graph", + "random_graph", + "gnmk_random_graph", + "complete_bipartite_graph", +] + + +@nodes_or_number([0, 1]) +@nx._dispatch(graphs=None) +def complete_bipartite_graph(n1, n2, create_using=None): + """Returns the complete bipartite graph `K_{n_1,n_2}`. + + The graph is composed of two partitions with nodes 0 to (n1 - 1) + in the first and nodes n1 to (n1 + n2 - 1) in the second. + Each node in the first is connected to each node in the second. + + Parameters + ---------- + n1, n2 : integer or iterable container of nodes + If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`. + If a container, the elements are the nodes. + create_using : NetworkX graph instance, (default: nx.Graph) + Return graph of this type. + + Notes + ----- + Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are + containers of nodes. If only one of n1 or n2 are integers, that + integer is replaced by `range` of that integer. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. 
+ To use it use nx.bipartite.complete_bipartite_graph + """ + G = nx.empty_graph(0, create_using) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + n1, top = n1 + n2, bottom = n2 + if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral): + bottom = [n1 + i for i in bottom] + G.add_nodes_from(top, bipartite=0) + G.add_nodes_from(bottom, bipartite=1) + if len(G) != len(top) + len(bottom): + raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes") + G.add_edges_from((u, v) for u in top for v in bottom) + G.graph["name"] = f"complete_bipartite_graph({n1}, {n2})" + return G + + +@py_random_state(3) +@nx._dispatch(name="bipartite_configuration_model", graphs=None) +def configuration_model(aseq, bseq, create_using=None, seed=None): + """Returns a random bipartite graph from two given degree sequences. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from set A are connected to nodes in set B by choosing + randomly from the possible free stubs, one in A and one in B. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.configuration_model + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length and sum of each sequence + lena = len(aseq) + lenb = len(bseq) + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, lena, lenb) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + + # build lists of degree-repeated vertex numbers + stubs = [[v] * aseq[v] for v in range(lena)] + astubs = [x for subseq in stubs for x in subseq] + + stubs = [[v] * bseq[v - lena] for v in range(lena, lena + lenb)] + bstubs = [x for subseq in stubs for x in subseq] + + # shuffle lists + seed.shuffle(astubs) + seed.shuffle(bstubs) + + G.add_edges_from([astubs[i], bstubs[i]] for i in range(suma)) + + G.name = "bipartite_configuration_model" + return G + + +@nx._dispatch(name="bipartite_havel_hakimi_graph", graphs=None) +def havel_hakimi_graph(aseq, bseq, create_using=None): + """Returns a bipartite graph from two given degree sequences using a + Havel-Hakimi style construction. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from the set A are connected to nodes in the set B by + connecting the highest degree nodes in set A to the highest degree + nodes in set B until all stubs are connected. 
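+
+    For example, tracing the construction by hand (illustrative): with
+    `aseq = [2, 2]` and `bseq = [2, 1, 1]`, set A gets nodes 0 and 1, set B
+    gets nodes 2, 3 and 4, and the highest-degree-first pairing yields the
+    edges (0, 2), (0, 3), (1, 2) and (1, 4).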
+ + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.havel_hakimi_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length of the each sequence + naseq = len(aseq) + nbseq = len(bseq) + + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, naseq, nbseq) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + + # build list of degree-repeated vertex numbers + astubs = [[aseq[v], v] for v in range(naseq)] + bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)] + astubs.sort() + while astubs: + (degree, u) = astubs.pop() # take of largest degree node in the a set + if degree == 0: + break # done, all are zero + # connect the source to largest degree nodes in the b set + bstubs.sort() + for target in bstubs[-degree:]: + v = target[1] + G.add_edge(u, v) + target[0] -= 1 # note this updates bstubs too. + if target[0] == 0: + bstubs.remove(target) + + G.name = "bipartite_havel_hakimi_graph" + return G + + +@nx._dispatch(graphs=None) +def reverse_havel_hakimi_graph(aseq, bseq, create_using=None): + """Returns a bipartite graph from two given degree sequences using a + Havel-Hakimi style construction. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from set A are connected to nodes in the set B by connecting + the highest degree nodes in set A to the lowest degree nodes in + set B until all stubs are connected. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. 
+ To use it use nx.bipartite.reverse_havel_hakimi_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length of the each sequence + lena = len(aseq) + lenb = len(bseq) + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, lena, lenb) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + + # build list of degree-repeated vertex numbers + astubs = [[aseq[v], v] for v in range(lena)] + bstubs = [[bseq[v - lena], v] for v in range(lena, lena + lenb)] + astubs.sort() + bstubs.sort() + while astubs: + (degree, u) = astubs.pop() # take of largest degree node in the a set + if degree == 0: + break # done, all are zero + # connect the source to the smallest degree nodes in the b set + for target in bstubs[0:degree]: + v = target[1] + G.add_edge(u, v) + target[0] -= 1 # note this updates bstubs too. + if target[0] == 0: + bstubs.remove(target) + + G.name = "bipartite_reverse_havel_hakimi_graph" + return G + + +@nx._dispatch(graphs=None) +def alternating_havel_hakimi_graph(aseq, bseq, create_using=None): + """Returns a bipartite graph from two given degree sequences using + an alternating Havel-Hakimi style construction. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from the set A are connected to nodes in the set B by + connecting the highest degree nodes in set A to alternatively the + highest and the lowest degree nodes in set B until all stubs are + connected. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. 
+ To use it use nx.bipartite.alternating_havel_hakimi_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length of the each sequence + naseq = len(aseq) + nbseq = len(bseq) + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, naseq, nbseq) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + # build list of degree-repeated vertex numbers + astubs = [[aseq[v], v] for v in range(naseq)] + bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)] + while astubs: + astubs.sort() + (degree, u) = astubs.pop() # take of largest degree node in the a set + if degree == 0: + break # done, all are zero + bstubs.sort() + small = bstubs[0 : degree // 2] # add these low degree targets + large = bstubs[(-degree + degree // 2) :] # now high degree targets + stubs = [x for z in zip(large, small) for x in z] # combine, sorry + if len(stubs) < len(small) + len(large): # check for zip truncation + stubs.append(large.pop()) + for target in stubs: + v = target[1] + G.add_edge(u, v) + target[0] -= 1 # note this updates bstubs too. + if target[0] == 0: + bstubs.remove(target) + + G.name = "bipartite_alternating_havel_hakimi_graph" + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def preferential_attachment_graph(aseq, p, create_using=None, seed=None): + """Create a bipartite graph with a preferential attachment model from + a given single degree sequence. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes starting with node len(aseq). + The number of nodes in set B is random. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + p : float + Probability that a new bottom node is added. + create_using : NetworkX graph instance, optional + Return graph of this type. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + References + ---------- + .. [1] Guillaume, J.L. and Latapy, M., + Bipartite graphs as models of complex networks. + Physica A: Statistical Mechanics and its Applications, + 2006, 371(2), pp.795-813. + .. [2] Jean-Loup Guillaume and Matthieu Latapy, + Bipartite structure of all complex networks, + Inf. Process. Lett. 90, 2004, pg. 215-221 + https://doi.org/10.1016/j.ipl.2004.03.007 + + Notes + ----- + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.preferential_attachment_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + if p > 1: + raise nx.NetworkXError(f"probability {p} > 1") + + naseq = len(aseq) + G = _add_nodes_with_bipartite_label(G, naseq, 0) + vv = [[v] * aseq[v] for v in range(naseq)] + while vv: + while vv[0]: + source = vv[0][0] + vv[0].remove(source) + if seed.random() < p or len(G) == naseq: + target = len(G) + G.add_node(target, bipartite=1) + G.add_edge(source, target) + else: + bb = [[b] * G.degree(b) for b in range(naseq, len(G))] + # flatten the list of lists into a list. + bbstubs = reduce(lambda x, y: x + y, bb) + # choose preferentially a bottom node. 
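+                    # `bbstubs` repeats each existing bottom node once per
+                    # unit of its current degree, so `seed.choice(bbstubs)`
+                    # selects a bottom node with probability proportional to
+                    # its degree: the stub-list form of preferential
+                    # attachment.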
+ target = seed.choice(bbstubs) + G.add_node(target, bipartite=1) + G.add_edge(source, target) + vv.remove(vv[0]) + G.name = "bipartite_preferential_attachment_model" + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def random_graph(n, m, p, seed=None, directed=False): + """Returns a bipartite random graph. + + This is a bipartite version of the binomial (Erdős-Rényi) graph. + The graph is composed of two partitions. Set A has nodes 0 to + (n - 1) and set B has nodes n to (n + m - 1). + + Parameters + ---------- + n : int + The number of nodes in the first bipartite set. + m : int + The number of nodes in the second bipartite set. + p : float + Probability for edge creation. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True return a directed graph + + Notes + ----- + The bipartite random graph algorithm chooses each of the n*m (undirected) + or 2*nm (directed) possible edges with probability p. + + This algorithm is $O(n+m)$ where $m$ is the expected number of edges. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.random_graph + + See Also + -------- + gnp_random_graph, configuration_model + + References + ---------- + .. [1] Vladimir Batagelj and Ulrik Brandes, + "Efficient generation of large random networks", + Phys. Rev. E, 71, 036113, 2005. + """ + G = nx.Graph() + G = _add_nodes_with_bipartite_label(G, n, m) + if directed: + G = nx.DiGraph(G) + G.name = f"fast_gnp_random_graph({n},{m},{p})" + + if p <= 0: + return G + if p >= 1: + return nx.complete_bipartite_graph(n, m) + + lp = math.log(1.0 - p) + + v = 0 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= m and v < n: + w = w - m + v = v + 1 + if v < n: + G.add_edge(v, n + w) + + if directed: + # use the same algorithm to + # add edges from the "m" to "n" set + v = 0 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= m and v < n: + w = w - m + v = v + 1 + if v < n: + G.add_edge(n + w, v) + + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def gnmk_random_graph(n, m, k, seed=None, directed=False): + """Returns a random bipartite graph G_{n,m,k}. + + Produces a bipartite graph chosen randomly out of the set of all graphs + with n top nodes, m bottom nodes, and k edges. + The graph is composed of two sets of nodes. + Set A has nodes 0 to (n - 1) and set B has nodes n to (n + m - 1). + + Parameters + ---------- + n : int + The number of nodes in the first bipartite set. + m : int + The number of nodes in the second bipartite set. + k : int + The number of edges + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True return a directed graph + + Examples + -------- + from nx.algorithms import bipartite + G = bipartite.gnmk_random_graph(10,20,50) + + See Also + -------- + gnm_random_graph + + Notes + ----- + If k > m * n then a complete bipartite graph is returned. + + This graph is a bipartite version of the `G_{nm}` random graph model. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. 
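+
+    Edges are placed by rejection sampling: a random (top, bottom) pair is
+    drawn and redrawn whenever the edge already exists, so generation slows
+    down as k approaches n * m.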
+ + This function is not imported in the main namespace. + To use it use nx.bipartite.gnmk_random_graph + """ + G = nx.Graph() + G = _add_nodes_with_bipartite_label(G, n, m) + if directed: + G = nx.DiGraph(G) + G.name = f"bipartite_gnm_random_graph({n},{m},{k})" + if n == 1 or m == 1: + return G + max_edges = n * m # max_edges for bipartite networks + if k >= max_edges: # Maybe we should raise an exception here + return nx.complete_bipartite_graph(n, m, create_using=G) + + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + bottom = list(set(G) - set(top)) + edge_count = 0 + while edge_count < k: + # generate random edge,u,v + u = seed.choice(top) + v = seed.choice(bottom) + if v in G[u]: + continue + else: + G.add_edge(u, v) + edge_count += 1 + return G + + +def _add_nodes_with_bipartite_label(G, lena, lenb): + G.add_nodes_from(range(lena + lenb)) + b = dict(zip(range(lena), [0] * lena)) + b.update(dict(zip(range(lena, lena + lenb), [1] * lenb))) + nx.set_node_attributes(G, b, "bipartite") + return G diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/matching.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/matching.py new file mode 100644 index 0000000000000000000000000000000000000000..17d55614bcddae9318ae5993ee0b21573d037878 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/matching.py @@ -0,0 +1,589 @@ +# This module uses material from the Wikipedia article Hopcroft--Karp algorithm +# , accessed on +# January 3, 2015, which is released under the Creative Commons +# Attribution-Share-Alike License 3.0 +# . That article includes +# pseudocode, which has been translated into the corresponding Python code. +# +# Portions of this module use code from David Eppstein's Python Algorithms and +# Data Structures (PADS) library, which is dedicated to the public domain (for +# proof, see ). +"""Provides functions for computing maximum cardinality matchings and minimum +weight full matchings in a bipartite graph. + +If you don't care about the particular implementation of the maximum matching +algorithm, simply use the :func:`maximum_matching`. If you do care, you can +import one of the named maximum matching algorithms directly. + +For example, to find a maximum matching in the complete bipartite graph with +two vertices on the left and three vertices on the right: + +>>> G = nx.complete_bipartite_graph(2, 3) +>>> left, right = nx.bipartite.sets(G) +>>> list(left) +[0, 1] +>>> list(right) +[2, 3, 4] +>>> nx.bipartite.maximum_matching(G) +{0: 2, 1: 3, 2: 0, 3: 1} + +The dictionary returned by :func:`maximum_matching` includes a mapping for +vertices in both the left and right vertex sets. + +Similarly, :func:`minimum_weight_full_matching` produces, for a complete +weighted bipartite graph, a matching whose cardinality is the cardinality of +the smaller of the two partitions, and for which the sum of the weights of the +edges included in the matching is minimal. + +""" +import collections +import itertools + +import networkx as nx +from networkx.algorithms.bipartite import sets as bipartite_sets +from networkx.algorithms.bipartite.matrix import biadjacency_matrix + +__all__ = [ + "maximum_matching", + "hopcroft_karp_matching", + "eppstein_matching", + "to_vertex_cover", + "minimum_weight_full_matching", +] + +INFINITY = float("inf") + + +@nx._dispatch +def hopcroft_karp_matching(G, top_nodes=None): + """Returns the maximum cardinality matching of the bipartite graph `G`. 
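+
+    The Hopcroft--Karp algorithm used here runs in $O(\sqrt{n} m)$ time for
+    a graph with $n$ nodes and $m$ edges.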
+ + A matching is a set of edges that do not share any nodes. A maximum + cardinality matching is a matching with the most edges possible. It + is not always unique. Finding a matching in a bipartite graph can be + treated as a networkx flow problem. + + The functions ``hopcroft_karp_matching`` and ``maximum_matching`` + are aliases of the same function. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container of nodes + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matches`, such that + ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matches`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented with the `Hopcroft--Karp matching algorithm + `_ for + bipartite graphs. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + maximum_matching + hopcroft_karp_matching + eppstein_matching + + References + ---------- + .. [1] John E. Hopcroft and Richard M. Karp. "An n^{5 / 2} Algorithm for + Maximum Matchings in Bipartite Graphs" In: **SIAM Journal of Computing** + 2.4 (1973), pp. 225--231. . + + """ + + # First we define some auxiliary search functions. + # + # If you are a human reading these auxiliary search functions, the "global" + # variables `leftmatches`, `rightmatches`, `distances`, etc. are defined + # below the functions, so that they are initialized close to the initial + # invocation of the search functions. + def breadth_first_search(): + for v in left: + if leftmatches[v] is None: + distances[v] = 0 + queue.append(v) + else: + distances[v] = INFINITY + distances[None] = INFINITY + while queue: + v = queue.popleft() + if distances[v] < distances[None]: + for u in G[v]: + if distances[rightmatches[u]] is INFINITY: + distances[rightmatches[u]] = distances[v] + 1 + queue.append(rightmatches[u]) + return distances[None] is not INFINITY + + def depth_first_search(v): + if v is not None: + for u in G[v]: + if distances[rightmatches[u]] == distances[v] + 1: + if depth_first_search(rightmatches[u]): + rightmatches[u] = v + leftmatches[v] = u + return True + distances[v] = INFINITY + return False + return True + + # Initialize the "global" variables that maintain state during the search. + left, right = bipartite_sets(G, top_nodes) + leftmatches = {v: None for v in left} + rightmatches = {v: None for v in right} + distances = {} + queue = collections.deque() + + # Implementation note: this counter is incremented as pairs are matched but + # it is currently not used elsewhere in the computation. + num_matched_pairs = 0 + while breadth_first_search(): + for v in left: + if leftmatches[v] is None: + if depth_first_search(v): + num_matched_pairs += 1 + + # Strip the entries matched to `None`. + leftmatches = {k: v for k, v in leftmatches.items() if v is not None} + rightmatches = {k: v for k, v in rightmatches.items() if v is not None} + + # At this point, the left matches and the right matches are inverses of one + # another. 
In other words, + # + # leftmatches == {v, k for k, v in rightmatches.items()} + # + # Finally, we combine both the left matches and right matches. + return dict(itertools.chain(leftmatches.items(), rightmatches.items())) + + +@nx._dispatch +def eppstein_matching(G, top_nodes=None): + """Returns the maximum cardinality matching of the bipartite graph `G`. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matching`, such that + ``matching[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matching`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented with David Eppstein's version of the algorithm + Hopcroft--Karp algorithm (see :func:`hopcroft_karp_matching`), which + originally appeared in the `Python Algorithms and Data Structures library + (PADS) `_. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + + hopcroft_karp_matching + + """ + # Due to its original implementation, a directed graph is needed + # so that the two sets of bipartite nodes can be distinguished + left, right = bipartite_sets(G, top_nodes) + G = nx.DiGraph(G.edges(left)) + # initialize greedy matching (redundant, but faster than full search) + matching = {} + for u in G: + for v in G[u]: + if v not in matching: + matching[v] = u + break + while True: + # structure residual graph into layers + # pred[u] gives the neighbor in the previous layer for u in U + # preds[v] gives a list of neighbors in the previous layer for v in V + # unmatched gives a list of unmatched vertices in final layer of V, + # and is also used as a flag value for pred[u] when u is in the first + # layer + preds = {} + unmatched = [] + pred = {u: unmatched for u in G} + for v in matching: + del pred[matching[v]] + layer = list(pred) + + # repeatedly extend layering structure by another pair of layers + while layer and not unmatched: + newLayer = {} + for u in layer: + for v in G[u]: + if v not in preds: + newLayer.setdefault(v, []).append(u) + layer = [] + for v in newLayer: + preds[v] = newLayer[v] + if v in matching: + layer.append(matching[v]) + pred[matching[v]] = v + else: + unmatched.append(v) + + # did we finish layering without finding any alternating paths? + if not unmatched: + # TODO - The lines between --- were unused and were thus commented + # out. This whole commented chunk should be reviewed to determine + # whether it should be built upon or completely removed. + # --- + # unlayered = {} + # for u in G: + # # TODO Why is extra inner loop necessary? 
+ # for v in G[u]: + # if v not in preds: + # unlayered[v] = None + # --- + # TODO Originally, this function returned a three-tuple: + # + # return (matching, list(pred), list(unlayered)) + # + # For some reason, the documentation for this function + # indicated that the second and third elements of the returned + # three-tuple would be the vertices in the left and right vertex + # sets, respectively, that are also in the maximum independent set. + # However, what I think the author meant was that the second + # element is the list of vertices that were unmatched and the third + # element was the list of vertices that were matched. Since that + # seems to be the case, they don't really need to be returned, + # since that information can be inferred from the matching + # dictionary. + + # All the matched nodes must be a key in the dictionary + for key in matching.copy(): + matching[matching[key]] = key + return matching + + # recursively search backward through layers to find alternating paths + # recursion returns true if found path, false otherwise + def recurse(v): + if v in preds: + L = preds.pop(v) + for u in L: + if u in pred: + pu = pred.pop(u) + if pu is unmatched or recurse(pu): + matching[v] = u + return True + return False + + for v in unmatched: + recurse(v) + + +def _is_connected_by_alternating_path(G, v, matched_edges, unmatched_edges, targets): + """Returns True if and only if the vertex `v` is connected to one of + the target vertices by an alternating path in `G`. + + An *alternating path* is a path in which every other edge is in the + specified maximum matching (and the remaining edges in the path are not in + the matching). An alternating path may have matched edges in the even + positions or in the odd positions, as long as the edges alternate between + 'matched' and 'unmatched'. + + `G` is an undirected bipartite NetworkX graph. + + `v` is a vertex in `G`. + + `matched_edges` is a set of edges present in a maximum matching in `G`. + + `unmatched_edges` is a set of edges not present in a maximum + matching in `G`. + + `targets` is a set of vertices. + + """ + + def _alternating_dfs(u, along_matched=True): + """Returns True if and only if `u` is connected to one of the + targets by an alternating path. + + `u` is a vertex in the graph `G`. + + If `along_matched` is True, this step of the depth-first search + will continue only through edges in the given matching. Otherwise, it + will continue only through edges *not* in the given matching. + + """ + visited = set() + # Follow matched edges when depth is even, + # and follow unmatched edges when depth is odd. + initial_depth = 0 if along_matched else 1 + stack = [(u, iter(G[u]), initial_depth)] + while stack: + parent, children, depth = stack[-1] + valid_edges = matched_edges if depth % 2 else unmatched_edges + try: + child = next(children) + if child not in visited: + if (parent, child) in valid_edges or (child, parent) in valid_edges: + if child in targets: + return True + visited.add(child) + stack.append((child, iter(G[child]), depth + 1)) + except StopIteration: + stack.pop() + return False + + # Check for alternating paths starting with edges in the matching, then + # check for alternating paths starting with edges not in the + # matching. 
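+    # For example, in the path graph 0-1-2-3 with matching {0: 1, 2: 3},
+    # vertex 0 reaches vertex 2 along 0-1-2, which starts with the matched
+    # edge (0, 1) and continues with the unmatched edge (1, 2).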
+    return _alternating_dfs(v, along_matched=True) or _alternating_dfs(
+        v, along_matched=False
+    )
+
+
+def _connected_by_alternating_paths(G, matching, targets):
+    """Returns the set of vertices that are connected to one of the target
+    vertices by an alternating path in `G` or are themselves a target.
+
+    An *alternating path* is a path in which every other edge is in the
+    specified maximum matching (and the remaining edges in the path are not in
+    the matching). An alternating path may have matched edges in the even
+    positions or in the odd positions, as long as the edges alternate between
+    'matched' and 'unmatched'.
+
+    `G` is an undirected bipartite NetworkX graph.
+
+    `matching` is a dictionary representing a maximum matching in `G`, as
+    returned by, for example, :func:`maximum_matching`.
+
+    `targets` is a set of vertices.
+
+    """
+    # Get the set of matched edges and the set of unmatched edges. Only include
+    # one version of each undirected edge (for example, include edge (1, 2) but
+    # not edge (2, 1)). Using frozensets as an intermediary step we do not
+    # require nodes to be orderable.
+    edge_sets = {frozenset((u, v)) for u, v in matching.items()}
+    matched_edges = {tuple(edge) for edge in edge_sets}
+    unmatched_edges = {
+        (u, v) for (u, v) in G.edges() if frozenset((u, v)) not in edge_sets
+    }
+
+    return {
+        v
+        for v in G
+        if v in targets
+        or _is_connected_by_alternating_path(
+            G, v, matched_edges, unmatched_edges, targets
+        )
+    }
+
+
+@nx._dispatch
+def to_vertex_cover(G, matching, top_nodes=None):
+    """Returns the minimum vertex cover corresponding to the given maximum
+    matching of the bipartite graph `G`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+      Undirected bipartite graph
+
+    matching : dictionary
+
+      A dictionary whose keys are vertices in `G` and whose values are the
+      distinct neighbors comprising the maximum matching for `G`, as returned
+      by, for example, :func:`maximum_matching`. The dictionary *must*
+      represent the maximum matching.
+
+    top_nodes : container
+
+      Container with all nodes in one bipartite node set. If not supplied
+      it will be computed. But if more than one solution exists an exception
+      will be raised.
+
+    Returns
+    -------
+    vertex_cover : :class:`set`
+
+      The minimum vertex cover in `G`.
+
+    Raises
+    ------
+    AmbiguousSolution
+      Raised if the input bipartite graph is disconnected and no container
+      with all nodes in one bipartite set is provided. When determining
+      the nodes in each bipartite set more than one valid solution is
+      possible if the input graph is disconnected.
+
+    Notes
+    -----
+    This function is implemented using the procedure guaranteed by `Konig's
+    theorem
+    <https://en.wikipedia.org/wiki/K%C5%91nig%27s_theorem_%28graph_theory%29>`_,
+    which proves an equivalence between a maximum matching and a minimum vertex
+    cover in bipartite graphs.
+
+    Since a minimum vertex cover is the complement of a maximum independent set
+    for any graph, one can compute the maximum independent set of a bipartite
+    graph this way:
+
+    >>> G = nx.complete_bipartite_graph(2, 3)
+    >>> matching = nx.bipartite.maximum_matching(G)
+    >>> vertex_cover = nx.bipartite.to_vertex_cover(G, matching)
+    >>> independent_set = set(G) - vertex_cover
+    >>> print(list(independent_set))
+    [2, 3, 4]
+
+    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+    for further details on how bipartite graphs are handled in NetworkX.
+
+    """
+    # This is a Python implementation of the algorithm described at
+    # <https://en.wikipedia.org/wiki/K%C5%91nig%27s_theorem_%28graph_theory%29#Proof>.
+    L, R = bipartite_sets(G, top_nodes)
+    # Let U be the set of unmatched vertices in the left vertex set.
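+    # (Every matched vertex appears as a key in `matching`, so the unmatched
+    # vertices are exactly the ones missing from that dictionary.)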
+ unmatched_vertices = set(G) - set(matching) + U = unmatched_vertices & L + # Let Z be the set of vertices that are either in U or are connected to U + # by alternating paths. + Z = _connected_by_alternating_paths(G, matching, U) + # At this point, every edge either has a right endpoint in Z or a left + # endpoint not in Z. This gives us the vertex cover. + return (L - Z) | (R & Z) + + +#: Returns the maximum cardinality matching in the given bipartite graph. +#: +#: This function is simply an alias for :func:`hopcroft_karp_matching`. +maximum_matching = hopcroft_karp_matching + + +@nx._dispatch(edge_attrs="weight") +def minimum_weight_full_matching(G, top_nodes=None, weight="weight"): + r"""Returns a minimum weight full matching of the bipartite graph `G`. + + Let :math:`G = ((U, V), E)` be a weighted bipartite graph with real weights + :math:`w : E \to \mathbb{R}`. This function then produces a matching + :math:`M \subseteq E` with cardinality + + .. math:: + \lvert M \rvert = \min(\lvert U \rvert, \lvert V \rvert), + + which minimizes the sum of the weights of the edges included in the + matching, :math:`\sum_{e \in M} w(e)`, or raises an error if no such + matching exists. + + When :math:`\lvert U \rvert = \lvert V \rvert`, this is commonly + referred to as a perfect matching; here, since we allow + :math:`\lvert U \rvert` and :math:`\lvert V \rvert` to differ, we + follow Karp [1]_ and refer to the matching as *full*. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. + + weight : string, optional (default='weight') + + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matches`, such that + ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matches`. + + Raises + ------ + ValueError + Raised if no full matching exists. + + ImportError + Raised if SciPy is not available. + + Notes + ----- + The problem of determining a minimum weight full matching is also known as + the rectangular linear assignment problem. This implementation defers the + calculation of the assignment to SciPy. + + References + ---------- + .. [1] Richard Manning Karp: + An algorithm to Solve the m x n Assignment Problem in Expected Time + O(mn log n). + Networks, 10(2):143–152, 1980. + + """ + import numpy as np + import scipy as sp + + left, right = nx.bipartite.sets(G, top_nodes) + U = list(left) + V = list(right) + # We explicitly create the biadjacency matrix having infinities + # where edges are missing (as opposed to zeros, which is what one would + # get by using toarray on the sparse matrix). + weights_sparse = biadjacency_matrix( + G, row_order=U, column_order=V, weight=weight, format="coo" + ) + weights = np.full(weights_sparse.shape, np.inf) + weights[weights_sparse.row, weights_sparse.col] = weights_sparse.data + left_matches = sp.optimize.linear_sum_assignment(weights) + d = {U[u]: V[v] for u, v in zip(*left_matches)} + # d will contain the matching from edges in left to right; we need to + # add the ones from right to left as well. 
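+    # (Inverting the dictionary is safe here: a matching is one-to-one, so the
+    # right-to-left entries cannot collide with one another.)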
+ d.update({v: u for u, v in d.items()}) + return d diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/matrix.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..e567967757451d8fa87b3736a8e95b9c59ce17ca --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/matrix.py @@ -0,0 +1,167 @@ +""" +==================== +Biadjacency matrices +==================== +""" +import itertools + +import networkx as nx +from networkx.convert_matrix import _generate_weighted_edges + +__all__ = ["biadjacency_matrix", "from_biadjacency_matrix"] + + +@nx._dispatch(edge_attrs="weight") +def biadjacency_matrix( + G, row_order, column_order=None, dtype=None, weight="weight", format="csr" +): + r"""Returns the biadjacency matrix of the bipartite graph G. + + Let `G = (U, V, E)` be a bipartite graph with node sets + `U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency + matrix [1]_ is the `r` x `s` matrix `B` in which `b_{i,j} = 1` + if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is + not `None` and matches the name of an edge attribute, its value is + used instead of 1. + + Parameters + ---------- + G : graph + A NetworkX graph + + row_order : list of nodes + The rows of the matrix are ordered according to the list of nodes. + + column_order : list, optional + The columns of the matrix are ordered according to the list of nodes. + If column_order is None, then the ordering of columns is arbitrary. + + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. If None, then the + NumPy default is used. + + weight : string or None, optional (default='weight') + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} + The type of the matrix to be returned (default 'csr'). For + some algorithms different implementations of sparse matrices + can perform better. See [2]_ for details. + + Returns + ------- + M : SciPy sparse array + Biadjacency matrix representation of the bipartite graph G. + + Notes + ----- + No attempt is made to check that the input graph is bipartite. + + For directed bipartite graphs only successors are considered as neighbors. + To obtain an adjacency matrix with ones (or weight values) for both + predecessors and successors you have to generate two biadjacency matrices + where the rows of one of them are the columns of the other, and then add + one to the transpose of the other. + + See Also + -------- + adjacency_matrix + from_biadjacency_matrix + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph + .. [2] Scipy Dev. References, "Sparse Matrices", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + + nlen = len(row_order) + if nlen == 0: + raise nx.NetworkXError("row_order is empty list") + if len(row_order) != len(set(row_order)): + msg = "Ambiguous ordering: `row_order` contained duplicates." + raise nx.NetworkXError(msg) + if column_order is None: + column_order = list(set(G) - set(row_order)) + mlen = len(column_order) + if len(column_order) != len(set(column_order)): + msg = "Ambiguous ordering: `column_order` contained duplicates." 
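+        # Duplicate labels would make the column index mapping built below
+        # ambiguous, so fail fast rather than return a wrong matrix.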
+ raise nx.NetworkXError(msg) + + row_index = dict(zip(row_order, itertools.count())) + col_index = dict(zip(column_order, itertools.count())) + + if G.number_of_edges() == 0: + row, col, data = [], [], [] + else: + row, col, data = zip( + *( + (row_index[u], col_index[v], d.get(weight, 1)) + for u, v, d in G.edges(row_order, data=True) + if u in row_index and v in col_index + ) + ) + A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, mlen), dtype=dtype) + try: + return A.asformat(format) + except ValueError as err: + raise nx.NetworkXError(f"Unknown sparse array format: {format}") from err + + +@nx._dispatch(graphs=None) +def from_biadjacency_matrix(A, create_using=None, edge_attribute="weight"): + r"""Creates a new bipartite graph from a biadjacency matrix given as a + SciPy sparse array. + + Parameters + ---------- + A: scipy sparse array + A biadjacency matrix representation of a graph + + create_using: NetworkX graph + Use specified graph for result. The default is Graph() + + edge_attribute: string + Name of edge attribute to store matrix numeric value. The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + The nodes are labeled with the attribute `bipartite` set to an integer + 0 or 1 representing membership in part 0 or part 1 of the bipartite graph. + + If `create_using` is an instance of :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph` and the entries of `A` are of + type :class:`int`, then this function returns a multigraph (of the same + type as `create_using`) with parallel edges. In this case, `edge_attribute` + will be ignored. + + See Also + -------- + biadjacency_matrix + from_numpy_array + + References + ---------- + [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph + """ + G = nx.empty_graph(0, create_using) + n, m = A.shape + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n), bipartite=0) + G.add_nodes_from(range(n, n + m), bipartite=1) + # Create an iterable over (u, v, w) triples and for each triple, add an + # edge from u to v with weight w. + triples = ((u, n + v, d) for (u, v, d) in _generate_weighted_edges(A)) + # If the entries in the adjacency matrix are integers and the graph is a + # multigraph, then create parallel edges, each with weight 1, for each + # entry in the adjacency matrix. Otherwise, create one edge for each + # positive entry in the adjacency matrix and set the weight of that edge to + # be the entry in the matrix. 
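+    # For example, an integer entry 3 for the pair (u, v) yields the triple
+    # (u, n + v, 1) three times, i.e. three parallel edges of weight 1.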
+    if A.dtype.kind in ("i", "u") and G.is_multigraph():
+        chain = itertools.chain.from_iterable
+        triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
+    G.add_weighted_edges_from(triples, weight=edge_attribute)
+    return G
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/projection.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/projection.py
new file mode 100644
index 0000000000000000000000000000000000000000..57f960e13b3befbb575b7b318883cc81aafbecd8
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/projection.py
@@ -0,0 +1,528 @@
+"""One-mode (unipartite) projections of bipartite graphs."""
+import networkx as nx
+from networkx.exception import NetworkXAlgorithmError
+from networkx.utils import not_implemented_for
+
+__all__ = [
+    "projected_graph",
+    "weighted_projected_graph",
+    "collaboration_weighted_projected_graph",
+    "overlap_weighted_projected_graph",
+    "generic_weighted_projected_graph",
+]
+
+
+@nx._dispatch(graphs="B", preserve_node_attrs=True, preserve_graph_attrs=True)
+def projected_graph(B, nodes, multigraph=False):
+    r"""Returns the projection of B onto one of its node sets.
+
+    Returns the graph G that is the projection of the bipartite graph B
+    onto the specified nodes. They retain their attributes and are connected
+    in G if they have a common neighbor in B.
+
+    Parameters
+    ----------
+    B : NetworkX graph
+        The input graph should be bipartite.
+
+    nodes : list or iterable
+        Nodes to project onto (the "bottom" nodes).
+
+    multigraph: bool (default=False)
+        If True return a multigraph where the multiple edges represent multiple
+        shared neighbors. The edge key in the multigraph is assigned to the
+        label of the neighbor.
+
+    Returns
+    -------
+    Graph : NetworkX graph or multigraph
+        A graph that is the projection onto the given nodes.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> B = nx.path_graph(4)
+    >>> G = bipartite.projected_graph(B, [1, 3])
+    >>> list(G)
+    [1, 3]
+    >>> list(G.edges())
+    [(1, 3)]
+
+    If nodes `a` and `b` are connected through both nodes 1 and 2 then
+    building a multigraph results in two edges in the projection onto
+    [`a`, `b`]:
+
+    >>> B = nx.Graph()
+    >>> B.add_edges_from([("a", 1), ("b", 1), ("a", 2), ("b", 2)])
+    >>> G = bipartite.projected_graph(B, ["a", "b"], multigraph=True)
+    >>> print([sorted((u, v)) for u, v in G.edges()])
+    [['a', 'b'], ['a', 'b']]
+
+    Notes
+    -----
+    No attempt is made to verify that the input graph B is bipartite.
+    Returns a simple graph that is the projection of the bipartite graph B
+    onto the set of nodes given in list nodes. If multigraph=True then
+    a multigraph is returned with an edge for every shared neighbor.
+
+    Directed graphs are allowed as input. The output will also then
+    be a directed graph with edges if there is a directed path between
+    the nodes.
+
+    The graph and node properties are (shallow) copied to the projected graph.
+
+    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+    for further details on how bipartite graphs are handled in NetworkX.
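+
+    For example, projecting a directed path onto its endpoint nodes keeps an
+    edge only in the direction of reachability:
+
+    >>> B = nx.DiGraph([(0, "a"), ("a", 1)])
+    >>> G = bipartite.projected_graph(B, [0, 1])
+    >>> list(G.edges())
+    [(0, 1)]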
+ + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph + """ + if B.is_multigraph(): + raise nx.NetworkXError("not defined for multigraphs") + if B.is_directed(): + directed = True + if multigraph: + G = nx.MultiDiGraph() + else: + G = nx.DiGraph() + else: + directed = False + if multigraph: + G = nx.MultiGraph() + else: + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + nbrs2 = {v for nbr in B[u] for v in B[nbr] if v != u} + if multigraph: + for n in nbrs2: + if directed: + links = set(B[u]) & set(B.pred[n]) + else: + links = set(B[u]) & set(B[n]) + for l in links: + if not G.has_edge(u, n, l): + G.add_edge(u, n, key=l) + else: + G.add_edges_from((u, n) for n in nbrs2) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B") +def weighted_projected_graph(B, nodes, ratio=False): + r"""Returns a weighted projection of B onto one of its node sets. + + The weighted projected graph is the projection of the bipartite + network B onto the specified nodes with weights representing the + number of shared neighbors or the ratio between actual shared + neighbors and possible shared neighbors if ``ratio is True`` [1]_. + The nodes retain their attributes and are connected in the resulting + graph if they have an edge to a common node in the original graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Distinct nodes to project onto (the "bottom" nodes). + + ratio: Bool (default=False) + If True, edge weight is the ratio between actual shared neighbors + and maximum possible shared neighbors (i.e., the size of the other + node set). If False, edges weight is the number of shared neighbors. + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(4) + >>> G = bipartite.weighted_projected_graph(B, [1, 3]) + >>> list(G) + [1, 3] + >>> list(G.edges(data=True)) + [(1, 3, {'weight': 1})] + >>> G = bipartite.weighted_projected_graph(B, [1, 3], ratio=True) + >>> list(G.edges(data=True)) + [(1, 3, {'weight': 0.5})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite, or that + the input nodes are distinct. However, if the length of the input nodes is + greater than or equal to the nodes in the graph B, an exception is raised. + If the nodes are not distinct but don't raise this error, the output weights + will be incorrect. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + collaboration_weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph + projected_graph + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. 
+ """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + n_top = len(B) - len(nodes) + + if n_top < 1: + raise NetworkXAlgorithmError( + f"the size of the nodes to project onto ({len(nodes)}) is >= the graph size ({len(B)}).\n" + "They are either not a valid bipartite partition or contain duplicates" + ) + + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + common = unbrs & vnbrs + if not ratio: + weight = len(common) + else: + weight = len(common) / n_top + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B") +def collaboration_weighted_projected_graph(B, nodes): + r"""Newman's weighted projection of B onto one of its node sets. + + The collaboration weighted projection is the projection of the + bipartite network B onto the specified nodes with weights assigned + using Newman's collaboration model [1]_: + + .. math:: + + w_{u, v} = \sum_k \frac{\delta_{u}^{k} \delta_{v}^{k}}{d_k - 1} + + where `u` and `v` are nodes from the bottom bipartite node set, + and `k` is a node of the top node set. + The value `d_k` is the degree of node `k` in the bipartite + network and `\delta_{u}^{k}` is 1 if node `u` is + linked to node `k` in the original bipartite graph or 0 otherwise. + + The nodes retain their attributes and are connected in the resulting + graph if have an edge to a common node in the original bipartite + graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> B.add_edge(1, 5) + >>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5]) + >>> list(G) + [0, 2, 4, 5] + >>> for edge in sorted(G.edges(data=True)): + ... print(edge) + ... + (0, 2, {'weight': 0.5}) + (0, 5, {'weight': 0.5}) + (2, 4, {'weight': 1.0}) + (2, 5, {'weight': 0.5}) + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Scientific collaboration networks: II. + Shortest paths, weighted networks, and centrality, + M. E. J. Newman, Phys. Rev. E 64, 016132 (2001). 
+ """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr] if n != u} + for v in nbrs2: + vnbrs = set(pred[v]) + common_degree = (len(B[n]) for n in unbrs & vnbrs) + weight = sum(1.0 / (deg - 1) for deg in common_degree if deg > 1) + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B") +def overlap_weighted_projected_graph(B, nodes, jaccard=True): + r"""Overlap weighted projection of B onto one of its node sets. + + The overlap weighted projection is the projection of the bipartite + network B onto the specified nodes with weights representing + the Jaccard index between the neighborhoods of the two nodes in the + original bipartite network [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|} + + or if the parameter 'jaccard' is False, the fraction of common + neighbors by minimum of both nodes degree in the original + bipartite graph [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{min(|N(u)|, |N(v)|)} + + The nodes retain their attributes and are connected in the resulting + graph if have an edge to a common node in the original bipartite graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + jaccard: Bool (default=True) + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> nodes = [0, 2, 4] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes) + >>> list(G) + [0, 2, 4] + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 0.5}), (2, 4, {'weight': 0.5})] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes, jaccard=False) + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. Analyzing Affiliation + Networks. In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + if jaccard: + wt = len(unbrs & vnbrs) / len(unbrs | vnbrs) + else: + wt = len(unbrs & vnbrs) / min(len(unbrs), len(vnbrs)) + G.add_edge(u, v, weight=wt) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B", preserve_all_attrs=True) +def generic_weighted_projected_graph(B, nodes, weight_function=None): + r"""Weighted projection of B with a user-specified weight function. 
+
+    The bipartite network B is projected on to the specified nodes
+    with weights computed by a user-specified function. This function
+    must accept as parameters the graph itself and two nodes, and it
+    must return an integer or a float.
+
+    The nodes retain their attributes and are connected in the resulting graph
+    if they have an edge to a common node in the original graph.
+
+    Parameters
+    ----------
+    B : NetworkX graph
+        The input graph should be bipartite.
+
+    nodes : list or iterable
+        Nodes to project onto (the "bottom" nodes).
+
+    weight_function : function
+        This function must accept as parameters the same input graph that is
+        passed to this function, and two nodes; it must return an integer or a
+        float. The default function computes the number of shared neighbors.
+
+    Returns
+    -------
+    Graph : NetworkX graph
+        A graph that is the projection onto the given nodes.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import bipartite
+    >>> # Define some custom weight functions
+    >>> def jaccard(G, u, v):
+    ...     unbrs = set(G[u])
+    ...     vnbrs = set(G[v])
+    ...     return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
+    ...
+    >>> def my_weight(G, u, v, weight="weight"):
+    ...     w = 0
+    ...     for nbr in set(G[u]) & set(G[v]):
+    ...         w += G[u][nbr].get(weight, 1) + G[v][nbr].get(weight, 1)
+    ...     return w
+    ...
+    >>> # A complete bipartite graph with 4 nodes and 4 edges
+    >>> B = nx.complete_bipartite_graph(2, 2)
+    >>> # Add some arbitrary weight to the edges
+    >>> for i, (u, v) in enumerate(B.edges()):
+    ...     B.edges[u, v]["weight"] = i + 1
+    ...
+    >>> for edge in B.edges(data=True):
+    ...     print(edge)
+    ...
+    (0, 2, {'weight': 1})
+    (0, 3, {'weight': 2})
+    (1, 2, {'weight': 3})
+    (1, 3, {'weight': 4})
+    >>> # By default, the weight is the number of shared neighbors
+    >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1])
+    >>> print(list(G.edges(data=True)))
+    [(0, 1, {'weight': 2})]
+    >>> # To specify a custom weight function use the weight_function parameter
+    >>> G = bipartite.generic_weighted_projected_graph(
+    ...     B, [0, 1], weight_function=jaccard
+    ... )
+    >>> print(list(G.edges(data=True)))
+    [(0, 1, {'weight': 1.0})]
+    >>> G = bipartite.generic_weighted_projected_graph(
+    ...     B, [0, 1], weight_function=my_weight
+    ... )
+    >>> print(list(G.edges(data=True)))
+    [(0, 1, {'weight': 10})]
+
+    Notes
+    -----
+    No attempt is made to verify that the input graph B is bipartite.
+    The graph and node properties are (shallow) copied to the projected graph.
+
+    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+    for further details on how bipartite graphs are handled in NetworkX.
+
+    See Also
+    --------
+    is_bipartite,
+    is_bipartite_node_set,
+    sets,
+    weighted_projected_graph,
+    collaboration_weighted_projected_graph,
+    overlap_weighted_projected_graph,
+    projected_graph
+
+    """
+    if B.is_directed():
+        pred = B.pred
+        G = nx.DiGraph()
+    else:
+        pred = B.adj
+        G = nx.Graph()
+    if weight_function is None:
+
+        def weight_function(G, u, v):
+            # Notice that we use set(pred[v]) for handling the directed case.
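+            # For an undirected graph ``pred`` is just ``B.adj``, so this
+            # counts the neighbors that u and v share.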
+ return len(set(G[u]) & set(pred[v])) + + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + nbrs2 = {n for nbr in set(B[u]) for n in B[nbr]} - {u} + for v in nbrs2: + weight = weight_function(B, u, v) + G.add_edge(u, v, weight=weight) + return G diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/redundancy.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/redundancy.py new file mode 100644 index 0000000000000000000000000000000000000000..04b3ae9ca756facd5854caac37cf7fcf2ec6d431 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/redundancy.py @@ -0,0 +1,111 @@ +"""Node redundancy for bipartite graphs.""" +from itertools import combinations + +import networkx as nx +from networkx import NetworkXError + +__all__ = ["node_redundancy"] + + +@nx._dispatch +def node_redundancy(G, nodes=None): + r"""Computes the node redundancy coefficients for the nodes in the bipartite + graph `G`. + + The redundancy coefficient of a node `v` is the fraction of pairs of + neighbors of `v` that are both linked to other nodes. In a one-mode + projection these nodes would be linked together even if `v` were + not there. + + More formally, for any vertex `v`, the *redundancy coefficient of `v`* is + defined by + + .. math:: + + rc(v) = \frac{|\{\{u, w\} \subseteq N(v), + \: \exists v' \neq v,\: (v',u) \in E\: + \mathrm{and}\: (v',w) \in E\}|}{ \frac{|N(v)|(|N(v)|-1)}{2}}, + + where `N(v)` is the set of neighbors of `v` in `G`. + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or iterable (optional) + Compute redundancy for these nodes. The default is all nodes in G. + + Returns + ------- + redundancy : dictionary + A dictionary keyed by node with the node redundancy value. + + Examples + -------- + Compute the redundancy coefficient of each node in a graph:: + + >>> from networkx.algorithms import bipartite + >>> G = nx.cycle_graph(4) + >>> rc = bipartite.node_redundancy(G) + >>> rc[0] + 1.0 + + Compute the average redundancy for the graph:: + + >>> from networkx.algorithms import bipartite + >>> G = nx.cycle_graph(4) + >>> rc = bipartite.node_redundancy(G) + >>> sum(rc.values()) / len(G) + 1.0 + + Compute the average redundancy for a set of nodes:: + + >>> from networkx.algorithms import bipartite + >>> G = nx.cycle_graph(4) + >>> rc = bipartite.node_redundancy(G) + >>> nodes = [0, 2] + >>> sum(rc[n] for n in nodes) / len(nodes) + 1.0 + + Raises + ------ + NetworkXError + If any of the nodes in the graph (or in `nodes`, if specified) has + (out-)degree less than two (which would result in division by zero, + according to the definition of the redundancy coefficient). + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. + + """ + if nodes is None: + nodes = G + if any(len(G[v]) < 2 for v in nodes): + raise NetworkXError( + "Cannot compute redundancy coefficient for a node" + " that has fewer than two neighbors." + ) + # TODO This can be trivially parallelized. + return {v: _node_redundancy(G, v) for v in nodes} + + +def _node_redundancy(G, v): + """Returns the redundancy of the node `v` in the bipartite graph `G`. + + If `G` is a graph with `n` nodes, the redundancy of a node is the ratio + of the "overlap" of `v` to the maximum possible overlap of `v` + according to its degree. 
The overlap of `v` is the number of pairs of + neighbors that have mutual neighbors themselves, other than `v`. + + `v` must have at least two neighbors in `G`. + + """ + n = len(G[v]) + overlap = sum( + 1 for (u, w) in combinations(G[v], 2) if (set(G[u]) & set(G[w])) - {v} + ) + return (2 * overlap) / (n * (n - 1)) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/spectral.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..f4b414243ac8faa2cb10f17a1e93f894b4202034 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/spectral.py @@ -0,0 +1,68 @@ +""" +Spectral bipartivity measure. +""" +import networkx as nx + +__all__ = ["spectral_bipartivity"] + + +@nx._dispatch(edge_attrs="weight") +def spectral_bipartivity(G, nodes=None, weight="weight"): + """Returns the spectral bipartivity. + + Parameters + ---------- + G : NetworkX graph + + nodes : list or container optional(default is all nodes) + Nodes to return value of spectral bipartivity contribution. + + weight : string or None optional (default = 'weight') + Edge data key to use for edge weights. If None, weights set to 1. + + Returns + ------- + sb : float or dict + A single number if the keyword nodes is not specified, or + a dictionary keyed by node with the spectral bipartivity contribution + of that node as the value. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> bipartite.spectral_bipartivity(G) + 1.0 + + Notes + ----- + This implementation uses Numpy (dense) matrices which are not efficient + for storing large sparse graphs. + + See Also + -------- + color + + References + ---------- + .. [1] E. Estrada and J. A. 
Rodríguez-Velázquez, "Spectral measures of + bipartivity in complex networks", PhysRev E 72, 046105 (2005) + """ + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist, weight=weight) + expA = sp.linalg.expm(A) + expmA = sp.linalg.expm(-A) + coshA = 0.5 * (expA + expmA) + if nodes is None: + # return single number for entire graph + return coshA.diagonal().sum() / expA.diagonal().sum() + else: + # contribution for individual nodes + index = dict(zip(nodelist, range(len(nodelist)))) + sb = {} + for n in nodes: + i = index[n] + sb[n] = coshA[i, i] / expA[i, i] + return sb diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d400187963370cd205606aa38f10034a2e04e23c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45c53b748917bcdefa4598872746e03c0e955a8b Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94642c4c97ef887cff93313b9e930afed26f1d30 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d76c5ffdf0f70c49c771bf48b53b09cb8a26bb47 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..042a8398a29ab921bdf00d79acbdb070b48b8949 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5a07e55180cbee391cbf2d4232405745b9d4b01f Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..795de02419a245edb6c68a993bbe2cecf2bec366 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb9a0119981383fa8f447b3074c06a5f76efd2f8 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45cb39e8f833afaa86f9c0a30c947bcbe8510670 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb31d5c72e9fdc36e278a5308c2d9cac51da7177 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..668787621cff256777ce7ba8be4b68228ee1af5c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9142ba6282c813912f6645dfb2247c217cde4f95 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92387bb496cd6d523efd04d46555ecff0c76d5b8 Binary files /dev/null and 
b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_basic.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..655506b4f74110b57cb37db277e2be50bb0be8f4 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_basic.py @@ -0,0 +1,125 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + + +class TestBipartiteBasic: + def test_is_bipartite(self): + assert bipartite.is_bipartite(nx.path_graph(4)) + assert bipartite.is_bipartite(nx.DiGraph([(1, 0)])) + assert not bipartite.is_bipartite(nx.complete_graph(3)) + + def test_bipartite_color(self): + G = nx.path_graph(4) + c = bipartite.color(G) + assert c == {0: 1, 1: 0, 2: 1, 3: 0} + + def test_not_bipartite_color(self): + with pytest.raises(nx.NetworkXError): + c = bipartite.color(nx.complete_graph(4)) + + def test_bipartite_directed(self): + G = bipartite.random_graph(10, 10, 0.1, directed=True) + assert bipartite.is_bipartite(G) + + def test_bipartite_sets(self): + G = nx.path_graph(4) + X, Y = bipartite.sets(G) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_directed(self): + G = nx.path_graph(4) + D = G.to_directed() + X, Y = bipartite.sets(D) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_given_top_nodes(self): + G = nx.path_graph(4) + top_nodes = [0, 2] + X, Y = bipartite.sets(G, top_nodes) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + G = nx.path_graph(4) + G.add_edges_from([(5, 6), (6, 7)]) + X, Y = bipartite.sets(G) + + def test_is_bipartite_node_set(self): + G = nx.path_graph(4) + + with pytest.raises(nx.AmbiguousSolution): + bipartite.is_bipartite_node_set(G, [1, 1, 2, 3]) + + assert bipartite.is_bipartite_node_set(G, [0, 2]) + assert bipartite.is_bipartite_node_set(G, [1, 3]) + assert not bipartite.is_bipartite_node_set(G, [1, 2]) + G.add_edge(10, 20) + assert bipartite.is_bipartite_node_set(G, [0, 2, 10]) + assert bipartite.is_bipartite_node_set(G, [0, 2, 20]) + assert bipartite.is_bipartite_node_set(G, [1, 3, 10]) + assert bipartite.is_bipartite_node_set(G, [1, 3, 20]) + + def test_bipartite_density(self): + G = nx.path_graph(5) + X, Y = bipartite.sets(G) + density = len(list(G.edges())) / (len(X) * len(Y)) + assert bipartite.density(G, X) == density + D = nx.DiGraph(G.edges()) + assert bipartite.density(D, X) == density / 2.0 + assert bipartite.density(nx.Graph(), {}) == 0.0 + + def test_bipartite_degrees(self): + G = nx.path_graph(5) + X = {1, 3} + Y = {0, 2, 4} + u, d = bipartite.degrees(G, Y) + assert dict(u) == {1: 2, 3: 2} + assert dict(d) == {0: 1, 2: 2, 4: 1} + + def test_bipartite_weighted_degrees(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=0.1, other=0.2) + X = {1, 3} + Y = {0, 2, 4} + u, d = bipartite.degrees(G, Y, weight="weight") + assert dict(u) == {1: 1.1, 3: 2} + assert dict(d) == {0: 0.1, 2: 2, 4: 1} + u, d = bipartite.degrees(G, Y, weight="other") + assert dict(u) == {1: 1.2, 3: 2} + assert dict(d) == {0: 0.2, 2: 2, 4: 1} + + def test_biadjacency_matrix_weight(self): + pytest.importorskip("scipy") + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2, other=4) + X = [1, 3] + Y = [0, 2, 4] + M = bipartite.biadjacency_matrix(G, X, weight="weight") + assert M[0, 0] == 2 
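+        # Row 0 of M corresponds to node X[0] == 1 and column 0 to Y[0] == 0,
+        # so M[0, 0] reads the weight stored on edge (0, 1) above.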
+ M = bipartite.biadjacency_matrix(G, X, weight="other") + assert M[0, 0] == 4 + + def test_biadjacency_matrix(self): + pytest.importorskip("scipy") + tops = [2, 5, 10] + bots = [5, 10, 15] + for i in range(len(tops)): + G = bipartite.random_graph(tops[i], bots[i], 0.2) + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + M = bipartite.biadjacency_matrix(G, top) + assert M.shape[0] == tops[i] + assert M.shape[1] == bots[i] + + def test_biadjacency_matrix_order(self): + pytest.importorskip("scipy") + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2) + X = [3, 1] + Y = [4, 2, 0] + M = bipartite.biadjacency_matrix(G, X, Y, weight="weight") + assert M[1, 2] == 2 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..19fb5d117be94c688616a394ea3322e93bfa3e00 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py @@ -0,0 +1,192 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + + +class TestBipartiteCentrality: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.K3 = nx.complete_bipartite_graph(3, 3) + cls.C4 = nx.cycle_graph(4) + cls.davis = nx.davis_southern_women_graph() + cls.top_nodes = [ + n for n, d in cls.davis.nodes(data=True) if d["bipartite"] == 0 + ] + + def test_degree_centrality(self): + d = bipartite.degree_centrality(self.P4, [1, 3]) + answer = {0: 0.5, 1: 1.0, 2: 1.0, 3: 0.5} + assert d == answer + d = bipartite.degree_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert d == answer + d = bipartite.degree_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert d == answer + + def test_betweenness_centrality(self): + c = bipartite.betweenness_centrality(self.P4, [1, 3]) + answer = {0: 0.0, 1: 1.0, 2: 1.0, 3: 0.0} + assert c == answer + c = bipartite.betweenness_centrality(self.K3, [0, 1, 2]) + answer = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.125} + assert c == answer + c = bipartite.betweenness_centrality(self.C4, [0, 2]) + answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + assert c == answer + + def test_closeness_centrality(self): + c = bipartite.closeness_centrality(self.P4, [1, 3]) + answer = {0: 2.0 / 3, 1: 1.0, 2: 1.0, 3: 2.0 / 3} + assert c == answer + c = bipartite.closeness_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert c == answer + c = bipartite.closeness_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert c == answer + G = nx.Graph() + G.add_node(0) + G.add_node(1) + c = bipartite.closeness_centrality(G, [0]) + assert c == {0: 0.0, 1: 0.0} + c = bipartite.closeness_centrality(G, [1]) + assert c == {0: 0.0, 1: 0.0} + + def test_bipartite_closeness_centrality_unconnected(self): + G = nx.complete_bipartite_graph(3, 3) + G.add_edge(6, 7) + c = bipartite.closeness_centrality(G, [0, 2, 4, 6], normalized=False) + answer = { + 0: 10.0 / 7, + 2: 10.0 / 7, + 4: 10.0 / 7, + 6: 10.0, + 1: 10.0 / 7, + 3: 10.0 / 7, + 5: 10.0 / 7, + 7: 10.0, + } + assert c == answer + + def test_davis_degree_centrality(self): + G = self.davis + deg = bipartite.degree_centrality(G, self.top_nodes) + answer = { + "E8": 0.78, + "E9": 0.67, + "E7": 0.56, + "Nora Fayette": 0.57, + "Evelyn Jefferson": 0.57, + "Theresa Anderson": 0.57, + 
"E6": 0.44, + "Sylvia Avondale": 0.50, + "Laura Mandeville": 0.50, + "Brenda Rogers": 0.50, + "Katherina Rogers": 0.43, + "E5": 0.44, + "Helen Lloyd": 0.36, + "E3": 0.33, + "Ruth DeSand": 0.29, + "Verne Sanderson": 0.29, + "E12": 0.33, + "Myra Liddel": 0.29, + "E11": 0.22, + "Eleanor Nye": 0.29, + "Frances Anderson": 0.29, + "Pearl Oglethorpe": 0.21, + "E4": 0.22, + "Charlotte McDowd": 0.29, + "E10": 0.28, + "Olivia Carleton": 0.14, + "Flora Price": 0.14, + "E2": 0.17, + "E1": 0.17, + "Dorothy Murchison": 0.14, + "E13": 0.17, + "E14": 0.17, + } + for node, value in answer.items(): + assert value == pytest.approx(deg[node], abs=1e-2) + + def test_davis_betweenness_centrality(self): + G = self.davis + bet = bipartite.betweenness_centrality(G, self.top_nodes) + answer = { + "E8": 0.24, + "E9": 0.23, + "E7": 0.13, + "Nora Fayette": 0.11, + "Evelyn Jefferson": 0.10, + "Theresa Anderson": 0.09, + "E6": 0.07, + "Sylvia Avondale": 0.07, + "Laura Mandeville": 0.05, + "Brenda Rogers": 0.05, + "Katherina Rogers": 0.05, + "E5": 0.04, + "Helen Lloyd": 0.04, + "E3": 0.02, + "Ruth DeSand": 0.02, + "Verne Sanderson": 0.02, + "E12": 0.02, + "Myra Liddel": 0.02, + "E11": 0.02, + "Eleanor Nye": 0.01, + "Frances Anderson": 0.01, + "Pearl Oglethorpe": 0.01, + "E4": 0.01, + "Charlotte McDowd": 0.01, + "E10": 0.01, + "Olivia Carleton": 0.01, + "Flora Price": 0.01, + "E2": 0.00, + "E1": 0.00, + "Dorothy Murchison": 0.00, + "E13": 0.00, + "E14": 0.00, + } + for node, value in answer.items(): + assert value == pytest.approx(bet[node], abs=1e-2) + + def test_davis_closeness_centrality(self): + G = self.davis + clos = bipartite.closeness_centrality(G, self.top_nodes) + answer = { + "E8": 0.85, + "E9": 0.79, + "E7": 0.73, + "Nora Fayette": 0.80, + "Evelyn Jefferson": 0.80, + "Theresa Anderson": 0.80, + "E6": 0.69, + "Sylvia Avondale": 0.77, + "Laura Mandeville": 0.73, + "Brenda Rogers": 0.73, + "Katherina Rogers": 0.73, + "E5": 0.59, + "Helen Lloyd": 0.73, + "E3": 0.56, + "Ruth DeSand": 0.71, + "Verne Sanderson": 0.71, + "E12": 0.56, + "Myra Liddel": 0.69, + "E11": 0.54, + "Eleanor Nye": 0.67, + "Frances Anderson": 0.67, + "Pearl Oglethorpe": 0.67, + "E4": 0.54, + "Charlotte McDowd": 0.60, + "E10": 0.55, + "Olivia Carleton": 0.59, + "Flora Price": 0.59, + "E2": 0.52, + "E1": 0.52, + "Dorothy Murchison": 0.65, + "E13": 0.52, + "E14": 0.52, + } + for node, value in answer.items(): + assert value == pytest.approx(clos[node], abs=1e-2) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..72e2dbadd64e9e768d1541b2ce742c2b62278929 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py @@ -0,0 +1,84 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.algorithms.bipartite.cluster import cc_dot, cc_max, cc_min + + +def test_pairwise_bipartite_cc_functions(): + # Test functions for different kinds of bipartite clustering coefficients + # between pairs of nodes using 3 example graphs from figure 5 p. 
40 + # Latapy et al (2008) + G1 = nx.Graph([(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7)]) + G2 = nx.Graph([(0, 2), (0, 3), (0, 4), (1, 3), (1, 4), (1, 5)]) + G3 = nx.Graph( + [(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9)] + ) + result = { + 0: [1 / 3.0, 2 / 3.0, 2 / 5.0], + 1: [1 / 2.0, 2 / 3.0, 2 / 3.0], + 2: [2 / 8.0, 2 / 5.0, 2 / 5.0], + } + for i, G in enumerate([G1, G2, G3]): + assert bipartite.is_bipartite(G) + assert cc_dot(set(G[0]), set(G[1])) == result[i][0] + assert cc_min(set(G[0]), set(G[1])) == result[i][1] + assert cc_max(set(G[0]), set(G[1])) == result[i][2] + + +def test_star_graph(): + G = nx.star_graph(3) + # all modes are the same + answer = {0: 0, 1: 1, 2: 1, 3: 1} + assert bipartite.clustering(G, mode="dot") == answer + assert bipartite.clustering(G, mode="min") == answer + assert bipartite.clustering(G, mode="max") == answer + + +def test_not_bipartite(): + with pytest.raises(nx.NetworkXError): + bipartite.clustering(nx.complete_graph(4)) + + +def test_bad_mode(): + with pytest.raises(nx.NetworkXError): + bipartite.clustering(nx.path_graph(4), mode="foo") + + +def test_path_graph(): + G = nx.path_graph(4) + answer = {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5} + assert bipartite.clustering(G, mode="dot") == answer + assert bipartite.clustering(G, mode="max") == answer + answer = {0: 1, 1: 1, 2: 1, 3: 1} + assert bipartite.clustering(G, mode="min") == answer + + +def test_average_path_graph(): + G = nx.path_graph(4) + assert bipartite.average_clustering(G, mode="dot") == 0.5 + assert bipartite.average_clustering(G, mode="max") == 0.5 + assert bipartite.average_clustering(G, mode="min") == 1 + + +def test_ra_clustering_davis(): + G = nx.davis_southern_women_graph() + cc4 = round(bipartite.robins_alexander_clustering(G), 3) + assert cc4 == 0.468 + + +def test_ra_clustering_square(): + G = nx.path_graph(4) + G.add_edge(0, 3) + assert bipartite.robins_alexander_clustering(G) == 1.0 + + +def test_ra_clustering_zero(): + G = nx.Graph() + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_nodes_from(range(4)) + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_edges_from([(0, 1), (2, 3), (3, 4)]) + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_edge(1, 2) + assert bipartite.robins_alexander_clustering(G) == 0 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_covering.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_covering.py new file mode 100644 index 0000000000000000000000000000000000000000..9507e13492acbe505aa3394a24dbc41c095a037c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_covering.py @@ -0,0 +1,33 @@ +import networkx as nx +from networkx.algorithms import bipartite + + +class TestMinEdgeCover: + """Tests for :func:`networkx.algorithms.bipartite.min_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert bipartite.min_edge_cover(G) == set() + + def test_graph_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert bipartite.min_edge_cover(G) == {(0, 1), (1, 0)} + + def test_bipartite_default(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], bipartite=0) + G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + min_cover = bipartite.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 + + def test_bipartite_explicit(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 
3, 4], bipartite=0) + G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + min_cover = bipartite.min_edge_cover(G, bipartite.eppstein_matching) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py new file mode 100644 index 0000000000000000000000000000000000000000..b388465ef4ba7bea6761ae1622f4f8ce20ba2657 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py @@ -0,0 +1,229 @@ +""" + Unit tests for bipartite edgelists. +""" +import io +import os +import tempfile + +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class TestEdgelist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_nodes_from(["a", "c", "e"], bipartite=0) + cls.G.add_nodes_from(["b", "d", "f"], bipartite=1) + cls.G.add_node("g", bipartite=0) + cls.DG = nx.DiGraph(cls.G) + cls.MG = nx.MultiGraph() + cls.MG.add_edges_from([(1, 2), (1, 2), (1, 2)]) + cls.MG.add_node(1, bipartite=0) + cls.MG.add_node(2, bipartite=1) + + def test_read_edgelist_1(self): + s = b"""\ +# comment line +1 2 +# comment line +2 3 +""" + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + def test_read_edgelist_3(self): + s = b"""\ +# comment line +1 2 {'weight':2.0} +# comment line +2 3 {'weight':3.0} +""" + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int, data=False) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int, data=True) + assert edges_equal( + G.edges(data=True), [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})] + ) + + def test_write_edgelist_1(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=False) + fh.seek(0) + assert fh.read() == b"1 2\n3 2\n" + + def test_write_edgelist_2(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {}\n3 2 {}\n" + + def test_write_edgelist_3(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {'weight': 2.0}\n3 2 {'weight': 3.0}\n" + + def test_write_edgelist_4(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=[("weight")]) + fh.seek(0) + assert fh.read() == b"1 2 2.0\n3 2 3.0\n" + + def test_unicode(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, 
"Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + fd, fname = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname) + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_latin1_issue(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + fd, fname = tempfile.mkstemp() + pytest.raises( + UnicodeEncodeError, bipartite.write_edgelist, G, fname, encoding="latin-1" + ) + os.close(fd) + os.unlink(fname) + + def test_latin1(self): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + fd, fname = tempfile.mkstemp() + bipartite.write_edgelist(G, fname, encoding="latin-1") + H = bipartite.read_edgelist(fname, encoding="latin-1") + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_edgelist_graph(self): + G = self.G + (fd, fname) = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname) + H2 = bipartite.read_edgelist(fname) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_integers(self): + G = nx.convert_node_labels_to_integers(self.G) + (fd, fname) = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname, nodetype=int) + # isolated nodes are not written in edgelist + G.remove_nodes_from(list(nx.isolates(G))) + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_multigraph(self): + G = self.MG + (fd, fname) = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_empty_digraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + bytesIO = io.BytesIO() + bipartite.write_edgelist(nx.DiGraph(), bytesIO) + + def test_raise_attribute(self): + with pytest.raises(AttributeError): + G = nx.path_graph(4) + bytesIO = io.BytesIO() + bipartite.write_edgelist(G, bytesIO) + + def test_parse_edgelist(self): + """Tests for conditions specific to + parse_edge_list method""" + + # ignore strings of length less than 2 + lines = ["1 2", "2 3", "3 1", "4", " "] + G = bipartite.parse_edgelist(lines, nodetype=int) + assert list(G.nodes) == [1, 2, 3] + + # Exception raised when node is not convertible + # to specified data type + with pytest.raises(TypeError, match=".*Failed to convert nodes"): + lines = ["a b", "b c", "c a"] + G = bipartite.parse_edgelist(lines, nodetype=int) + + # Exception raised when format of data is not + # convertible to dictionary object + with pytest.raises(TypeError, match=".*Failed to convert edge data"): + lines = ["1 2 3", "2 3 4", "3 1 2"] + G = bipartite.parse_edgelist(lines, nodetype=int) + + # Exception 
raised when edge data and data + # keys are not of same length + with pytest.raises(IndexError): + lines = ["1 2 3 4", "2 3 4"] + G = bipartite.parse_edgelist( + lines, nodetype=int, data=[("weight", int), ("key", int)] + ) + + # Exception raised when edge data is not + # convertible to specified data type + with pytest.raises(TypeError, match=".*Failed to convert key data"): + lines = ["1 2 3 a", "2 3 4 b"] + G = bipartite.parse_edgelist( + lines, nodetype=int, data=[("weight", int), ("key", int)] + ) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py new file mode 100644 index 0000000000000000000000000000000000000000..d7ae34e4c1b0a04a9929a6468894c7efe6f74c53 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py @@ -0,0 +1,326 @@ +import pytest + +import networkx as nx + + +def test_selfloops_raises(): + G = nx.ladder_graph(3) + G.add_edge(0, 0) + with pytest.raises(nx.NetworkXError, match=".*not bipartite"): + nx.bipartite.maximal_extendability(G) + + +def test_disconnected_raises(): + G = nx.ladder_graph(3) + G.add_node("a") + with pytest.raises(nx.NetworkXError, match=".*not connected"): + nx.bipartite.maximal_extendability(G) + + +def test_not_bipartite_raises(): + G = nx.complete_graph(5) + with pytest.raises(nx.NetworkXError, match=".*not bipartite"): + nx.bipartite.maximal_extendability(G) + + +def test_no_perfect_matching_raises(): + G = nx.Graph([(0, 1), (0, 2)]) + with pytest.raises(nx.NetworkXError, match=".*not contain a perfect matching"): + nx.bipartite.maximal_extendability(G) + + +def test_ladder_graph_is_1(): + G = nx.ladder_graph(3) + assert nx.bipartite.maximal_extendability(G) == 1 + + +def test_cubical_graph_is_2(): + G = nx.cubical_graph() + assert nx.bipartite.maximal_extendability(G) == 2 + + +def test_k_is_3(): + G = nx.Graph( + [ + (1, 6), + (1, 7), + (1, 8), + (1, 9), + (2, 6), + (2, 7), + (2, 8), + (2, 10), + (3, 6), + (3, 8), + (3, 9), + (3, 10), + (4, 7), + (4, 8), + (4, 9), + (4, 10), + (5, 6), + (5, 7), + (5, 9), + (5, 10), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 3 + + +def test_k_is_4(): + G = nx.Graph( + [ + (8, 1), + (8, 2), + (8, 3), + (8, 4), + (8, 5), + (9, 1), + (9, 2), + (9, 3), + (9, 4), + (9, 7), + (10, 1), + (10, 2), + (10, 3), + (10, 4), + (10, 6), + (11, 1), + (11, 2), + (11, 5), + (11, 6), + (11, 7), + (12, 1), + (12, 3), + (12, 5), + (12, 6), + (12, 7), + (13, 2), + (13, 4), + (13, 5), + (13, 6), + (13, 7), + (14, 3), + (14, 4), + (14, 5), + (14, 6), + (14, 7), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 4 + + +def test_k_is_5(): + G = nx.Graph( + [ + (8, 1), + (8, 2), + (8, 3), + (8, 4), + (8, 5), + (8, 6), + (9, 1), + (9, 2), + (9, 3), + (9, 4), + (9, 5), + (9, 7), + (10, 1), + (10, 2), + (10, 3), + (10, 4), + (10, 6), + (10, 7), + (11, 1), + (11, 2), + (11, 3), + (11, 5), + (11, 6), + (11, 7), + (12, 1), + (12, 2), + (12, 4), + (12, 5), + (12, 6), + (12, 7), + (13, 1), + (13, 3), + (13, 4), + (13, 5), + (13, 6), + (13, 7), + (14, 2), + (14, 3), + (14, 4), + (14, 5), + (14, 6), + (14, 7), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 5 + + +def test_k_is_6(): + G = nx.Graph( + [ + (9, 1), + (9, 2), + (9, 3), + (9, 4), + (9, 5), + (9, 6), + (9, 7), + (10, 1), + (10, 2), + (10, 3), + (10, 4), + (10, 5), + (10, 6), + (10, 8), + (11, 1), + (11, 2), + (11, 3), + (11, 4), + (11, 5), + (11, 7), + (11, 8), + (12, 1), + (12, 
2), + (12, 3), + (12, 4), + (12, 6), + (12, 7), + (12, 8), + (13, 1), + (13, 2), + (13, 3), + (13, 5), + (13, 6), + (13, 7), + (13, 8), + (14, 1), + (14, 2), + (14, 4), + (14, 5), + (14, 6), + (14, 7), + (14, 8), + (15, 1), + (15, 3), + (15, 4), + (15, 5), + (15, 6), + (15, 7), + (15, 8), + (16, 2), + (16, 3), + (16, 4), + (16, 5), + (16, 6), + (16, 7), + (16, 8), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 6 + + +def test_k_is_7(): + G = nx.Graph( + [ + (1, 11), + (1, 12), + (1, 13), + (1, 14), + (1, 15), + (1, 16), + (1, 17), + (1, 18), + (2, 11), + (2, 12), + (2, 13), + (2, 14), + (2, 15), + (2, 16), + (2, 17), + (2, 19), + (3, 11), + (3, 12), + (3, 13), + (3, 14), + (3, 15), + (3, 16), + (3, 17), + (3, 20), + (4, 11), + (4, 12), + (4, 13), + (4, 14), + (4, 15), + (4, 16), + (4, 17), + (4, 18), + (4, 19), + (4, 20), + (5, 11), + (5, 12), + (5, 13), + (5, 14), + (5, 15), + (5, 16), + (5, 17), + (5, 18), + (5, 19), + (5, 20), + (6, 11), + (6, 12), + (6, 13), + (6, 14), + (6, 15), + (6, 16), + (6, 17), + (6, 18), + (6, 19), + (6, 20), + (7, 11), + (7, 12), + (7, 13), + (7, 14), + (7, 15), + (7, 16), + (7, 17), + (7, 18), + (7, 19), + (7, 20), + (8, 11), + (8, 12), + (8, 13), + (8, 14), + (8, 15), + (8, 16), + (8, 17), + (8, 18), + (8, 19), + (8, 20), + (9, 11), + (9, 12), + (9, 13), + (9, 14), + (9, 15), + (9, 16), + (9, 17), + (9, 18), + (9, 19), + (9, 20), + (10, 11), + (10, 12), + (10, 13), + (10, 14), + (10, 15), + (10, 16), + (10, 17), + (10, 18), + (10, 19), + (10, 20), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 7 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_generators.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_generators.py new file mode 100644 index 0000000000000000000000000000000000000000..5f3b84cece23ba6f3de2a1e454d01548af2e1390 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_generators.py @@ -0,0 +1,400 @@ +import numbers + +import pytest + +import networkx as nx + +from ..generators import ( + alternating_havel_hakimi_graph, + complete_bipartite_graph, + configuration_model, + gnmk_random_graph, + havel_hakimi_graph, + preferential_attachment_graph, + random_graph, + reverse_havel_hakimi_graph, +) + +""" +Generators - Bipartite +---------------------- +""" + + +class TestGeneratorsBipartite: + def test_complete_bipartite_graph(self): + G = complete_bipartite_graph(0, 0) + assert nx.is_isomorphic(G, nx.null_graph()) + + for i in [1, 5]: + G = complete_bipartite_graph(i, 0) + assert nx.is_isomorphic(G, nx.empty_graph(i)) + G = complete_bipartite_graph(0, i) + assert nx.is_isomorphic(G, nx.empty_graph(i)) + + G = complete_bipartite_graph(2, 2) + assert nx.is_isomorphic(G, nx.cycle_graph(4)) + + G = complete_bipartite_graph(1, 5) + assert nx.is_isomorphic(G, nx.star_graph(5)) + + G = complete_bipartite_graph(5, 1) + assert nx.is_isomorphic(G, nx.star_graph(5)) + + # complete_bipartite_graph(m1,m2) is a connected graph with + # m1+m2 nodes and m1*m2 edges + for m1, m2 in [(5, 11), (7, 3)]: + G = complete_bipartite_graph(m1, m2) + assert nx.number_of_nodes(G) == m1 + m2 + assert nx.number_of_edges(G) == m1 * m2 + + with pytest.raises(nx.NetworkXError): + complete_bipartite_graph(7, 3, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError): + complete_bipartite_graph(7, 3, create_using=nx.MultiDiGraph) + + mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph) + assert mG.is_multigraph() + assert sorted(mG.edges()) == sorted(G.edges()) + 
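Reviewer note (not part of the patch): the assertions above pin down the basic invariants of `complete_bipartite_graph`. A minimal standalone sketch of the same invariants, assuming only a stock `networkx` install:

```python
import networkx as nx

# complete_bipartite_graph(m1, m2): connected graph with m1 + m2 nodes and
# m1 * m2 edges; the two sets are marked via the "bipartite" node attribute.
G = nx.complete_bipartite_graph(5, 11)
assert G.number_of_nodes() == 5 + 11
assert G.number_of_edges() == 5 * 11
assert nx.is_connected(G)
top = {n for n, d in G.nodes(data=True) if d["bipartite"] == 0}
assert top == set(range(5))  # first set is 0..m1-1, second is m1..m1+m2-1
```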
+        mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph)
+        assert mG.is_multigraph()
+        assert sorted(mG.edges()) == sorted(G.edges())
+
+        mG = complete_bipartite_graph(7, 3)  # default to Graph
+        assert sorted(mG.edges()) == sorted(G.edges())
+        assert not mG.is_multigraph()
+        assert not mG.is_directed()
+
+        # specify nodes rather than number of nodes
+        for n1, n2 in [([1, 2], "ab"), (3, 2), (3, "ab"), ("ab", 3)]:
+            G = complete_bipartite_graph(n1, n2)
+            if isinstance(n1, numbers.Integral):
+                if isinstance(n2, numbers.Integral):
+                    n2 = range(n1, n1 + n2)
+                n1 = range(n1)
+            elif isinstance(n2, numbers.Integral):
+                n2 = range(n2)
+            edges = {(u, v) for u in n1 for v in n2}
+            assert edges == set(G.edges)
+            assert G.size() == len(edges)
+
+        # raise when node sets are not distinct
+        for n1, n2 in [([1, 2], 3), (3, [1, 2]), ("abc", "bcd")]:
+            pytest.raises(nx.NetworkXError, complete_bipartite_graph, n1, n2)
+
+    def test_configuration_model(self):
+        aseq = []
+        bseq = []
+        G = configuration_model(aseq, bseq)
+        assert len(G) == 0
+
+        aseq = [0, 0]
+        bseq = [0, 0]
+        G = configuration_model(aseq, bseq)
+        assert len(G) == 4
+        assert G.number_of_edges() == 0
+
+        aseq = [3, 3, 3, 3]
+        bseq = [2, 2, 2, 2, 2]
+        pytest.raises(nx.NetworkXError, configuration_model, aseq, bseq)
+
+        aseq = [3, 3, 3, 3]
+        bseq = [2, 2, 2, 2, 2, 2]
+        G = configuration_model(aseq, bseq)
+        assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+        aseq = [2, 2, 2, 2, 2, 2]
+        bseq = [3, 3, 3, 3]
+        G = configuration_model(aseq, bseq)
+        assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+        aseq = [2, 2, 2, 1, 1, 1]
+        bseq = [3, 3, 3]
+        G = configuration_model(aseq, bseq)
+        assert G.is_multigraph()
+        assert not G.is_directed()
+        assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3]
+
+        GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
+        assert GU.number_of_nodes() == 6
+
+        GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
+        assert GD.number_of_nodes() == 3
+
+        G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
+        assert not G.is_multigraph()
+        assert not G.is_directed()
+
+        pytest.raises(
+            nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph()
+        )
+        pytest.raises(
+            nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph
+        )
+        pytest.raises(
+            nx.NetworkXError,
+            configuration_model,
+            aseq,
+            bseq,
+            create_using=nx.MultiDiGraph,
+        )
+
+    def test_havel_hakimi_graph(self):
+        aseq = []
+        bseq = []
+        G = havel_hakimi_graph(aseq, bseq)
+        assert len(G) == 0
+
+        aseq = [0, 0]
+        bseq = [0, 0]
+        G = havel_hakimi_graph(aseq, bseq)
+        assert len(G) == 4
+        assert G.number_of_edges() == 0
+
+        aseq = [3, 3, 3, 3]
+        bseq = [2, 2, 2, 2, 2]
+        pytest.raises(nx.NetworkXError, havel_hakimi_graph, aseq, bseq)
+
+        bseq = [2, 2, 2, 2, 2, 2]
+        G = havel_hakimi_graph(aseq, bseq)
+        assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+        aseq = [2, 2, 2, 2, 2, 2]
+        bseq = [3, 3, 3, 3]
+        G = havel_hakimi_graph(aseq, bseq)
+        assert G.is_multigraph()
+        assert not G.is_directed()
+        assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+        GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
+        assert GU.number_of_nodes() == 6
+
+        GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
+        assert GD.number_of_nodes() == 4
+
+        G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
+        assert not G.is_multigraph()
+        assert not G.is_directed()
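Reviewer note (not part of the patch): the Havel-Hakimi tests above realize a pair of degree sequences, one per node set. A minimal sketch of the two behaviors they assert, using the same public `bipartite` API and the file's own `pytest.raises` idiom:

```python
import pytest

import networkx as nx
from networkx.algorithms import bipartite

aseq = [3, 3, 3, 3]        # degrees for the first node set (sum 12)
bseq = [2, 2, 2, 2, 2, 2]  # degrees for the second node set (sum 12)
G = bipartite.havel_hakimi_graph(aseq, bseq)
assert sorted(d for _, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]

# Sequences with unequal sums cannot be realized by any bipartite graph.
with pytest.raises(nx.NetworkXError):
    bipartite.havel_hakimi_graph([3, 3, 3, 3], [2, 2, 2, 2, 2])  # sums 12 vs 10
```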
+ + pytest.raises( + nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, + havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_reverse_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, reverse_havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_alternating_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, alternating_havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + 
alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_preferential_attachment(self): + aseq = [3, 2, 1, 1] + G = preferential_attachment_graph(aseq, 0.5) + assert G.is_multigraph() + assert not G.is_directed() + + G = preferential_attachment_graph(aseq, 0.5, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + + def test_random_graph(self): + n = 10 + m = 20 + G = random_graph(n, m, 0.9) + assert len(G) == 30 + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + + def test_random_digraph(self): + n = 10 + m = 20 + G = random_graph(n, m, 0.9, directed=True) + assert len(G) == 30 + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + + def test_gnmk_random_graph(self): + n = 10 + m = 20 + edges = 100 + # set seed because sometimes it is not connected + # which raises an error in bipartite.sets(G) below. + G = gnmk_random_graph(n, m, edges, seed=1234) + assert len(G) == n + m + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + # print(X) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + assert edges == len(list(G.edges())) + + def test_gnmk_random_graph_complete(self): + n = 10 + m = 20 + edges = 200 + G = gnmk_random_graph(n, m, edges) + assert len(G) == n + m + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + # print(X) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + assert edges == len(list(G.edges())) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matching.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..7ed7cdcb43429f92c95a8d78da48f5d2771db77b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matching.py @@ -0,0 +1,326 @@ +"""Unit tests for the :mod:`networkx.algorithms.bipartite.matching` module.""" +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms.bipartite.matching import ( + eppstein_matching, + hopcroft_karp_matching, + maximum_matching, + minimum_weight_full_matching, + to_vertex_cover, +) + + +class TestMatching: + """Tests for bipartite matching algorithms.""" + + def setup_method(self): + """Creates a bipartite graph for use in testing matching algorithms. + + The bipartite graph has a maximum cardinality matching that leaves + vertex 1 and vertex 10 unmatched. The first six numbers are the left + vertices and the next six numbers are the right vertices. 
+ + """ + self.simple_graph = nx.complete_bipartite_graph(2, 3) + self.simple_solution = {0: 2, 1: 3, 2: 0, 3: 1} + + edges = [(0, 7), (0, 8), (2, 6), (2, 9), (3, 8), (4, 8), (4, 9), (5, 11)] + self.top_nodes = set(range(6)) + self.graph = nx.Graph() + self.graph.add_nodes_from(range(12)) + self.graph.add_edges_from(edges) + + # Example bipartite graph from issue 2127 + G = nx.Graph() + G.add_nodes_from( + [ + (1, "C"), + (1, "B"), + (0, "G"), + (1, "F"), + (1, "E"), + (0, "C"), + (1, "D"), + (1, "I"), + (0, "A"), + (0, "D"), + (0, "F"), + (0, "E"), + (0, "H"), + (1, "G"), + (1, "A"), + (0, "I"), + (0, "B"), + (1, "H"), + ] + ) + G.add_edge((1, "C"), (0, "A")) + G.add_edge((1, "B"), (0, "A")) + G.add_edge((0, "G"), (1, "I")) + G.add_edge((0, "G"), (1, "H")) + G.add_edge((1, "F"), (0, "A")) + G.add_edge((1, "F"), (0, "C")) + G.add_edge((1, "F"), (0, "E")) + G.add_edge((1, "E"), (0, "A")) + G.add_edge((1, "E"), (0, "C")) + G.add_edge((0, "C"), (1, "D")) + G.add_edge((0, "C"), (1, "I")) + G.add_edge((0, "C"), (1, "G")) + G.add_edge((0, "C"), (1, "H")) + G.add_edge((1, "D"), (0, "A")) + G.add_edge((1, "I"), (0, "A")) + G.add_edge((1, "I"), (0, "E")) + G.add_edge((0, "A"), (1, "G")) + G.add_edge((0, "A"), (1, "H")) + G.add_edge((0, "E"), (1, "G")) + G.add_edge((0, "E"), (1, "H")) + self.disconnected_graph = G + + def check_match(self, matching): + """Asserts that the matching is what we expect from the bipartite graph + constructed in the :meth:`setup` fixture. + + """ + # For the sake of brevity, rename `matching` to `M`. + M = matching + matched_vertices = frozenset(itertools.chain(*M.items())) + # Assert that the maximum number of vertices (10) is matched. + assert matched_vertices == frozenset(range(12)) - {1, 10} + # Assert that no vertex appears in two edges, or in other words, that + # the matching (u, v) and (v, u) both appear in the matching + # dictionary. + assert all(u == M[M[u]] for u in range(12) if u in M) + + def check_vertex_cover(self, vertices): + """Asserts that the given set of vertices is the vertex cover we + expected from the bipartite graph constructed in the :meth:`setup` + fixture. + + """ + # By Konig's theorem, the number of edges in a maximum matching equals + # the number of vertices in a minimum vertex cover. + assert len(vertices) == 5 + # Assert that the set is truly a vertex cover. + for u, v in self.graph.edges(): + assert u in vertices or v in vertices + # TODO Assert that the vertices are the correct ones. + + def test_eppstein_matching(self): + """Tests that David Eppstein's implementation of the Hopcroft--Karp + algorithm produces a maximum cardinality matching. + + """ + self.check_match(eppstein_matching(self.graph, self.top_nodes)) + + def test_hopcroft_karp_matching(self): + """Tests that the Hopcroft--Karp algorithm produces a maximum + cardinality matching in a bipartite graph. 
+ + """ + self.check_match(hopcroft_karp_matching(self.graph, self.top_nodes)) + + def test_to_vertex_cover(self): + """Test for converting a maximum matching to a minimum vertex cover.""" + matching = maximum_matching(self.graph, self.top_nodes) + vertex_cover = to_vertex_cover(self.graph, matching, self.top_nodes) + self.check_vertex_cover(vertex_cover) + + def test_eppstein_matching_simple(self): + match = eppstein_matching(self.simple_graph) + assert match == self.simple_solution + + def test_hopcroft_karp_matching_simple(self): + match = hopcroft_karp_matching(self.simple_graph) + assert match == self.simple_solution + + def test_eppstein_matching_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + match = eppstein_matching(self.disconnected_graph) + + def test_hopcroft_karp_matching_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + match = hopcroft_karp_matching(self.disconnected_graph) + + def test_issue_2127(self): + """Test from issue 2127""" + # Build the example DAG + G = nx.DiGraph() + G.add_edge("A", "C") + G.add_edge("A", "B") + G.add_edge("C", "E") + G.add_edge("C", "D") + G.add_edge("E", "G") + G.add_edge("E", "F") + G.add_edge("G", "I") + G.add_edge("G", "H") + + tc = nx.transitive_closure(G) + btc = nx.Graph() + + # Create a bipartite graph based on the transitive closure of G + for v in tc.nodes(): + btc.add_node((0, v)) + btc.add_node((1, v)) + + for u, v in tc.edges(): + btc.add_edge((0, u), (1, v)) + + top_nodes = {n for n in btc if n[0] == 0} + matching = hopcroft_karp_matching(btc, top_nodes) + vertex_cover = to_vertex_cover(btc, matching, top_nodes) + independent_set = set(G) - {v for _, v in vertex_cover} + assert {"B", "D", "F", "I", "H"} == independent_set + + def test_vertex_cover_issue_2384(self): + G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + def test_vertex_cover_issue_3306(self): + G = nx.Graph() + edges = [(0, 2), (1, 0), (1, 1), (1, 2), (2, 2)] + G.add_edges_from([((i, "L"), (j, "R")) for i, j in edges]) + + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + def test_unorderable_nodes(self): + a = object() + b = object() + c = object() + d = object() + e = object() + G = nx.Graph([(a, d), (b, d), (b, e), (c, d)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + +def test_eppstein_matching(): + """Test in accordance to issue #1927""" + G = nx.Graph() + G.add_nodes_from(["a", 2, 3, 4], bipartite=0) + G.add_nodes_from([1, "b", "c"], bipartite=1) + G.add_edges_from([("a", 1), ("a", "b"), (2, "b"), (2, "c"), (3, "c"), (4, 1)]) + matching = eppstein_matching(G) + assert len(matching) == len(maximum_matching(G)) + assert all(x in set(matching.keys()) for x in set(matching.values())) + + +class TestMinimumWeightFullMatching: + @classmethod + def setup_class(cls): + pytest.importorskip("scipy") + + def test_minimum_weight_full_matching_incomplete_graph(self): + B = nx.Graph() + B.add_nodes_from([1, 2], bipartite=0) + B.add_nodes_from([3, 4], bipartite=1) + B.add_edge(1, 4, weight=100) + B.add_edge(2, 3, weight=100) + B.add_edge(2, 4, weight=50) + matching = minimum_weight_full_matching(B) + assert matching == {1: 4, 2: 3, 4: 1, 3: 2} + + def 
test_minimum_weight_full_matching_with_no_full_matching(self): + B = nx.Graph() + B.add_nodes_from([1, 2, 3], bipartite=0) + B.add_nodes_from([4, 5, 6], bipartite=1) + B.add_edge(1, 4, weight=100) + B.add_edge(2, 4, weight=100) + B.add_edge(3, 4, weight=50) + B.add_edge(3, 5, weight=50) + B.add_edge(3, 6, weight=50) + with pytest.raises(ValueError): + minimum_weight_full_matching(B) + + def test_minimum_weight_full_matching_square(self): + G = nx.complete_bipartite_graph(3, 3) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=300) + matching = minimum_weight_full_matching(G) + assert matching == {0: 4, 1: 3, 2: 5, 4: 0, 3: 1, 5: 2} + + def test_minimum_weight_full_matching_smaller_left(self): + G = nx.complete_bipartite_graph(3, 4) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=1) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(1, 6, weight=2) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=290) + G.add_edge(2, 6, weight=3) + matching = minimum_weight_full_matching(G) + assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1} + + def test_minimum_weight_full_matching_smaller_top_nodes_right(self): + G = nx.complete_bipartite_graph(3, 4) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=1) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(1, 6, weight=2) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=290) + G.add_edge(2, 6, weight=3) + matching = minimum_weight_full_matching(G, top_nodes=[3, 4, 5, 6]) + assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1} + + def test_minimum_weight_full_matching_smaller_right(self): + G = nx.complete_bipartite_graph(4, 3) + G.add_edge(0, 4, weight=400) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=300) + G.add_edge(1, 4, weight=150) + G.add_edge(1, 5, weight=450) + G.add_edge(1, 6, weight=225) + G.add_edge(2, 4, weight=400) + G.add_edge(2, 5, weight=600) + G.add_edge(2, 6, weight=290) + G.add_edge(3, 4, weight=1) + G.add_edge(3, 5, weight=2) + G.add_edge(3, 6, weight=3) + matching = minimum_weight_full_matching(G) + assert matching == {1: 4, 2: 6, 3: 5, 4: 1, 5: 3, 6: 2} + + def test_minimum_weight_full_matching_negative_weights(self): + G = nx.complete_bipartite_graph(2, 2) + G.add_edge(0, 2, weight=-2) + G.add_edge(0, 3, weight=0.2) + G.add_edge(1, 2, weight=-2) + G.add_edge(1, 3, weight=0.3) + matching = minimum_weight_full_matching(G) + assert matching == {0: 3, 1: 2, 2: 1, 3: 0} + + def test_minimum_weight_full_matching_different_weight_key(self): + G = nx.complete_bipartite_graph(2, 2) + G.add_edge(0, 2, mass=2) + G.add_edge(0, 3, mass=0.2) + G.add_edge(1, 2, mass=1) + G.add_edge(1, 3, mass=2) + matching = minimum_weight_full_matching(G, weight="mass") + assert matching == {0: 3, 1: 2, 2: 1, 3: 0} diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..393b71e7ca29aa9385ed312788ec038acdee9390 --- /dev/null +++ 
b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py @@ -0,0 +1,79 @@ +import pytest + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") +sparse = pytest.importorskip("scipy.sparse") + + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal + + +class TestBiadjacencyMatrix: + def test_biadjacency_matrix_weight(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2, other=4) + X = [1, 3] + Y = [0, 2, 4] + M = bipartite.biadjacency_matrix(G, X, weight="weight") + assert M[0, 0] == 2 + M = bipartite.biadjacency_matrix(G, X, weight="other") + assert M[0, 0] == 4 + + def test_biadjacency_matrix(self): + tops = [2, 5, 10] + bots = [5, 10, 15] + for i in range(len(tops)): + G = bipartite.random_graph(tops[i], bots[i], 0.2) + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + M = bipartite.biadjacency_matrix(G, top) + assert M.shape[0] == tops[i] + assert M.shape[1] == bots[i] + + def test_biadjacency_matrix_order(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2) + X = [3, 1] + Y = [4, 2, 0] + M = bipartite.biadjacency_matrix(G, X, Y, weight="weight") + assert M[1, 2] == 2 + + def test_null_graph(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph(), []) + + def test_empty_graph(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), []) + + def test_duplicate_row(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [1, 1]) + + def test_duplicate_col(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], [1, 1]) + + def test_format_keyword(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], format="foo") + + def test_from_biadjacency_roundtrip(self): + B1 = nx.path_graph(5) + M = bipartite.biadjacency_matrix(B1, [0, 2, 4]) + B2 = bipartite.from_biadjacency_matrix(M) + assert nx.is_isomorphic(B1, B2) + + def test_from_biadjacency_weight(self): + M = sparse.csc_matrix([[1, 2], [0, 3]]) + B = bipartite.from_biadjacency_matrix(M) + assert edges_equal(B.edges(), [(0, 2), (0, 3), (1, 3)]) + B = bipartite.from_biadjacency_matrix(M, edge_attribute="weight") + e = [(0, 2, {"weight": 1}), (0, 3, {"weight": 2}), (1, 3, {"weight": 3})] + assert edges_equal(B.edges(data=True), e) + + def test_from_biadjacency_multigraph(self): + M = sparse.csc_matrix([[1, 2], [0, 3]]) + B = bipartite.from_biadjacency_matrix(M, create_using=nx.MultiGraph()) + assert edges_equal(B.edges(), [(0, 2), (0, 3), (0, 3), (1, 3), (1, 3), (1, 3)]) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_project.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_project.py new file mode 100644 index 0000000000000000000000000000000000000000..076bb42b668657cad51f6423e5aacf23a2a1cd28 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_project.py @@ -0,0 +1,407 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal, nodes_equal + + +class TestBipartiteProject: + def test_path_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P = bipartite.projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert 
edges_equal(list(P.edges()), [(0, 2)]) + G = nx.MultiGraph([(0, 1)]) + with pytest.raises(nx.NetworkXError, match="not defined for multigraphs"): + bipartite.projected_graph(G, [0]) + + def test_path_projected_properties_graph(self): + G = nx.path_graph(4) + G.add_node(1, name="one") + G.add_node(2, name="two") + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + assert P.nodes[1]["name"] == G.nodes[1]["name"] + P = bipartite.projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + assert P.nodes[2]["name"] == G.nodes[2]["name"] + + def test_path_collaboration_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_directed_path_collaboration_projected_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_path_weighted_projected_graph(self): + G = nx.path_graph(4) + + with pytest.raises(nx.NetworkXAlgorithmError): + bipartite.weighted_projected_graph(G, [1, 2, 3, 3]) + + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_digraph_weighted_projection(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)]) + P = bipartite.overlap_weighted_projected_graph(G, [1, 3]) + assert nx.get_edge_attributes(P, "weight") == {(1, 3): 1.0} + assert len(P) == 2 + + def test_path_weighted_projected_directed_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_star_projected_graph(self): + G = nx.star_graph(3) + P = bipartite.projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)]) + P = bipartite.weighted_projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)]) + + P = bipartite.projected_graph(G, [0]) + assert nodes_equal(list(P), [0]) + assert edges_equal(list(P.edges()), []) + + def test_project_multigraph(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("a", 2) + G.add_edge("b", 2) + P = bipartite.projected_graph(G, "ab") + assert edges_equal(list(P.edges()), [("a", "b")]) + P = bipartite.weighted_projected_graph(G, "ab") + assert edges_equal(list(P.edges()), 
[("a", "b")]) + P = bipartite.projected_graph(G, "ab", multigraph=True) + assert edges_equal(list(P.edges()), [("a", "b"), ("a", "b")]) + + def test_project_collaboration(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("b", 2) + G.add_edge("c", 2) + G.add_edge("c", 3) + G.add_edge("c", 4) + G.add_edge("b", 4) + P = bipartite.collaboration_weighted_projected_graph(G, "abc") + assert P["a"]["b"]["weight"] == 1 + assert P["b"]["c"]["weight"] == 2 + + def test_directed_projection(self): + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge("B", 2) + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 1 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", "B")]) + + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge(2, "B") + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 2 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", "B"), ("A", "B")]) + + +class TestBipartiteWeightedProjection: + @classmethod + def setup_class(cls): + # Tore Opsahl's example + # http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/ + cls.G = nx.Graph() + cls.G.add_edge("A", 1) + cls.G.add_edge("A", 2) + cls.G.add_edge("B", 1) + cls.G.add_edge("B", 2) + cls.G.add_edge("B", 3) + cls.G.add_edge("B", 4) + cls.G.add_edge("B", 5) + cls.G.add_edge("C", 1) + cls.G.add_edge("D", 3) + cls.G.add_edge("E", 4) + cls.G.add_edge("E", 5) + cls.G.add_edge("E", 6) + cls.G.add_edge("F", 6) + # Graph based on figure 6 from Newman (2001) + cls.N = nx.Graph() + cls.N.add_edge("A", 1) + cls.N.add_edge("A", 2) + cls.N.add_edge("A", 3) + cls.N.add_edge("B", 1) + cls.N.add_edge("B", 2) + cls.N.add_edge("B", 3) + cls.N.add_edge("C", 1) + cls.N.add_edge("D", 1) + cls.N.add_edge("E", 3) + + def test_project_weighted_shared(self): + edges = [ + ("A", "B", 2), + ("A", "C", 1), + ("B", "C", 1), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3), + ("A", "E", 1), + ("A", "C", 1), + ("A", "D", 1), + ("B", "E", 1), + ("B", "C", 1), + ("B", "D", 1), + ("C", "D", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_newman(self): + edges = [ + ("A", "B", 1.5), + ("A", "C", 0.5), + ("B", "C", 0.5), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == 
Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 11 / 6.0), + ("A", "E", 1 / 2.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 2.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_ratio(self): + edges = [ + ("A", "B", 2 / 6.0), + ("A", "C", 1 / 6.0), + ("B", "C", 1 / 6.0), + ("B", "D", 1 / 6.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 6.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.N, "ABCDE", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_overlap(self): + edges = [ + ("A", "B", 2 / 2.0), + ("A", "C", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("B", "E", 2 / 3.0), + ("E", "F", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 1.0), + ("A", "C", 1 / 1.0), + ("A", "D", 1 / 1.0), + ("B", "E", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("C", "D", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_jaccard(self): + edges = [ + ("A", "B", 2 / 5.0), + ("A", "C", 1 / 2.0), + ("B", "C", 1 / 5.0), + ("B", "D", 1 / 5.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in P.edges(): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_generic_weighted_projected_graph_simple(self): + def shared(G, u, v): + return len(set(G[u]) & set(G[v])) + + B = nx.path_graph(5) + G = bipartite.generic_weighted_projected_graph( + 
B, [0, 2, 4], weight_function=shared + ) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + B = nx.DiGraph() + nx.add_path(B, range(5)) + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})] + ) + + def test_generic_weighted_projected_graph_custom(self): + def jaccard(G, u, v): + unbrs = set(G[u]) + vnbrs = set(G[v]) + return len(unbrs & vnbrs) / len(unbrs | vnbrs) + + def my_weight(G, u, v, weight="weight"): + w = 0 + for nbr in set(G[u]) & set(G[v]): + w += G.edges[u, nbr].get(weight, 1) + G.edges[v, nbr].get(weight, 1) + return w + + B = nx.bipartite.complete_bipartite_graph(2, 2) + for i, (u, v) in enumerate(B.edges()): + B.edges[u, v]["weight"] = i + 1 + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=jaccard + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 1.0})]) + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=my_weight + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 10})]) + G = bipartite.generic_weighted_projected_graph(B, [0, 1]) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 2})]) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py new file mode 100644 index 0000000000000000000000000000000000000000..7ab7813d5facd2953e1d661d3b64a2223b38e48b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py @@ -0,0 +1,37 @@ +"""Unit tests for the :mod:`networkx.algorithms.bipartite.redundancy` module. + +""" + +import pytest + +from networkx import NetworkXError, cycle_graph +from networkx.algorithms.bipartite import complete_bipartite_graph, node_redundancy + + +def test_no_redundant_nodes(): + G = complete_bipartite_graph(2, 2) + + # when nodes is None + rc = node_redundancy(G) + assert all(redundancy == 1 for redundancy in rc.values()) + + # when set of nodes is specified + rc = node_redundancy(G, (2, 3)) + assert rc == {2: 1.0, 3: 1.0} + + +def test_redundant_nodes(): + G = cycle_graph(6) + edge = {0, 3} + G.add_edge(*edge) + redundancy = node_redundancy(G) + for v in edge: + assert redundancy[v] == 2 / 3 + for v in set(G) - edge: + assert redundancy[v] == 1 + + +def test_not_enough_neighbors(): + with pytest.raises(NetworkXError): + G = complete_bipartite_graph(1, 2) + node_redundancy(G) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py new file mode 100644 index 0000000000000000000000000000000000000000..b940649793d40aa73606914f3d48348761c329df --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py @@ -0,0 +1,80 @@ +import pytest + +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.bipartite import spectral_bipartivity as sb + +# Examples from Figure 1 +# E. Estrada and J. A. 
Rodríguez-Velázquez, "Spectral measures of +# bipartivity in complex networks", PhysRev E 72, 046105 (2005) + + +class TestSpectralBipartivity: + def test_star_like(self): + # star-like + + G = nx.star_graph(2) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.843, abs=1e-3) + + G = nx.star_graph(3) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.871, abs=1e-3) + + G = nx.star_graph(4) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.890, abs=1e-3) + + def test_k23_like(self): + # K2,3-like + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.769, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + assert sb(G) == pytest.approx(0.829, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + assert sb(G) == pytest.approx(0.731, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + G.add_edge(2, 4) + assert sb(G) == pytest.approx(0.692, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.645, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(2, 3) + assert sb(G) == pytest.approx(0.645, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(2, 3) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.597, abs=1e-3) + + def test_single_nodes(self): + # single nodes + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + sbn = sb(G, nodes=[1, 2]) + assert sbn[1] == pytest.approx(0.85, abs=1e-2) + assert sbn[2] == pytest.approx(0.77, abs=1e-2) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + sbn = sb(G, nodes=[1, 2]) + assert sbn[1] == pytest.approx(0.73, abs=1e-2) + assert sbn[2] == pytest.approx(0.82, abs=1e-2) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/boundary.py b/phivenv/Lib/site-packages/networkx/algorithms/boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..ea97cee6efb10e1e29735184c7e8b5c328943c42 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/boundary.py @@ -0,0 +1,167 @@ +"""Routines to find the boundary of a set of nodes. + +An edge boundary is a set of edges, each of which has exactly one +endpoint in a given set of nodes (or, in the case of directed graphs, +the set of edges whose source node is in the set). + +A node boundary of a set *S* of nodes is the set of (out-)neighbors of +nodes in *S* that are outside *S*. + +""" +from itertools import chain + +import networkx as nx + +__all__ = ["edge_boundary", "node_boundary"] + + +@nx._dispatch(edge_attrs={"data": "default"}, preserve_edge_attrs="data") +def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default=None): + """Returns the edge boundary of `nbunch1`. + + The *edge boundary* of a set *S* with respect to a set *T* is the + set of edges (*u*, *v*) such that *u* is in *S* and *v* is in *T*. + If *T* is not specified, it is assumed to be the set of all nodes + not in *S*. + + Parameters + ---------- + G : NetworkX graph + + nbunch1 : iterable + Iterable of nodes in the graph representing the set of nodes + whose edge boundary will be returned. (This is the set *S* from + the definition above.) + + nbunch2 : iterable + Iterable of nodes representing the target (or "exterior") set of + nodes. (This is the set *T* from the definition above.) 
If not + specified, this is assumed to be the set of all nodes in `G` + not in `nbunch1`. + + keys : bool + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + data : bool or object + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + default : object + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + Returns + ------- + iterator + An iterator over the edges in the boundary of `nbunch1` with + respect to `nbunch2`. If `keys`, `data`, or `default` + are specified and `G` is a multigraph, then edges are returned + with keys and/or data, as in :meth:`MultiGraph.edges`. + + Examples + -------- + >>> G = nx.wheel_graph(6) + + When nbunch2=None: + + >>> list(nx.edge_boundary(G, (1, 3))) + [(1, 0), (1, 2), (1, 5), (3, 0), (3, 2), (3, 4)] + + When nbunch2 is given: + + >>> list(nx.edge_boundary(G, (1, 3), (2, 0))) + [(1, 0), (1, 2), (3, 0), (3, 2)] + + Notes + ----- + Any element of `nbunch` that is not in the graph `G` will be + ignored. + + `nbunch1` and `nbunch2` are usually meant to be disjoint, but in + the interest of speed and generality, that is not required here. + + """ + nset1 = {n for n in nbunch1 if n in G} + # Here we create an iterator over edges incident to nodes in the set + # `nset1`. The `Graph.edges()` method does not provide a guarantee + # on the orientation of the edges, so our algorithm below must + # handle the case in which exactly one orientation, either (u, v) or + # (v, u), appears in this iterable. + if G.is_multigraph(): + edges = G.edges(nset1, data=data, keys=keys, default=default) + else: + edges = G.edges(nset1, data=data, default=default) + # If `nbunch2` is not provided, then it is assumed to be the set + # complement of `nbunch1`. For the sake of efficiency, this is + # implemented by using the `not in` operator, instead of by creating + # an additional set and using the `in` operator. + if nbunch2 is None: + return (e for e in edges if (e[0] in nset1) ^ (e[1] in nset1)) + nset2 = set(nbunch2) + return ( + e + for e in edges + if (e[0] in nset1 and e[1] in nset2) or (e[1] in nset1 and e[0] in nset2) + ) + + +@nx._dispatch +def node_boundary(G, nbunch1, nbunch2=None): + """Returns the node boundary of `nbunch1`. + + The *node boundary* of a set *S* with respect to a set *T* is the + set of nodes *v* in *T* such that for some *u* in *S*, there is an + edge joining *u* to *v*. If *T* is not specified, it is assumed to + be the set of all nodes not in *S*. + + Parameters + ---------- + G : NetworkX graph + + nbunch1 : iterable + Iterable of nodes in the graph representing the set of nodes + whose node boundary will be returned. (This is the set *S* from + the definition above.) + + nbunch2 : iterable + Iterable of nodes representing the target (or "exterior") set of + nodes. (This is the set *T* from the definition above.) If not + specified, this is assumed to be the set of all nodes in `G` + not in `nbunch1`. + + Returns + ------- + set + The node boundary of `nbunch1` with respect to `nbunch2`. + + Examples + -------- + >>> G = nx.wheel_graph(6) + + When nbunch2=None: + + >>> list(nx.node_boundary(G, (3, 4))) + [0, 2, 5] + + When nbunch2 is given: + + >>> list(nx.node_boundary(G, (3, 4), (0, 1, 5))) + [0, 5] + + Notes + ----- + Any element of `nbunch` that is not in the graph `G` will be + ignored. + + `nbunch1` and `nbunch2` are usually meant to be disjoint, but in + the interest of speed and generality, that is not required here. 
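Reviewer note (not part of the patch): a minimal sketch of the two boundary notions defined in this module, reusing the wheel-graph example from the docstrings:

```python
import networkx as nx

G = nx.wheel_graph(6)
S = {1, 3}
# Edge boundary: edges with exactly one endpoint in S.
assert all((u in S) ^ (v in S) for u, v in nx.edge_boundary(G, S))
# Node boundary: the neighbors of S that lie outside S.
assert nx.node_boundary(G, S) == {0, 2, 4, 5}
```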
+ + """ + nset1 = {n for n in nbunch1 if n in G} + bdy = set(chain.from_iterable(G[v] for v in nset1)) - nset1 + # If `nbunch2` is not specified, it is assumed to be the set + # complement of `nbunch1`. + if nbunch2 is not None: + bdy &= set(nbunch2) + return bdy diff --git a/phivenv/Lib/site-packages/networkx/algorithms/bridges.py b/phivenv/Lib/site-packages/networkx/algorithms/bridges.py new file mode 100644 index 0000000000000000000000000000000000000000..106120e2fde435e82fa6186a835f3fedabd21a0a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/bridges.py @@ -0,0 +1,205 @@ +"""Bridge-finding algorithms.""" +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["bridges", "has_bridges", "local_bridges"] + + +@not_implemented_for("directed") +@nx._dispatch +def bridges(G, root=None): + """Generate all bridges in a graph. + + A *bridge* in a graph is an edge whose removal causes the number of + connected components of the graph to increase. Equivalently, a bridge is an + edge that does not belong to any cycle. Bridges are also known as cut-edges, + isthmuses, or cut arcs. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the bridges in the + connected component containing this node will be returned. + + Yields + ------ + e : edge + An edge in the graph whose removal disconnects the graph (or + causes the number of connected components to increase). + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + NetworkXNotImplemented + If `G` is a directed graph. + + Examples + -------- + The barbell graph with parameter zero has a single bridge: + + >>> G = nx.barbell_graph(10, 0) + >>> list(nx.bridges(G)) + [(9, 10)] + + Notes + ----- + This is an implementation of the algorithm described in [1]_. An edge is a + bridge if and only if it is not contained in any chain. Chains are found + using the :func:`networkx.chain_decomposition` function. + + The algorithm described in [1]_ requires a simple graph. If the provided + graph is a multigraph, we convert it to a simple graph and verify that any + bridges discovered by the chain decomposition algorithm are not multi-edges. + + Ignoring polylogarithmic factors, the worst-case time complexity is the + same as the :func:`networkx.chain_decomposition` function, + $O(m + n)$, where $n$ is the number of nodes in the graph and $m$ is + the number of edges. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29#Bridge-Finding_with_Chain_Decompositions + """ + multigraph = G.is_multigraph() + H = nx.Graph(G) if multigraph else G + chains = nx.chain_decomposition(H, root=root) + chain_edges = set(chain.from_iterable(chains)) + H_copy = H.copy() + if root is not None: + H = H.subgraph(nx.node_connected_component(H, root)).copy() + for u, v in H.edges(): + if (u, v) not in chain_edges and (v, u) not in chain_edges: + if multigraph and len(G[u][v]) > 1: + continue + yield u, v + + +@not_implemented_for("directed") +@nx._dispatch +def has_bridges(G, root=None): + """Decide whether a graph has any bridges. + + A *bridge* in a graph is an edge whose removal causes the number of + connected components of the graph to increase. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the bridges in the + connected component containing this node will be considered. 
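+
+    As a cross-check of the underlying `bridges` generator when `root`
+    is given (reviewer's sketch; output computed by hand): the barbell
+    graph's connecting edge is its only bridge, and it lies in the
+    component containing node 0.
+
+    >>> G = nx.barbell_graph(3, 0)
+    >>> list(nx.bridges(G, root=0))
+    [(2, 3)]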
+ + Returns + ------- + bool + Whether the graph (or the connected component containing `root`) + has any bridges. + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + NetworkXNotImplemented + If `G` is a directed graph. + + Examples + -------- + The barbell graph with parameter zero has a single bridge:: + + >>> G = nx.barbell_graph(10, 0) + >>> nx.has_bridges(G) + True + + On the other hand, the cycle graph has no bridges:: + + >>> G = nx.cycle_graph(5) + >>> nx.has_bridges(G) + False + + Notes + ----- + This implementation uses the :func:`networkx.bridges` function, so + it shares its worst-case time complexity, $O(m + n)$, ignoring + polylogarithmic factors, where $n$ is the number of nodes in the + graph and $m$ is the number of edges. + + """ + try: + next(bridges(G, root=root)) + except StopIteration: + return False + else: + return True + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def local_bridges(G, with_span=True, weight=None): + """Iterate over local bridges of `G` optionally computing the span + + A *local bridge* is an edge whose endpoints have no common neighbors. + That is, the edge is not part of a triangle in the graph. + + The *span* of a *local bridge* is the shortest path length between + the endpoints if the local bridge is removed. + + Parameters + ---------- + G : undirected graph + + with_span : bool + If True, yield a 3-tuple `(u, v, span)` + + weight : function, string or None (default: None) + If function, used to compute edge weights for the span. + If string, the edge data attribute used in calculating span. + If None, all edges have weight 1. + + Yields + ------ + e : edge + The local bridges as an edge 2-tuple of nodes `(u, v)` or + as a 3-tuple `(u, v, span)` when `with_span is True`. + + Raises + ------ + NetworkXNotImplemented + If `G` is a directed graph or multigraph. + + Examples + -------- + A cycle graph has every edge a local bridge with span N-1. 
+ + >>> G = nx.cycle_graph(9) + >>> (0, 8, 8) in set(nx.local_bridges(G)) + True + """ + if with_span is not True: + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + yield u, v + else: + wt = nx.weighted._weight_function(G, weight) + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + enodes = {u, v} + + def hide_edge(n, nbr, d): + if n not in enodes or nbr not in enodes: + return wt(n, nbr, d) + return None + + try: + span = nx.shortest_path_length(G, u, v, weight=hide_edge) + yield u, v, span + except nx.NetworkXNoPath: + yield u, v, float("inf") diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c91a904a13496ecab5a3a6c8caa026970d99a540 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__init__.py @@ -0,0 +1,20 @@ +from .betweenness import * +from .betweenness_subset import * +from .closeness import * +from .current_flow_betweenness import * +from .current_flow_betweenness_subset import * +from .current_flow_closeness import * +from .degree_alg import * +from .dispersion import * +from .eigenvector import * +from .group import * +from .harmonic import * +from .katz import * +from .load import * +from .percolation import * +from .reaching import * +from .second_order import * +from .subgraph_alg import * +from .trophic import * +from .voterank_alg import * +from .laplacian import * diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b7511f556c18f96a4dfd247adfb8eb9c7fbeb14 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e3c9618947db99b5f5434c726332c2ca3044936 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b0bad006e0367bbd9dd1ae6301346352670fc07 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e4e925eddb6ad41f183bc175a5bad7ffb3d45f5 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-39.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..5da1859348ee3a35dab68cb1daa2cede717eeee5 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0963703b3a02a3906478fbbf71f9de6f8ead1861 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d09c9fd32b3971d0301bdb6b4831e3e5379d08c9 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7da07336b5b255cb3fc838fa0273823d856cabe2 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c56fd997d6bca00a99c104cb2e457d9fe74c425 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..656ec6550e25e0356c538bef4cb401110a5279f3 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52193ff5faa7dc35f2ac6a1ddd028041ddc2ecb1 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5da0c9c422cf7bd46a390f8bb8f374a3dc088f04 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d396315e6480ef3488abd5cc1905c6f6bb37e245 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..478fb375fb4ab2ac79b592d234b9fdcf2f31bc84 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cafb761014860f71efad97348b6966f65858f90 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1b4778dd8834002fdcf17deae7376162cdabb89 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b062a12396a49f6cc0ed0d80b476d421eed12a8 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..026d3829a6bbc23fc2ed04c02d623a10ad51176f Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93680d5316a7532727c399e349c75705b73a07eb Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..635178705b84054c1eea9056b4bb49a3967150ed Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9390f43c24e377eaa62325a0a6d4797708ef2852 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8545781e5278b8b0f55f8701719354dbb73c84f0 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/betweenness.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/betweenness.py new file mode 100644 index 0000000000000000000000000000000000000000..b4b1f3963b00e451258b9838723b71a2d9f799fe --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/betweenness.py @@ -0,0 +1,435 @@ +"""Betweenness centrality measures.""" +from collections import deque +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.weighted import _weight_function +from networkx.utils import py_random_state +from networkx.utils.decorators import not_implemented_for + +__all__ = ["betweenness_centrality", "edge_betweenness_centrality"] + + +@py_random_state(5) +@nx._dispatch(edge_attrs="weight") +def betweenness_centrality( + G, k=None, normalized=True, weight=None, endpoints=False, seed=None +): + r"""Compute the shortest-path betweenness centrality for nodes. + + Betweenness centrality of a node $v$ is the sum of the + fraction of all-pairs shortest paths that pass through $v$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|v)$ is the number of + those paths passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, and if $v \in {s, t}$, + $\sigma(s, t|v) = 0$ [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int, optional (default=None) + If k is not None use k node samples to estimate betweenness. + The value of k <= n where n is the number of nodes in the graph. + Higher values give better approximation. + + normalized : bool, optional + If True the betweenness values are normalized by `2/((n-1)(n-2))` + for graphs, and `1/((n-1)(n-2))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + endpoints : bool, optional + If True include the endpoints in the shortest path counts. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Note that this is only used if k is not None. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. 
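+
+    A small worked check (reviewer's sketch, not upstream text): on a
+    three-node path every shortest path between the two leaves passes
+    through the middle node, and the normalization constant
+    `2/((n-1)(n-2))` equals 1 for `n = 3`, so the middle node scores
+    exactly 1.
+
+    >>> nx.betweenness_centrality(nx.path_graph(3))
+    {0: 0.0, 1: 1.0, 2: 0.0}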
+ + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The algorithm is from Ulrik Brandes [1]_. + See [4]_ for the original first published version and [2]_ for details on + algorithms for variations and related metrics. + + For approximate betweenness calculations set k=#samples to use + k nodes ("pivots") to estimate the betweenness values. For an estimate + of the number of pivots needed see [3]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? + + For betweenness_centrality we report the number of undirected + paths when G is undirected. + + For betweenness_centrality_subset the reporting is different. + If the source and target subsets are the same, then we want + to count undirected paths. But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + This algorithm is not guaranteed to be correct if edge weights + are floating point numbers. As a workaround you can use integer + numbers by multiplying the relevant edge attributes by a convenient + constant factor (eg 100) and converting to integers. + + References + ---------- + .. [1] Ulrik Brandes: + A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: + On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + .. [3] Ulrik Brandes and Christian Pich: + Centrality Estimation in Large Networks. + International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007. + https://dx.doi.org/10.1142/S0218127407018403 + .. [4] Linton C. Freeman: + A set of measures of centrality based on betweenness. + Sociometry 40: 35–41, 1977 + https://doi.org/10.2307/3033543 + """ + betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + if k is None: + nodes = G + else: + nodes = seed.sample(list(G.nodes()), k) + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight) + # accumulation + if endpoints: + betweenness, _ = _accumulate_endpoints(betweenness, S, P, sigma, s) + else: + betweenness, _ = _accumulate_basic(betweenness, S, P, sigma, s) + # rescaling + betweenness = _rescale( + betweenness, + len(G), + normalized=normalized, + directed=G.is_directed(), + k=k, + endpoints=endpoints, + ) + return betweenness + + +@py_random_state(4) +@nx._dispatch(edge_attrs="weight") +def edge_betweenness_centrality(G, k=None, normalized=True, weight=None, seed=None): + r"""Compute betweenness centrality for edges. + + Betweenness centrality of an edge $e$ is the sum of the + fraction of all-pairs shortest paths that pass through $e$ + + .. 
math:: + + c_B(e) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|e)$ is the number of + those paths passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int, optional (default=None) + If k is not None use k node samples to estimate betweenness. + The value of k <= n where n is the number of nodes in the graph. + Higher values give better approximation. + + normalized : bool, optional + If True the betweenness values are normalized by $2/(n(n-1))$ + for graphs, and $1/(n(n-1))$ for directed graphs where $n$ + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Note that this is only used if k is not None. + + Returns + ------- + edges : dictionary + Dictionary of edges with betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The algorithm is from Ulrik Brandes [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] A Faster Algorithm for Betweenness Centrality. Ulrik Brandes, + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. 
+ https://doi.org/10.1016/j.socnet.2007.11.001 + """ + betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + # b[e]=0 for e in G.edges() + betweenness.update(dict.fromkeys(G.edges(), 0.0)) + if k is None: + nodes = G + else: + nodes = seed.sample(list(G.nodes()), k) + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight) + # accumulation + betweenness = _accumulate_edges(betweenness, S, P, sigma, s) + # rescaling + for n in G: # remove nodes to only return edges + del betweenness[n] + betweenness = _rescale_e( + betweenness, len(G), normalized=normalized, directed=G.is_directed() + ) + if G.is_multigraph(): + betweenness = _add_edge_keys(G, betweenness, weight=weight) + return betweenness + + +# helpers for betweenness centrality + + +def _single_source_shortest_path_basic(G, s): + S = [] + P = {} + for v in G: + P[v] = [] + sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G + D = {} + sigma[s] = 1.0 + D[s] = 0 + Q = deque([s]) + while Q: # use BFS to find shortest paths + v = Q.popleft() + S.append(v) + Dv = D[v] + sigmav = sigma[v] + for w in G[v]: + if w not in D: + Q.append(w) + D[w] = Dv + 1 + if D[w] == Dv + 1: # this is a shortest path, count paths + sigma[w] += sigmav + P[w].append(v) # predecessors + return S, P, sigma, D + + +def _single_source_dijkstra_path_basic(G, s, weight): + weight = _weight_function(G, weight) + # modified from Eppstein + S = [] + P = {} + for v in G: + P[v] = [] + sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G + D = {} + sigma[s] = 1.0 + push = heappush + pop = heappop + seen = {s: 0} + c = count() + Q = [] # use Q as heap with (distance,node id) tuples + push(Q, (0, next(c), s, s)) + while Q: + (dist, _, pred, v) = pop(Q) + if v in D: + continue # already searched this node. 
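+        # (Reviewer's explanatory comments; the logic below is unchanged.)
+        # Each heap entry is a 4-tuple (distance, tie-break counter, pred,
+        # node): the counter from itertools.count() keeps the heap from
+        # ever comparing node objects, and carrying `pred` lets the path
+        # count of the predecessor be folded in at the moment `v` is
+        # settled. Because nodes are appended to S in the order they are
+        # settled, S ends up sorted by nondecreasing distance, which is
+        # exactly the order the _accumulate_* helpers rely on when they
+        # pop it in reverse.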
+ sigma[v] += sigma[pred] # count paths + S.append(v) + D[v] = dist + for w, edgedata in G[v].items(): + vw_dist = dist + weight(v, w, edgedata) + if w not in D and (w not in seen or vw_dist < seen[w]): + seen[w] = vw_dist + push(Q, (vw_dist, next(c), v, w)) + sigma[w] = 0.0 + P[w] = [v] + elif vw_dist == seen[w]: # handle equal paths + sigma[w] += sigma[v] + P[w].append(v) + return S, P, sigma, D + + +def _accumulate_basic(betweenness, S, P, sigma, s): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness, delta + + +def _accumulate_endpoints(betweenness, S, P, sigma, s): + betweenness[s] += len(S) - 1 + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + 1 + return betweenness, delta + + +def _accumulate_edges(betweenness, S, P, sigma, s): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + c = sigma[v] * coeff + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, normalized, directed=False, k=None, endpoints=False): + if normalized: + if endpoints: + if n < 2: + scale = None # no normalization + else: + # Scale factor should include endpoint nodes + scale = 1 / (n * (n - 1)) + elif n <= 2: + scale = None # no normalization b=0 for all nodes + else: + scale = 1 / ((n - 1) * (n - 2)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + if k is not None: + scale = scale * n / k + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +def _rescale_e(betweenness, n, normalized, directed=False, k=None): + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + if k is not None: + scale = scale * n / k + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +@not_implemented_for("graph") +def _add_edge_keys(G, betweenness, weight=None): + r"""Adds the corrected betweenness centrality (BC) values for multigraphs. + + Parameters + ---------- + G : NetworkX graph. + + betweenness : dictionary + Dictionary mapping adjacent node tuples to betweenness centrality values. + + weight : string or function + See `_weight_function` for details. Defaults to `None`. + + Returns + ------- + edges : dictionary + The parameter `betweenness` including edges with keys and their + betweenness centrality values. + + The BC value is divided among edges of equal weight. 
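+
+    A worked instance (reviewer's sketch): if a multigraph holds two
+    parallel unit-weight edges between `u` and `v`, both keys attain the
+    minimum weight, so a 2-tuple value of 1.0 for `(u, v)` is split into
+    0.5 for `(u, v, 0)` and 0.5 for `(u, v, 1)`.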
+ """ + _weight = _weight_function(G, weight) + + edge_bc = dict.fromkeys(G.edges, 0.0) + for u, v in betweenness: + d = G[u][v] + wt = _weight(u, v, d) + keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt] + bc = betweenness[(u, v)] / len(keys) + for k in keys: + edge_bc[(u, v, k)] = bc + + return edge_bc diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/betweenness_subset.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/betweenness_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..e6c1acdf4ffe4d7423a49bcdf8c340886c998b3b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/betweenness_subset.py @@ -0,0 +1,274 @@ +"""Betweenness centrality measures for subsets of nodes.""" +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _add_edge_keys, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = [ + "betweenness_centrality_subset", + "edge_betweenness_centrality_subset", +] + + +@nx._dispatch(edge_attrs="weight") +def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None): + r"""Compute betweenness centrality for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|v)$ is the number of those paths + passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, + and if $v \in {s, t}$, $\sigma(s, t|v) = 0$ [2]_. + + + Parameters + ---------- + G : graph + A NetworkX graph. + + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by $2/((n-1)(n-2))$ + for graphs, and $1/((n-1)(n-2))$ for directed graphs where $n$ + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is + designed to make betweenness_centrality(G) be the same as + betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? + + For betweenness_centrality we report the number of undirected + paths when G is undirected. + + For betweenness_centrality_subset the reporting is different. 
+ If the source and target subsets are the same, then we want + to count undirected paths. But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_subset(b, S, P, sigma, s, targets) + b = _rescale(b, len(G), normalized=normalized, directed=G.is_directed()) + return b + + +@nx._dispatch(edge_attrs="weight") +def edge_betweenness_centrality_subset( + G, sources, targets, normalized=False, weight=None +): + r"""Compute betweenness centrality for edges for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|e)$ is the number of those paths + passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A networkx graph. + + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by `2/(n(n-1))` + for graphs, and `1/(n(n-1))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + edges : dictionary + Dictionary of edges with Betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is the same + as in edge_betweenness_centrality() and is designed to make + edge_betweenness_centrality(G) be the same as + edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. 
+ https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + b.update(dict.fromkeys(G.edges(), 0.0)) # b[e] for e in G.edges() + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_edges_subset(b, S, P, sigma, s, targets) + for n in G: # remove nodes to only return edges + del b[n] + b = _rescale_e(b, len(G), normalized=normalized, directed=G.is_directed()) + if G.is_multigraph(): + b = _add_edge_keys(G, b, weight=weight) + return b + + +def _accumulate_subset(betweenness, S, P, sigma, s, targets): + delta = dict.fromkeys(S, 0.0) + target_set = set(targets) - {s} + while S: + w = S.pop() + if w in target_set: + coeff = (delta[w] + 1.0) / sigma[w] + else: + coeff = delta[w] / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets): + """edge_betweenness_centrality_subset helper.""" + delta = dict.fromkeys(S, 0) + target_set = set(targets) + while S: + w = S.pop() + for v in P[w]: + if w in target_set: + c = (sigma[v] / sigma[w]) * (1.0 + delta[w]) + else: + c = delta[w] / len(P[w]) + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, normalized, directed=False): + """betweenness_centrality_subset helper.""" + if normalized: + if n <= 2: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / ((n - 1) * (n - 2)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +def _rescale_e(betweenness, n, normalized, directed=False): + """edge_betweenness_centrality_subset helper.""" + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/closeness.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/closeness.py new file mode 100644 index 0000000000000000000000000000000000000000..6a95ac14ef848c8d146dee8ceba01d3b2de5eeee --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/closeness.py @@ -0,0 +1,281 @@ +""" +Closeness centrality measures. +""" +import functools + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils.decorators import not_implemented_for + +__all__ = ["closeness_centrality", "incremental_closeness_centrality"] + + +@nx._dispatch(edge_attrs="distance") +def closeness_centrality(G, u=None, distance=None, wf_improved=True): + r"""Compute closeness centrality for nodes. + + Closeness centrality [1]_ of a node `u` is the reciprocal of the + average shortest path distance to `u` over all `n-1` reachable nodes. + + .. math:: + + C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + where `d(v, u)` is the shortest-path distance between `v` and `u`, + and `n-1` is the number of nodes reachable from `u`. 
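+
+    As a worked instance (reviewer's addition): at the center of a star
+    graph every other node sits at distance 1, so the sum is `n - 1` and
+    `C(center) = 1`; a leaf sees one node at distance 1 and `n - 2`
+    nodes at distance 2, giving `C(leaf) = (n - 1)/(2n - 3)`.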
Notice that the
+    closeness distance function computes the incoming distance to `u`
+    for directed graphs. To use outward distance, act on `G.reverse()`.
+
+    Notice that higher values of closeness indicate higher centrality.
+
+    Wasserman and Faust propose an improved formula for graphs with
+    more than one connected component. The result is "a ratio of the
+    fraction of actors in the group who are reachable, to the average
+    distance" from the reachable actors [2]_. You might think this
+    scale factor is inverted but it is not. As is, nodes from small
+    components receive a smaller closeness value. Letting `N` denote
+    the number of nodes in the graph,
+
+    .. math::
+
+        C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    u : node, optional
+        Return only the value for node u
+
+    distance : edge attribute key, optional (default=None)
+        Use the specified edge attribute as the edge distance in shortest
+        path calculations. If `None` (the default) all edges have a distance of 1.
+        Absent edge attributes are assigned a distance of 1. Note that no check
+        is performed to ensure that edges have the provided attribute.
+
+    wf_improved : bool, optional (default=True)
+        If True, scale by the fraction of nodes reachable. This gives the
+        Wasserman and Faust improved formula. For single component graphs
+        it is the same as the original formula.
+
+    Returns
+    -------
+    nodes : dictionary
+        Dictionary of nodes with closeness centrality as the value.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> nx.closeness_centrality(G)
+    {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75}
+
+    See Also
+    --------
+    betweenness_centrality, load_centrality, eigenvector_centrality,
+    degree_centrality, incremental_closeness_centrality
+
+    Notes
+    -----
+    The closeness centrality is normalized to `(n-1)/(|G|-1)` where
+    `n` is the number of nodes in the connected part of graph
+    containing the node. If the graph is not completely connected,
+    this algorithm computes the closeness centrality for each
+    connected part separately scaled by that part's size.
+
+    If the 'distance' keyword is set to an edge attribute key then the
+    shortest-path length will be computed using Dijkstra's algorithm with
+    that edge attribute as the edge weight.
+
+    The closeness centrality uses *inward* distance to a node, not outward.
+    If you want to use outward distances, apply the function to `G.reverse()`.
+
+    In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the
+    outward distance rather than the inward distance. If you use a 'distance'
+    keyword and a DiGraph, your results will change between v2.2 and v2.3.
+
+    References
+    ----------
+    .. [1] Linton C. Freeman: Centrality in networks: I.
+       Conceptual clarification. Social Networks 1:215-239, 1979.
+       https://doi.org/10.1016/0378-8733(78)90021-7
+    .. [2] pg. 201 of Wasserman, S. and Faust, K.,
+       Social Network Analysis: Methods and Applications, 1994,
+       Cambridge University Press.
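+
+    A disconnected example of the Wasserman and Faust scaling
+    (reviewer's sketch; values computed by hand): with two 2-node
+    components, each node reaches one neighbor at distance 1, so the
+    unscaled value is 1.0 and the scale factor is `(2 - 1)/(4 - 1)`.
+
+    >>> G = nx.Graph([(0, 1), (2, 3)])
+    >>> nx.closeness_centrality(G, u=0, wf_improved=False)
+    1.0
+    >>> nx.closeness_centrality(G, u=0)
+    0.3333333333333333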
+ """ + if G.is_directed(): + G = G.reverse() # create a reversed graph view + + if distance is not None: + # use Dijkstra's algorithm with specified attribute as edge weight + path_length = functools.partial( + nx.single_source_dijkstra_path_length, weight=distance + ) + else: + path_length = nx.single_source_shortest_path_length + + if u is None: + nodes = G.nodes + else: + nodes = [u] + closeness_dict = {} + for n in nodes: + sp = path_length(G, n) + totsp = sum(sp.values()) + len_G = len(G) + _closeness_centrality = 0.0 + if totsp > 0.0 and len_G > 1: + _closeness_centrality = (len(sp) - 1.0) / totsp + # normalize to number of nodes-1 in connected part + if wf_improved: + s = (len(sp) - 1.0) / (len_G - 1) + _closeness_centrality *= s + closeness_dict[n] = _closeness_centrality + if u is not None: + return closeness_dict[u] + return closeness_dict + + +@not_implemented_for("directed") +@nx._dispatch +def incremental_closeness_centrality( + G, edge, prev_cc=None, insertion=True, wf_improved=True +): + r"""Incremental closeness centrality for nodes. + + Compute closeness centrality for nodes using level-based work filtering + as described in Incremental Algorithms for Closeness Centrality by Sariyuce et al. + + Level-based work filtering detects unnecessary updates to the closeness + centrality and filters them out. + + --- + From "Incremental Algorithms for Closeness Centrality": + + Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two vertices in V + such that there is no edge (u, v) in E. Let :math:`G' = (V, E \cup uv)` + Then :math:`cc[s] = cc'[s]` if and only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. + + Where :math:`dG(u, v)` denotes the length of the shortest path between + two vertices u, v in a graph G, cc[s] is the closeness centrality for a + vertex s in V, and cc'[s] is the closeness centrality for a + vertex s in V, with the (u, v) edge added. + --- + + We use Theorem 1 to filter out updates when adding or removing an edge. + When adding an edge (u, v), we compute the shortest path lengths from all + other nodes to u and to v before the node is added. When removing an edge, + we compute the shortest path lengths after the edge is removed. Then we + apply Theorem 1 to use previously computed closeness centrality for nodes + where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works only for + undirected, unweighted graphs; the distance argument is not supported. + + Closeness centrality [1]_ of a node `u` is the reciprocal of the + sum of the shortest path distances from `u` to all `n-1` other nodes. + Since the sum of distances depends on the number of nodes in the + graph, closeness is normalized by the sum of minimum possible + distances `n-1`. + + .. math:: + + C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + where `d(v, u)` is the shortest-path distance between `v` and `u`, + and `n` is the number of nodes in the graph. + + Notice that higher values of closeness indicate higher centrality. + + Parameters + ---------- + G : graph + A NetworkX graph + + edge : tuple + The modified edge (u, v) in the graph. + + prev_cc : dictionary + The previous closeness centrality for all nodes in the graph. + + insertion : bool, optional + If True (default) the edge was inserted, otherwise it was deleted from the graph. + + wf_improved : bool, optional (default=True) + If True, scale by the fraction of nodes reachable. This gives the + Wasserman and Faust improved formula. For single component graphs + it is the same as the original formula. 
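+
+    A usage sketch (reviewer's addition; the equality below was checked
+    by hand against the non-incremental routine). Note that the input
+    graph itself is restored before the function returns:
+
+    >>> G = nx.path_graph(4)
+    >>> prev_cc = nx.closeness_centrality(G)
+    >>> cc = nx.incremental_closeness_centrality(G, (0, 3), prev_cc=prev_cc)
+    >>> H = G.copy()
+    >>> H.add_edge(0, 3)
+    >>> cc == nx.closeness_centrality(H)
+    True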
+ + Returns + ------- + nodes : dictionary + Dictionary of nodes with closeness centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, closeness_centrality + + Notes + ----- + The closeness centrality is normalized to `(n-1)/(|G|-1)` where + `n` is the number of nodes in the connected part of graph + containing the node. If the graph is not completely connected, + this algorithm computes the closeness centrality for each + connected part separately. + + References + ---------- + .. [1] Freeman, L.C., 1979. Centrality in networks: I. + Conceptual clarification. Social Networks 1, 215--239. + https://doi.org/10.1016/0378-8733(78)90021-7 + .. [2] Sariyuce, A.E. ; Kaya, K. ; Saule, E. ; Catalyiirek, U.V. Incremental + Algorithms for Closeness Centrality. 2013 IEEE International Conference on Big Data + http://sariyuce.com/papers/bigdata13.pdf + """ + if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()): + raise NetworkXError("prev_cc and G do not have the same nodes") + + # Unpack edge + (u, v) = edge + path_length = nx.single_source_shortest_path_length + + if insertion: + # For edge insertion, we want shortest paths before the edge is inserted + du = path_length(G, u) + dv = path_length(G, v) + + G.add_edge(u, v) + else: + G.remove_edge(u, v) + + # For edge removal, we want shortest paths after the edge is removed + du = path_length(G, u) + dv = path_length(G, v) + + if prev_cc is None: + return nx.closeness_centrality(G) + + nodes = G.nodes() + closeness_dict = {} + for n in nodes: + if n in du and n in dv and abs(du[n] - dv[n]) <= 1: + closeness_dict[n] = prev_cc[n] + else: + sp = path_length(G, n) + totsp = sum(sp.values()) + len_G = len(G) + _closeness_centrality = 0.0 + if totsp > 0.0 and len_G > 1: + _closeness_centrality = (len(sp) - 1.0) / totsp + # normalize to number of nodes-1 in connected part + if wf_improved: + s = (len(sp) - 1.0) / (len_G - 1) + _closeness_centrality *= s + closeness_dict[n] = _closeness_centrality + + # Leave the graph as we found it + if insertion: + G.remove_edge(u, v) + else: + G.add_edge(u, v) + + return closeness_dict diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py new file mode 100644 index 0000000000000000000000000000000000000000..ea1b2c8f2f49f97020adf100b495f20ec3f19ce1 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py @@ -0,0 +1,343 @@ +"""Current-flow betweenness centrality measures.""" +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import ( + CGInverseLaplacian, + FullInverseLaplacian, + SuperLUInverseLaplacian, + flow_matrix_row, +) +from networkx.utils import ( + not_implemented_for, + py_random_state, + reverse_cuthill_mckee_ordering, +) + +__all__ = [ + "current_flow_betweenness_centrality", + "approximate_current_flow_betweenness_centrality", + "edge_current_flow_betweenness_centrality", +] + + +@not_implemented_for("directed") +@py_random_state(7) +@nx._dispatch(edge_attrs="weight") +def approximate_current_flow_betweenness_centrality( + G, + normalized=True, + weight=None, + dtype=float, + solver="full", + epsilon=0.5, + kmax=10000, + seed=None, +): + r"""Compute the approximate current-flow betweenness centrality for nodes. 
+ + Approximates the current-flow betweenness centrality within absolute + error of epsilon with high probability [1]_. + + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + epsilon: float + Absolute error tolerance. + + kmax: int + Maximum number of sample node pairs to use for approximation. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + current_flow_betweenness_centrality + + Notes + ----- + The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$ + and the space required is $O(m)$ for $n$ nodes and $m$ edges. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer: + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. 
+ https://doi.org/10.1007/978-3-540-31856-9_44 + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + betweenness = dict.fromkeys(H, 0.0) + nb = (n - 1.0) * (n - 2.0) # normalization factor + cstar = n * (n - 1) / nb + l = 1 # parameter in approximation, adjustable + k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n))) + if k > kmax: + msg = f"Number random pairs k>kmax ({k}>{kmax}) " + raise nx.NetworkXError(msg, "Increase kmax or epsilon") + cstar2k = cstar / (2 * k) + for _ in range(k): + s, t = pair = seed.sample(range(n), 2) + b = np.zeros(n, dtype=dtype) + b[s] = 1 + b[t] = -1 + p = C.solve(b) + for v in H: + if v in pair: + continue + for nbr in H[v]: + w = H[v][nbr].get(weight, 1.0) + betweenness[v] += w * np.abs(p[v] - p[nbr]) * cstar2k + if normalized: + factor = 1.0 + else: + factor = nb / 2.0 + # remap to original node names and "unnormalize" if required + return {ordering[k]: v * factor for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. 
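+
+    On a tree there is a unique path between every pair of nodes, so all
+    current follows it and current-flow betweenness coincides with
+    shortest-path betweenness there (reviewer's note); for example, on
+    `nx.path_graph(3)` the middle node scores 1.0 and the leaves 0.0
+    under the default normalization.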
+ + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(n))) + for i in range(n): + betweenness[s] += (i - pos[i]) * row[i] + betweenness[t] += (n - i - 1 - pos[i]) * row[i] + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for v in H: + betweenness[v] = float((betweenness[v] - v) * 2.0 / nb) + return {ordering[k]: v for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def edge_current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for edges. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (default=float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of edge tuples with betweenness centrality as the value. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraphs. + If the input graph is an instance of DiGraph class, NetworkXError + is raised. + + See Also + -------- + betweenness_centrality + edge_betweenness_centrality + current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. 
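+
+    As a practical note (reviewer's addition, consistent with the "lu
+    (recommended)" guidance above): "full" inverts a dense Laplacian and
+    is only attractive for small graphs, "cg" trades speed for the
+    smallest memory footprint, and "lu" is usually the best compromise,
+    which is why the subset variants later in this package default to
+    `solver="lu"`.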
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm. Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    n = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
+    edges = (tuple(sorted((u, v))) for u, v in H.edges())
+    betweenness = dict.fromkeys(edges, 0.0)
+    if normalized:
+        nb = (n - 1.0) * (n - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for row, e in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        pos = dict(zip(row.argsort()[::-1], range(1, n + 1)))
+        for i in range(n):
+            betweenness[e] += (i + 1 - pos[i]) * row[i]
+            betweenness[e] += (n - i - pos[i]) * row[i]
+        betweenness[e] /= nb
+    return {(ordering[s], ordering[t]): v for (s, t), v in betweenness.items()}
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
new file mode 100644
index 0000000000000000000000000000000000000000..debfca27f55d84d7e40eff227ccdb6e5dd236c6c
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
@@ -0,0 +1,226 @@
+"""Current-flow betweenness centrality measures for subsets of nodes."""
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import flow_matrix_row
+from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
+
+__all__ = [
+    "current_flow_betweenness_centrality_subset",
+    "edge_current_flow_betweenness_centrality_subset",
+]
+
+
+@not_implemented_for("directed")
+@nx._dispatch(edge_attrs="weight")
+def current_flow_betweenness_centrality_subset(
+    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
+):
+    r"""Compute current-flow betweenness centrality for subsets of nodes.
+
+    Current-flow betweenness centrality uses an electrical current
+    model for information spreading in contrast to betweenness
+    centrality which uses shortest paths.
+
+    Current-flow betweenness centrality is also known as
+    random-walk betweenness centrality [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    sources : list of nodes
+      Nodes to use as sources for current
+
+    targets : list of nodes
+      Nodes to use as sinks for current
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by dividing by
+      (n-1)(n-2) where n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype : data type (default=float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver : string (default='lu')
+      Type of linear solver to use for computing the flow matrix.
+ Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + import numpy as np + + from networkx.utils import reverse_cuthill_mckee_ordering + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + mapping = dict(zip(ordering, range(n))) + H = nx.relabel_nodes(G, mapping) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + for ss in sources: + i = mapping[ss] + for tt in targets: + j = mapping[tt] + betweenness[s] += 0.5 * np.abs(row[i] - row[j]) + betweenness[t] += 0.5 * np.abs(row[i] - row[j]) + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for v in H: + betweenness[v] = betweenness[v] / nb + 1.0 / (2 - n) + return {ordering[k]: v for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def edge_current_flow_betweenness_centrality_subset( + G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu" +): + r"""Compute current-flow betweenness centrality for edges using subsets + of nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + sources: list of nodes + Nodes to use as sources for current + + targets: list of nodes + Nodes to use as sinks for current + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (float) + Default data type for internal matrices. 
+      Set to np.float32 for lower memory consumption.
+
+    solver : string (default='lu')
+      Type of linear solver to use for computing the flow matrix.
+      Options are "full" (uses most memory), "lu" (recommended), and
+      "cg" (uses least memory).
+
+    Returns
+    -------
+    edges : dict
+       Dictionary of edge tuples with betweenness centrality as the value.
+
+    See Also
+    --------
+    betweenness_centrality
+    edge_betweenness_centrality
+    current_flow_betweenness_centrality
+
+    Notes
+    -----
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
+    time [1]_, where $I(n-1)$ is the time needed to compute the
+    inverse Laplacian. For a full matrix this is $O(n^3)$ but using
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
+    Laplacian matrix condition number.
+
+    The space required is $O(nw)$ where $w$ is the width of the sparse
+    Laplacian matrix. Worst case is $w=n$ for $O(n^2)$.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm. Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
+    """
+    import numpy as np
+
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    n = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    mapping = dict(zip(ordering, range(n)))
+    H = nx.relabel_nodes(G, mapping)
+    edges = (tuple(sorted((u, v))) for u, v in H.edges())
+    betweenness = dict.fromkeys(edges, 0.0)
+    if normalized:
+        nb = (n - 1.0) * (n - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for row, e in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        for ss in sources:
+            i = mapping[ss]
+            for tt in targets:
+                j = mapping[tt]
+                betweenness[e] += 0.5 * np.abs(row[i] - row[j])
+        betweenness[e] /= nb
+    return {(ordering[s], ordering[t]): v for (s, t), v in betweenness.items()}
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/current_flow_closeness.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
new file mode 100644
index 0000000000000000000000000000000000000000..daefbae902ba7f513f3f7b979c4b4918053eaf9a
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
@@ -0,0 +1,97 @@
+"""Current-flow closeness centrality measures."""
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import (
+    CGInverseLaplacian,
+    FullInverseLaplacian,
+    SuperLUInverseLaplacian,
+)
+from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
+
+__all__ = ["current_flow_closeness_centrality", "information_centrality"]
+
+
+@not_implemented_for("directed")
+@nx._dispatch(edge_attrs="weight")
+def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"):
+    """Compute current-flow closeness centrality for nodes.
+
+    Current-flow closeness centrality is a variant of closeness
+    centrality based on effective resistance between nodes in
+    a network. This metric is also known as information centrality.
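A sketch of the two subset variants defined above, restricting the current to flow between chosen sources and sinks (the cycle graph and endpoints are arbitrary choices, illustrative only):

import networkx as nx

G = nx.cycle_graph(6)
node_scores = nx.current_flow_betweenness_centrality_subset(G, sources=[0], targets=[3])
edge_scores = nx.edge_current_flow_betweenness_centrality_subset(G, sources=[0], targets=[3])
print(node_scores)
print(edge_scores)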
+ + Parameters + ---------- + G : graph + A NetworkX graph. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (default=float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with current flow closeness centrality as the value. + + See Also + -------- + closeness_centrality + + Notes + ----- + The algorithm is from Brandes [1]_. + + See also [2]_ for the original definition of information centrality. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer, + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] Karen Stephenson and Marvin Zelen: + Rethinking centrality: Methods and examples. + Social Networks 11(1):1-37, 1989. + https://doi.org/10.1016/0378-8733(89)90016-6 + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + n = H.number_of_nodes() + L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver + for v in H: + col = C2.get_row(v) + for w in H: + betweenness[v] += col[v] - 2 * col[w] + betweenness[w] += col[v] + for v in H: + betweenness[v] = 1 / (betweenness[v]) + return {ordering[k]: v for k, v in betweenness.items()} + + +information_centrality = current_flow_closeness_centrality diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..2631730dbc0d63e1c907a0accc3170af00c50dfa --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py @@ -0,0 +1,149 @@ +"""Degree centrality measures.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"] + + +@nx._dispatch +def degree_centrality(G): + """Compute the degree centrality for nodes. + + The degree centrality for a node v is the fraction of nodes it + is connected to. + + Parameters + ---------- + G : graph + A networkx graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with degree centrality as the value. 
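An aside on the closeness module above: information_centrality is exported as an alias of current_flow_closeness_centrality, so both names return the same values. A minimal sketch (path graph chosen arbitrarily):

import networkx as nx

G = nx.path_graph(4)
cfc = nx.current_flow_closeness_centrality(G)
print(cfc)
# Same callable under its historical name.
print(nx.information_centrality(G) == cfc)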
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.degree_centrality(G) + {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666} + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.degree()} + return centrality + + +@not_implemented_for("undirected") +@nx._dispatch +def in_degree_centrality(G): + """Compute the in-degree centrality for nodes. + + The in-degree centrality for a node v is the fraction of nodes its + incoming edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with in-degree centrality as values. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.in_degree_centrality(G) + {0: 0.0, 1: 0.3333333333333333, 2: 0.6666666666666666, 3: 0.6666666666666666} + + See Also + -------- + degree_centrality, out_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.in_degree()} + return centrality + + +@not_implemented_for("undirected") +@nx._dispatch +def out_degree_centrality(G): + """Compute the out-degree centrality for nodes. + + The out-degree centrality for a node v is the fraction of nodes its + outgoing edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with out-degree centrality as values. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.out_degree_centrality(G) + {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0} + + See Also + -------- + degree_centrality, in_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. 
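The normalization described in the Notes can be checked directly; a small sketch with an arbitrary digraph:

import networkx as nx

G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
n = len(G)
for v, c in nx.in_degree_centrality(G).items():
    # in-degree centrality is d_in(v) / (n - 1)
    print(v, c, G.in_degree(v) / (n - 1))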
+ """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.out_degree()} + return centrality diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/dispersion.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/dispersion.py new file mode 100644 index 0000000000000000000000000000000000000000..a551c387d88b0c69f4763678b5cc88567455d98b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/dispersion.py @@ -0,0 +1,107 @@ +from itertools import combinations + +import networkx as nx + +__all__ = ["dispersion"] + + +@nx._dispatch +def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0): + r"""Calculate dispersion between `u` and `v` in `G`. + + A link between two actors (`u` and `v`) has a high dispersion when their + mutual ties (`s` and `t`) are not well connected with each other. + + Parameters + ---------- + G : graph + A NetworkX graph. + u : node, optional + The source for the dispersion score (e.g. ego node of the network). + v : node, optional + The target of the dispersion score if specified. + normalized : bool + If True (default) normalize by the embeddedness of the nodes (u and v). + alpha, b, c : float + Parameters for the normalization procedure. When `normalized` is True, + the dispersion value is normalized by:: + + result = ((dispersion + b) ** alpha) / (embeddedness + c) + + as long as the denominator is nonzero. + + Returns + ------- + nodes : dictionary + If u (v) is specified, returns a dictionary of nodes with dispersion + score for all "target" ("source") nodes. If neither u nor v is + specified, returns a dictionary of dictionaries for all nodes 'u' in the + graph with a dispersion score for each node 'v'. + + Notes + ----- + This implementation follows Lars Backstrom and Jon Kleinberg [1]_. Typical + usage would be to run dispersion on the ego network $G_u$ if $u$ were + specified. Running :func:`dispersion` with neither $u$ nor $v$ specified + can take some time to complete. + + References + ---------- + .. [1] Romantic Partnerships and the Dispersion of Social Ties: + A Network Analysis of Relationship Status on Facebook. + Lars Backstrom, Jon Kleinberg. 
+       https://arxiv.org/pdf/1310.6753v1.pdf
+
+    """
+
+    def _dispersion(G_u, u, v):
+        """dispersion for all nodes 'v' in an ego network G_u of node 'u'"""
+        u_nbrs = set(G_u[u])
+        ST = {n for n in G_u[v] if n in u_nbrs}
+        set_uv = {u, v}
+        # all possible ties of connections that u and v share
+        possib = combinations(ST, 2)
+        total = 0
+        for s, t in possib:
+            # neighbors of s that are in G_u, not including u and v
+            nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv
+            # s and t are not directly connected
+            if t not in nbrs_s:
+                # s and t do not share a connection
+                if nbrs_s.isdisjoint(G_u[t]):
+                    # tick for disp(u, v)
+                    total += 1
+        # neighbors that u and v share
+        embeddedness = len(ST)
+
+        dispersion_val = total
+        if normalized:
+            dispersion_val = (total + b) ** alpha
+            if embeddedness + c != 0:
+                dispersion_val /= embeddedness + c
+
+        return dispersion_val
+
+    if u is None:
+        # v and u are not specified
+        if v is None:
+            results = {n: {} for n in G}
+            for u in G:
+                for v in G[u]:
+                    results[u][v] = _dispersion(G, u, v)
+        # u is not specified, but v is
+        else:
+            results = {}
+            for u in G[v]:
+                results[u] = _dispersion(G, v, u)
+    else:
+        # u is specified with no target v
+        if v is None:
+            results = {}
+            for v in G[u]:
+                results[v] = _dispersion(G, u, v)
+        # both u and v are specified
+        else:
+            results = _dispersion(G, u, v)
+
+    return results
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/eigenvector.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/eigenvector.py
new file mode 100644
index 0000000000000000000000000000000000000000..267e7b5102734d0d28f310ad019325d483c931cb
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/eigenvector.py
@@ -0,0 +1,341 @@
+"""Functions for computing eigenvector centrality."""
+import math
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["eigenvector_centrality", "eigenvector_centrality_numpy"]
+
+
+@not_implemented_for("multigraph")
+@nx._dispatch(edge_attrs="weight")
+def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, weight=None):
+    r"""Compute the eigenvector centrality for the graph G.
+
+    Eigenvector centrality computes the centrality for a node by adding
+    the centrality of its predecessors. The centrality for node $i$ is the
+    $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$
+    of maximum modulus that is positive. Such an eigenvector $x$ is
+    defined up to a multiplicative constant by the equation
+
+    .. math::
+
+        \lambda x^T = x^T A,
+
+    where $A$ is the adjacency matrix of the graph G. By definition of
+    row-column product, the equation above is equivalent to
+
+    .. math::
+
+        \lambda x_i = \sum_{j\to i}x_j.
+
+    That is, adding the eigenvector centralities of the predecessors of
+    $i$ one obtains the eigenvector centrality of $i$ multiplied by
+    $\lambda$. In the case of undirected graphs, $x$ also solves the familiar
+    right-eigenvector equation $Ax = \lambda x$.
+
+    By virtue of the Perron–Frobenius theorem [1]_, if G is strongly
+    connected there is a unique eigenvector $x$, and all its entries
+    are strictly positive.
+
+    If G is not strongly connected there might be several left
+    eigenvectors associated with $\lambda$, and some of their elements
+    might be zero.
+
+    Parameters
+    ----------
+    G : graph
+      A networkx graph.
+
+    max_iter : integer, optional (default=100)
+      Maximum number of power iterations.
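A sketch of dispersion on a toy graph in which the mutual ties s and t of u and v are not connected to each other, so the normalized dispersion of the u-v link is high (the expected value of 0.5 here follows from total 1 over embeddedness 2, under the default parameters):

import networkx as nx

G = nx.Graph([("u", "s"), ("u", "t"), ("u", "v"), ("v", "s"), ("v", "t")])
# one disconnected mutual-tie pair, embeddedness 2 -> 1 / 2
print(nx.dispersion(G, "u", "v"))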
+ + tol : float, optional (default=1.0e-6) + Error tolerance (in Euclidean norm) used to check convergence in + power iteration. + + nstart : dictionary, optional (default=None) + Starting value of power iteration for each node. Must have a nonzero + projection on the desired eigenvector for the power method to converge. + If None, this implementation uses an all-ones vector, which is a safe + choice. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. Otherwise holds the + name of the edge attribute used as weight. In this measure the + weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with eigenvector centrality as the value. The + associated vector has unit Euclidean norm and the values are + nonegative. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality(G) + >>> sorted((v, f"{c:0.2f}") for v, c in centrality.items()) + [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')] + + Raises + ------ + NetworkXPointlessConcept + If the graph G is the null graph. + + NetworkXError + If each value in `nstart` is zero. + + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + See Also + -------- + eigenvector_centrality_numpy + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Eigenvector centrality was introduced by Landau [2]_ for chess + tournaments. It was later rediscovered by Wei [3]_ and then + popularized by Kendall [4]_ in the context of sport ranking. Berge + introduced a general definition for graphs based on social connections + [5]_. Bonacich [6]_ reintroduced again eigenvector centrality and made + it popular in link analysis. + + This function computes the left dominant eigenvector, which corresponds + to adding the centrality of predecessors: this is the usual approach. + To add the centrality of successors first reverse the graph with + ``G.reverse()``. + + The implementation uses power iteration [7]_ to compute a dominant + eigenvector starting from the provided vector `nstart`. Convergence is + guaranteed as long as `nstart` has a nonzero projection on a dominant + eigenvector, which certainly happens using the default value. + + The method stops when the change in the computed vector between two + iterations is smaller than an error tolerance of ``G.number_of_nodes() + * tol`` or after ``max_iter`` iterations, but in the second case it + raises an exception. + + This implementation uses $(A + I)$ rather than the adjacency matrix + $A$ because the change preserves eigenvectors, but it shifts the + spectrum, thus guaranteeing convergence even for networks with + negative eigenvalues of maximum modulus. + + References + ---------- + .. [1] Abraham Berman and Robert J. Plemmons. + "Nonnegative Matrices in the Mathematical Sciences." + Classics in Applied Mathematics. SIAM, 1994. + + .. [2] Edmund Landau. + "Zur relativen Wertbemessung der Turnierresultate." + Deutsches Wochenschach, 11:366–369, 1895. + + .. [3] Teh-Hsing Wei. + "The Algebraic Foundations of Ranking Theory." + PhD thesis, University of Cambridge, 1952. + + .. [4] Maurice G. Kendall. + "Further contributions to the theory of paired comparisons." + Biometrics, 11(1):43–62, 1955. + https://www.jstor.org/stable/3001479 + + .. 
[5] Claude Berge + "Théorie des graphes et ses applications." + Dunod, Paris, France, 1958. + + .. [6] Phillip Bonacich. + "Technique for analyzing overlapping memberships." + Sociological Methodology, 4:176–185, 1972. + https://www.jstor.org/stable/270732 + + .. [7] Power iteration:: https://en.wikipedia.org/wiki/Power_iteration + + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "cannot compute centrality for the null graph" + ) + # If no initial vector is provided, start with the all-ones vector. + if nstart is None: + nstart = {v: 1 for v in G} + if all(v == 0 for v in nstart.values()): + raise nx.NetworkXError("initial vector cannot have all zero values") + # Normalize the initial vector so that each entry is in [0, 1]. This is + # guaranteed to never have a divide-by-zero error by the previous line. + nstart_sum = sum(nstart.values()) + x = {k: v / nstart_sum for k, v in nstart.items()} + nnodes = G.number_of_nodes() + # make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = xlast.copy() # Start with xlast times I to iterate with (A+I) + # do the multiplication y^T = x^T A (left eigenvector) + for n in x: + for nbr in G[n]: + w = G[n][nbr].get(weight, 1) if weight else 1 + x[nbr] += xlast[n] * w + # Normalize the vector. The normalization denominator `norm` + # should never be zero by the Perron--Frobenius + # theorem. However, in case it is due to numerical error, we + # assume the norm to be one instead. + norm = math.hypot(*x.values()) or 1 + x = {k: v / norm for k, v in x.items()} + # Check for convergence (in the L_1 norm). + if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol: + return x + raise nx.PowerIterationFailedConvergence(max_iter) + + +@nx._dispatch(edge_attrs="weight") +def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0): + r"""Compute the eigenvector centrality for the graph G. + + Eigenvector centrality computes the centrality for a node by adding + the centrality of its predecessors. The centrality for node $i$ is the + $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$ + of maximum modulus that is positive. Such an eigenvector $x$ is + defined up to a multiplicative constant by the equation + + .. math:: + + \lambda x^T = x^T A, + + where $A$ is the adjacency matrix of the graph G. By definition of + row-column product, the equation above is equivalent to + + .. math:: + + \lambda x_i = \sum_{j\to i}x_j. + + That is, adding the eigenvector centralities of the predecessors of + $i$ one obtains the eigenvector centrality of $i$ multiplied by + $\lambda$. In the case of undirected graphs, $x$ also solves the familiar + right-eigenvector equation $Ax = \lambda x$. + + By virtue of the Perron–Frobenius theorem [1]_, if G is strongly + connected there is a unique eigenvector $x$, and all its entries + are strictly positive. + + If G is not strongly connected there might be several left + eigenvectors associated with $\lambda$, and some of their elements + might be zero. + + Parameters + ---------- + G : graph + A networkx graph. + + max_iter : integer, optional (default=50) + Maximum number of Arnoldi update iterations allowed. + + tol : float, optional (default=0) + Relative accuracy for eigenvalues (stopping criterion). + The default value of 0 implies machine precision. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. Otherwise holds the + name of the edge attribute used as weight. 
In this measure the + weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with eigenvector centrality as the value. The + associated vector has unit Euclidean norm and the values are + nonegative. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality_numpy(G) + >>> print([f"{node} {centrality[node]:0.2f}" for node in centrality]) + ['0 0.37', '1 0.60', '2 0.60', '3 0.37'] + + Raises + ------ + NetworkXPointlessConcept + If the graph G is the null graph. + + ArpackNoConvergence + When the requested convergence is not obtained. The currently + converged eigenvalues and eigenvectors can be found as + eigenvalues and eigenvectors attributes of the exception object. + + See Also + -------- + :func:`scipy.sparse.linalg.eigs` + eigenvector_centrality + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Eigenvector centrality was introduced by Landau [2]_ for chess + tournaments. It was later rediscovered by Wei [3]_ and then + popularized by Kendall [4]_ in the context of sport ranking. Berge + introduced a general definition for graphs based on social connections + [5]_. Bonacich [6]_ reintroduced again eigenvector centrality and made + it popular in link analysis. + + This function computes the left dominant eigenvector, which corresponds + to adding the centrality of predecessors: this is the usual approach. + To add the centrality of successors first reverse the graph with + ``G.reverse()``. + + This implementation uses the + :func:`SciPy sparse eigenvalue solver` (ARPACK) + to find the largest eigenvalue/eigenvector pair using Arnoldi iterations + [7]_. + + References + ---------- + .. [1] Abraham Berman and Robert J. Plemmons. + "Nonnegative Matrices in the Mathematical Sciences." + Classics in Applied Mathematics. SIAM, 1994. + + .. [2] Edmund Landau. + "Zur relativen Wertbemessung der Turnierresultate." + Deutsches Wochenschach, 11:366–369, 1895. + + .. [3] Teh-Hsing Wei. + "The Algebraic Foundations of Ranking Theory." + PhD thesis, University of Cambridge, 1952. + + .. [4] Maurice G. Kendall. + "Further contributions to the theory of paired comparisons." + Biometrics, 11(1):43–62, 1955. + https://www.jstor.org/stable/3001479 + + .. [5] Claude Berge + "Théorie des graphes et ses applications." + Dunod, Paris, France, 1958. + + .. [6] Phillip Bonacich. + "Technique for analyzing overlapping memberships." + Sociological Methodology, 4:176–185, 1972. + https://www.jstor.org/stable/270732 + + .. 
[7] Arnoldi iteration:: https://en.wikipedia.org/wiki/Arnoldi_iteration + + """ + import numpy as np + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "cannot compute centrality for the null graph" + ) + M = nx.to_scipy_sparse_array(G, nodelist=list(G), weight=weight, dtype=float) + _, eigenvector = sp.sparse.linalg.eigs( + M.T, k=1, which="LR", maxiter=max_iter, tol=tol + ) + largest = eigenvector.flatten().real + norm = np.sign(largest.sum()) * sp.linalg.norm(largest) + return dict(zip(G, largest / norm)) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/flow_matrix.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/flow_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..e9cd7e26016e1be01c8d096b0404a33144799eb6 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/flow_matrix.py @@ -0,0 +1,130 @@ +# Helpers for current-flow betweenness and current-flow closeness +# Lazy computations for inverse Laplacian and flow-matrix rows. +import networkx as nx + + +@nx._dispatch(edge_attrs="weight") +def flow_matrix_row(G, weight=None, dtype=float, solver="lu"): + # Generate a row of the current-flow matrix + import numpy as np + + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + L = nx.laplacian_matrix(G, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + w = C.w # w is the Laplacian matrix width + # row-by-row flow matrix + for u, v in sorted(sorted((u, v)) for u, v in G.edges()): + B = np.zeros(w, dtype=dtype) + c = G[u][v].get(weight, 1.0) + B[u % w] = c + B[v % w] = -c + # get only the rows needed in the inverse laplacian + # and multiply to get the flow matrix row + row = B @ C.get_rows(u, v) + yield row, (u, v) + + +# Class to compute the inverse laplacian only for specified rows +# Allows computation of the current-flow matrix without storing entire +# inverse laplacian matrix +class InverseLaplacian: + def __init__(self, L, width=None, dtype=None): + global np + import numpy as np + + (n, n) = L.shape + self.dtype = dtype + self.n = n + if width is None: + self.w = self.width(L) + else: + self.w = width + self.C = np.zeros((self.w, n), dtype=dtype) + self.L1 = L[1:, 1:] + self.init_solver(L) + + def init_solver(self, L): + pass + + def solve(self, r): + raise nx.NetworkXError("Implement solver") + + def solve_inverse(self, r): + raise nx.NetworkXError("Implement solver") + + def get_rows(self, r1, r2): + for r in range(r1, r2 + 1): + self.C[r % self.w, 1:] = self.solve_inverse(r) + return self.C + + def get_row(self, r): + self.C[r % self.w, 1:] = self.solve_inverse(r) + return self.C[r % self.w] + + def width(self, L): + m = 0 + for i, row in enumerate(L): + w = 0 + x, y = np.nonzero(row) + if len(y) > 0: + v = y - i + w = v.max() - v.min() + 1 + m = max(w, m) + return m + + +class FullInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + self.IL = np.zeros(L.shape, dtype=self.dtype) + self.IL[1:, 1:] = np.linalg.inv(self.L1.todense()) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s = self.IL @ rhs + return s + + def solve_inverse(self, r): + return self.IL[r, 1:] + + +class SuperLUInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + import scipy as sp + + self.lusolve = sp.sparse.linalg.factorized(self.L1.tocsc()) + + def solve_inverse(self, r): + rhs = 
np.zeros(self.n, dtype=self.dtype) + rhs[r] = 1 + return self.lusolve(rhs[1:]) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s[1:] = self.lusolve(rhs[1:]) + return s + + +class CGInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + global sp + import scipy as sp + + ilu = sp.sparse.linalg.spilu(self.L1.tocsc()) + n = self.n - 1 + self.M = sp.sparse.linalg.LinearOperator(shape=(n, n), matvec=ilu.solve) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s[1:] = sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0] + return s + + def solve_inverse(self, r): + rhs = np.zeros(self.n, self.dtype) + rhs[r] = 1 + return sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0] diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/group.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/group.py new file mode 100644 index 0000000000000000000000000000000000000000..8207a71a5ae3bd0acfe0d90370ec10304d8a8c67 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/group.py @@ -0,0 +1,785 @@ +"""Group centrality measures.""" +from copy import deepcopy + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _accumulate_endpoints, + _single_source_dijkstra_path_basic, + _single_source_shortest_path_basic, +) +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "group_betweenness_centrality", + "group_closeness_centrality", + "group_degree_centrality", + "group_in_degree_centrality", + "group_out_degree_centrality", + "prominent_group", +] + + +@nx._dispatch(edge_attrs="weight") +def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False): + r"""Compute the group betweenness centrality for a group of nodes. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + C : list or set or list of lists or list of sets + A group or a list of groups containing nodes which belong to G, for which group betweenness + centrality is to be calculated. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))` + where `|V|` is the number of nodes in G and `|C|` is the number of nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. + + Returns + ------- + betweenness : list of floats or float + If C is a single group then return a float. If C is a list with + several groups then return a list of group betweenness centralities. 
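An aside on flow_matrix.py above: flow_matrix_row is an internal generator (nodes must already be labeled 0..n-1) that yields one current-flow row per edge. A minimal sketch, illustrative only and relying on that internal API:

import networkx as nx
from networkx.algorithms.centrality.flow_matrix import flow_matrix_row

G = nx.cycle_graph(4)  # integer labels 0..n-1, as the helper assumes
# solver="full" uses the dense inverse Laplacian, avoiding the SciPy solvers.
for row, (u, v) in flow_matrix_row(G, solver="full"):
    print((u, v), row)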
+
+    See Also
+    --------
+    betweenness_centrality
+
+    Notes
+    -----
+    Group betweenness centrality is described in [1]_ and its importance discussed in [3]_.
+    The initial implementation of the algorithm is mentioned in [2]_. This function uses
+    an improved algorithm presented in [4]_.
+
+    The number of nodes in the group must be a maximum of n - 2 where `n`
+    is the total number of nodes in the graph.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    The total number of paths between source and target is counted
+    differently for directed and undirected graphs. Directed paths
+    between "u" and "v" are counted as two possible paths (one each
+    direction) while undirected paths between "u" and "v" are counted
+    as one path. Said another way, the sum in the expression above is
+    over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs.
+
+
+    References
+    ----------
+    .. [1] M G Everett and S P Borgatti:
+       The Centrality of Groups and Classes.
+       Journal of Mathematical Sociology. 23(3): 181-201. 1999.
+       http://www.analytictech.com/borgatti/group_centrality.htm
+    .. [2] Ulrik Brandes:
+       On Variants of Shortest-Path Betweenness
+       Centrality and their Generic Computation.
+       Social Networks 30(2):136-145, 2008.
+       http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf
+    .. [3] Sourav Medya et. al.:
+       Group Centrality Maximization via Network Design.
+       SIAM International Conference on Data Mining, SDM 2018, 126–134.
+       https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf
+    .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev.
+       "Fast algorithm for successive computation of group betweenness centrality."
+       https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709
+
+    """
+    GBC = []  # initialize betweenness
+    list_of_groups = True
+    # check whether C contains one group or several groups
+    if any(el in G for el in C):
+        C = [C]
+        list_of_groups = False
+    set_v = {node for group in C for node in group}
+    if set_v - G.nodes:  # element(s) of C not in G
+        raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are in C but not in G.")
+
+    # pre-processing
+    PB, sigma, D = _group_preprocessing(G, set_v, weight)
+
+    # the algorithm for each group
+    for group in C:
+        group = set(group)  # set of nodes in group
+        # initialize the matrices of the sigma and the PB
+        GBC_group = 0
+        sigma_m = deepcopy(sigma)
+        PB_m = deepcopy(PB)
+        sigma_m_v = deepcopy(sigma_m)
+        PB_m_v = deepcopy(PB_m)
+        for v in group:
+            GBC_group += PB_m[v][v]
+            for x in group:
+                for y in group:
+                    dxvy = 0
+                    dxyv = 0
+                    dvxy = 0
+                    if not (
+                        sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0
+                    ):
+                        if D[x][v] == D[x][y] + D[y][v]:
+                            dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v]
+                        if D[x][y] == D[x][v] + D[v][y]:
+                            dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y]
+                        if D[v][y] == D[v][x] + D[x][y]:
+                            dvxy = sigma_m[v][x] * sigma[x][y] / sigma[v][y]
+                    sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy)
+                    PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy
+                    if y != v:
+                        PB_m_v[x][y] -= PB_m[x][v] * dxyv
+                    if x != v:
+                        PB_m_v[x][y] -= PB_m[v][y] * dvxy
+            sigma_m, sigma_m_v = sigma_m_v, sigma_m
+            PB_m, PB_m_v = PB_m_v, PB_m
+
+        # endpoints
+        v, c = len(G), len(group)
+        if not endpoints:
+            scale = 0
+            # if the graph is connected then subtract the endpoints from
+            # the count for all the nodes in the graph. else count how many
+            # nodes are connected to the group's nodes and subtract that.
+ if nx.is_directed(G): + if nx.is_strongly_connected(G): + scale = c * (2 * v - c - 1) + elif nx.is_connected(G): + scale = c * (2 * v - c - 1) + if scale == 0: + for group_node1 in group: + for node in D[group_node1]: + if node != group_node1: + if node in group: + scale += 1 + else: + scale += 2 + GBC_group -= scale + + # normalized + if normalized: + scale = 1 / ((v - c) * (v - c - 1)) + GBC_group *= scale + + # If undirected than count only the undirected edges + elif not G.is_directed(): + GBC_group /= 2 + + GBC.append(GBC_group) + if list_of_groups: + return GBC + return GBC[0] + + +def _group_preprocessing(G, set_v, weight): + sigma = {} + delta = {} + D = {} + betweenness = dict.fromkeys(G, 0) + for s in G: + if weight is None: # use BFS + S, P, sigma[s], D[s] = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma[s], D[s] = _single_source_dijkstra_path_basic(G, s, weight) + betweenness, delta[s] = _accumulate_endpoints(betweenness, S, P, sigma[s], s) + for i in delta[s]: # add the paths from s to i and rescale sigma + if s != i: + delta[s][i] += 1 + if weight is not None: + sigma[s][i] = sigma[s][i] / 2 + # building the path betweenness matrix only for nodes that appear in the group + PB = dict.fromkeys(G) + for group_node1 in set_v: + PB[group_node1] = dict.fromkeys(G, 0.0) + for group_node2 in set_v: + if group_node2 not in D[group_node1]: + continue + for node in G: + # if node is connected to the two group nodes than continue + if group_node2 in D[node] and group_node1 in D[node]: + if ( + D[node][group_node2] + == D[node][group_node1] + D[group_node1][group_node2] + ): + PB[group_node1][group_node2] += ( + delta[node][group_node2] + * sigma[node][group_node1] + * sigma[group_node1][group_node2] + / sigma[node][group_node2] + ) + return PB, sigma, D + + +@nx._dispatch(edge_attrs="weight") +def prominent_group( + G, k, weight=None, C=None, endpoints=False, normalized=True, greedy=False +): + r"""Find the prominent group of size $k$ in graph $G$. The prominence of the + group is evaluated by the group betweenness centrality. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int + The number of nodes in the group. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by ``1/((|V|-|C|)(|V|-|C|-1))`` + where ``|V|`` is the number of nodes in G and ``|C|`` is the number of + nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + C : list or set, optional (default=None) + list of nodes which won't be candidates of the prominent group. + + greedy : bool, optional (default=False) + Using a naive greedy algorithm in order to find non-optimal prominent + group. 
For scale free networks the results are negligibly below the optimal + results. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. + + Returns + ------- + max_GBC : float + The group betweenness centrality of the prominent group. + + max_group : list + The list of nodes in the prominent group. + + See Also + -------- + betweenness_centrality, group_betweenness_centrality + + Notes + ----- + Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. + The algorithm is described in [2]_ and is based on techniques mentioned in [4]_. + + The number of nodes in the group must be a maximum of ``n - 2`` where ``n`` + is the total number of nodes in the graph. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + between "u" and "v" are counted as two possible paths (one each + direction) while undirected paths between "u" and "v" are counted + as one path. Said another way, the sum in the expression above is + over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] Rami Puzis, Yuval Elovici, and Shlomi Dolev: + "Finding the Most Prominent Group in Complex Networks" + AI communications 20(4): 287-296, 2007. + https://www.researchgate.net/profile/Rami_Puzis2/publication/220308855 + .. [3] Sourav Medya et. al.: + Group Centrality Maximization via Network Design. + SIAM International Conference on Data Mining, SDM 2018, 126–134. + https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf + .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. + "Fast algorithm for successive computation of group betweenness centrality." + https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709 + """ + import numpy as np + import pandas as pd + + if C is not None: + C = set(C) + if C - G.nodes: # element(s) of C not in G + raise nx.NodeNotFound(f"The node(s) {C - G.nodes} are in C but not in G.") + nodes = list(G.nodes - C) + else: + nodes = list(G.nodes) + DF_tree = nx.Graph() + PB, sigma, D = _group_preprocessing(G, nodes, weight) + betweenness = pd.DataFrame.from_dict(PB) + if C is not None: + for node in C: + # remove from the betweenness all the nodes not part of the group + betweenness.drop(index=node, inplace=True) + betweenness.drop(columns=node, inplace=True) + CL = [node for _, node in sorted(zip(np.diag(betweenness), nodes), reverse=True)] + max_GBC = 0 + max_group = [] + DF_tree.add_node( + 1, + CL=CL, + betweenness=betweenness, + GBC=0, + GM=[], + sigma=sigma, + cont=dict(zip(nodes, np.diag(betweenness))), + ) + + # the algorithm + DF_tree.nodes[1]["heu"] = 0 + for i in range(k): + DF_tree.nodes[1]["heu"] += DF_tree.nodes[1]["cont"][DF_tree.nodes[1]["CL"][i]] + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, 1, D, max_group, nodes, greedy + ) + + v = len(G) + if not endpoints: + scale = 0 + # if the graph is connected then subtract the endpoints from + # the count for all the nodes in the graph. else count how many + # nodes are connected to the group's nodes and subtract that. 
+        if nx.is_directed(G):
+            if nx.is_strongly_connected(G):
+                scale = k * (2 * v - k - 1)
+        elif nx.is_connected(G):
+            scale = k * (2 * v - k - 1)
+        if scale == 0:
+            for group_node1 in max_group:
+                for node in D[group_node1]:
+                    if node != group_node1:
+                        if node in max_group:
+                            scale += 1
+                        else:
+                            scale += 2
+        max_GBC -= scale
+
+    # normalized
+    if normalized:
+        scale = 1 / ((v - k) * (v - k - 1))
+        max_GBC *= scale
+
+    # If undirected then count only the undirected edges
+    elif not G.is_directed():
+        max_GBC /= 2
+    max_GBC = float("%.2f" % max_GBC)
+    return max_GBC, max_group
+
+
+def _dfbnb(G, k, DF_tree, max_GBC, root, D, max_group, nodes, greedy):
+    # stopping condition - if we found a group of size k with a higher GBC, prune
+    if len(DF_tree.nodes[root]["GM"]) == k and DF_tree.nodes[root]["GBC"] > max_GBC:
+        return DF_tree.nodes[root]["GBC"], DF_tree, DF_tree.nodes[root]["GM"]
+    # stopping condition - if the size of group members equals k, or there are fewer
+    # than k - |GM| nodes in the candidate list, or the heuristic function plus the
+    # GBC is below the maximal GBC found, then prune
+    if (
+        len(DF_tree.nodes[root]["GM"]) == k
+        or len(DF_tree.nodes[root]["CL"]) <= k - len(DF_tree.nodes[root]["GM"])
+        or DF_tree.nodes[root]["GBC"] + DF_tree.nodes[root]["heu"] <= max_GBC
+    ):
+        return max_GBC, DF_tree, max_group
+
+    # finding the heuristic of both children
+    node_p, node_m, DF_tree = _heuristic(k, root, DF_tree, D, nodes, greedy)
+
+    # finding the child with the bigger heuristic + GBC and expand
+    # that node first; if greedy, only expand the plus node
+    if greedy:
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+
+    elif (
+        DF_tree.nodes[node_p]["GBC"] + DF_tree.nodes[node_p]["heu"]
+        > DF_tree.nodes[node_m]["GBC"] + DF_tree.nodes[node_m]["heu"]
+    ):
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
+        )
+    else:
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
+        )
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+    return max_GBC, DF_tree, max_group
+
+
+def _heuristic(k, root, DF_tree, D, nodes, greedy):
+    import numpy as np
+
+    # This helper function adds two nodes to DF_tree - a left child and a
+    # right child - and finds their heuristic, CL, GBC, and GM
+    node_p = DF_tree.number_of_nodes() + 1
+    node_m = DF_tree.number_of_nodes() + 2
+    added_node = DF_tree.nodes[root]["CL"][0]
+
+    # adding the plus node
+    DF_tree.add_nodes_from([(node_p, deepcopy(DF_tree.nodes[root]))])
+    DF_tree.nodes[node_p]["GM"].append(added_node)
+    DF_tree.nodes[node_p]["GBC"] += DF_tree.nodes[node_p]["cont"][added_node]
+    root_node = DF_tree.nodes[root]
+    for x in nodes:
+        for y in nodes:
+            dxvy = 0
+            dxyv = 0
+            dvxy = 0
+            if not (
+                root_node["sigma"][x][y] == 0
+                or root_node["sigma"][x][added_node] == 0
+                or root_node["sigma"][added_node][y] == 0
+            ):
+                if D[x][added_node] == D[x][y] + D[y][added_node]:
+                    dxyv = (
+                        root_node["sigma"][x][y]
+                        * root_node["sigma"][y][added_node]
+                        / root_node["sigma"][x][added_node]
+                    )
+                if D[x][y] == D[x][added_node] + D[added_node][y]:
+                    dxvy = (
+                        root_node["sigma"][x][added_node]
+                        * root_node["sigma"][added_node][y]
+                        / root_node["sigma"][x][y]
+                    )
+                if D[added_node][y] == D[added_node][x] + D[x][y]:
+                    dvxy = (
root_node["sigma"][added_node][x] + * root_node["sigma"][x][y] + / root_node["sigma"][added_node][y] + ) + DF_tree.nodes[node_p]["sigma"][x][y] = root_node["sigma"][x][y] * (1 - dxvy) + DF_tree.nodes[node_p]["betweenness"][x][y] = ( + root_node["betweenness"][x][y] - root_node["betweenness"][x][y] * dxvy + ) + if y != added_node: + DF_tree.nodes[node_p]["betweenness"][x][y] -= ( + root_node["betweenness"][x][added_node] * dxyv + ) + if x != added_node: + DF_tree.nodes[node_p]["betweenness"][x][y] -= ( + root_node["betweenness"][added_node][y] * dvxy + ) + + DF_tree.nodes[node_p]["CL"] = [ + node + for _, node in sorted( + zip(np.diag(DF_tree.nodes[node_p]["betweenness"]), nodes), reverse=True + ) + if node not in DF_tree.nodes[node_p]["GM"] + ] + DF_tree.nodes[node_p]["cont"] = dict( + zip(nodes, np.diag(DF_tree.nodes[node_p]["betweenness"])) + ) + DF_tree.nodes[node_p]["heu"] = 0 + for i in range(k - len(DF_tree.nodes[node_p]["GM"])): + DF_tree.nodes[node_p]["heu"] += DF_tree.nodes[node_p]["cont"][ + DF_tree.nodes[node_p]["CL"][i] + ] + + # adding the minus node - don't insert the first node in the CL to GM + # Insert minus node only if isn't greedy type algorithm + if not greedy: + DF_tree.add_nodes_from([(node_m, deepcopy(DF_tree.nodes[root]))]) + DF_tree.nodes[node_m]["CL"].pop(0) + DF_tree.nodes[node_m]["cont"].pop(added_node) + DF_tree.nodes[node_m]["heu"] = 0 + for i in range(k - len(DF_tree.nodes[node_m]["GM"])): + DF_tree.nodes[node_m]["heu"] += DF_tree.nodes[node_m]["cont"][ + DF_tree.nodes[node_m]["CL"][i] + ] + else: + node_m = None + + return node_p, node_m, DF_tree + + +@nx._dispatch(edge_attrs="weight") +def group_closeness_centrality(G, S, weight=None): + r"""Compute the group closeness centrality for a group of nodes. + + Group closeness centrality of a group of nodes $S$ is a measure + of how close the group is to the other nodes in the graph. + + .. math:: + + c_{close}(S) = \frac{|V-S|}{\sum_{v \in V-S} d_{S, v}} + + d_{S, v} = min_{u \in S} (d_{u, v}) + + where $V$ is the set of nodes, $d_{S, v}$ is the distance of + the group $S$ from $v$ defined as above. ($V-S$ is the set of nodes + in $V$ that are not in $S$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group closeness + centrality is to be calculated. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + Raises + ------ + NodeNotFound + If node(s) in S are not present in G. + + Returns + ------- + closeness : float + Group closeness centrality of the group S. + + See Also + -------- + closeness_centrality + + Notes + ----- + The measure was introduced in [1]_. + The formula implemented here is described in [2]_. + + Higher values of closeness indicate greater centrality. + + It is assumed that 1 / 0 is 0 (required in the case of directed graphs, + or when a shortest path length is 0). + + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + For directed graphs, the incoming distance is utilized here. To use the + outward distance, act on `G.reverse()`. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. 
[1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] J. Zhao et. al.: + Measuring and Maximizing Group Closeness Centrality over + Disk Resident Graphs. + WWWConference Proceedings, 2014. 689-694. + https://doi.org/10.1145/2567948.2579356 + """ + if G.is_directed(): + G = G.reverse() # reverse view + closeness = 0 # initialize to 0 + V = set(G) # set of nodes in G + S = set(S) # set of nodes in group S + V_S = V - S # set of nodes in V but not S + shortest_path_lengths = nx.multi_source_dijkstra_path_length(G, S, weight=weight) + # accumulation + for v in V_S: + try: + closeness += shortest_path_lengths[v] + except KeyError: # no path exists + closeness += 0 + try: + closeness = len(V_S) / closeness + except ZeroDivisionError: # 1 / 0 assumed as 0 + closeness = 0 + return closeness + + +@nx._dispatch +def group_degree_centrality(G, S): + """Compute the group degree centrality for a group of nodes. + + Group degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group degree + centrality is to be calculated. + + Raises + ------ + NetworkXError + If node(s) in S are not in G. + + Returns + ------- + centrality : float + Group degree centrality of the group S. + + See Also + -------- + degree_centrality + group_in_degree_centrality + group_out_degree_centrality + + Notes + ----- + The measure was introduced in [1]_. + + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + """ + centrality = len(set().union(*[set(G.neighbors(i)) for i in S]) - set(S)) + centrality /= len(G.nodes()) - len(S) + return centrality + + +@not_implemented_for("undirected") +@nx._dispatch +def group_in_degree_centrality(G, S): + """Compute the group in-degree centrality for a group of nodes. + + Group in-degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members by incoming edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group in-degree + centrality is to be calculated. + + Returns + ------- + centrality : float + Group in-degree centrality of the group S. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + NodeNotFound + If node(s) in S are not in G. + + See Also + -------- + degree_centrality + group_degree_centrality + group_out_degree_centrality + + Notes + ----- + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph, + so for group in-degree centrality, the reverse graph is used. + """ + return group_degree_centrality(G.reverse(), S) + + +@not_implemented_for("undirected") +@nx._dispatch +def group_out_degree_centrality(G, S): + """Compute the group out-degree centrality for a group of nodes. 
+ + Group out-degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members by outgoing edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group out-degree + centrality is to be calculated. + + Returns + ------- + centrality : float + Group out-degree centrality of the group S. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + NodeNotFound + If node(s) in S are not in G. + + See Also + -------- + degree_centrality + group_degree_centrality + group_in_degree_centrality + + Notes + ----- + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph, + so for group out-degree centrality, the graph itself is used. + """ + return group_degree_centrality(G, S) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/harmonic.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/harmonic.py new file mode 100644 index 0000000000000000000000000000000000000000..86b5020f96c49f4d647bea5d1624b862ee54c849 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/harmonic.py @@ -0,0 +1,80 @@ +"""Functions for computing the harmonic centrality of a graph.""" +from functools import partial + +import networkx as nx + +__all__ = ["harmonic_centrality"] + + +@nx._dispatch(edge_attrs="distance") +def harmonic_centrality(G, nbunch=None, distance=None, sources=None): + r"""Compute harmonic centrality for nodes. + + Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal + of the shortest path distances from all other nodes to `u` + + .. math:: + + C(u) = \sum_{v \neq u} \frac{1}{d(v, u)} + + where `d(v, u)` is the shortest-path distance between `v` and `u`. + + If `sources` is given as an argument, the returned harmonic centrality + values are calculated as the sum of the reciprocals of the shortest + path distances from the nodes specified in `sources` to `u` instead + of from all nodes to `u`. + + Notice that higher values indicate higher centrality. + + Parameters + ---------- + G : graph + A NetworkX graph + + nbunch : container (default: all nodes in G) + Container of nodes for which harmonic centrality values are calculated. + + sources : container (default: all nodes in G) + Container of nodes `v` over which reciprocal distances are computed. + Nodes not in `G` are silently ignored. + + distance : edge attribute key, optional (default=None) + Use the specified edge attribute as the edge distance in shortest + path calculations. If `None`, then each edge will have distance equal to 1. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with harmonic centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, closeness_centrality + + Notes + ----- + If the 'distance' keyword is set to an edge attribute key then the + shortest-path length will be computed using Dijkstra's algorithm with + that edge attribute as the edge weight. + + References + ---------- + .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality." + Internet Mathematics 10.3-4 (2014): 222-262.
+ """ + + nbunch = set(G.nbunch_iter(nbunch)) if nbunch is not None else set(G.nodes) + sources = set(G.nbunch_iter(sources)) if sources is not None else G.nodes + + spl = partial(nx.shortest_path_length, G, weight=distance) + centrality = {u: 0 for u in nbunch} + for v in sources: + dist = spl(v) + for u in nbunch.intersection(dist): + d = dist[u] + if d == 0: # handle u == v and edges with 0 weight + continue + centrality[u] += 1 / d + + return centrality diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/katz.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/katz.py new file mode 100644 index 0000000000000000000000000000000000000000..3c18e5aa2b25fbefb88411e691de3380bc94f012 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/katz.py @@ -0,0 +1,331 @@ +"""Katz centrality.""" +import math + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["katz_centrality", "katz_centrality_numpy"] + + +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def katz_centrality( + G, + alpha=0.1, + beta=1.0, + max_iter=1000, + tol=1.0e-6, + nstart=None, + normalized=True, + weight=None, +): + r"""Compute the Katz centrality for the nodes of the graph G. + + Katz centrality computes the centrality for a node based on the centrality + of its neighbors. It is a generalization of the eigenvector centrality. The + Katz centrality for node $i$ is + + .. math:: + + x_i = \alpha \sum_{j} A_{ij} x_j + \beta, + + where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$. + + The parameter $\beta$ controls the initial centrality and + + .. math:: + + \alpha < \frac{1}{\lambda_{\max}}. + + Katz centrality computes the relative influence of a node within a + network by measuring the number of the immediate neighbors (first + degree nodes) and also all other nodes in the network that connect + to the node under consideration through these immediate neighbors. + + Extra weight can be provided to immediate neighbors through the + parameter $\beta$. Connections made with distant neighbors + are, however, penalized by an attenuation factor $\alpha$ which + should be strictly less than the inverse largest eigenvalue of the + adjacency matrix in order for the Katz centrality to be computed + correctly. More information is provided in [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + alpha : float, optional (default=0.1) + Attenuation factor + + beta : scalar or dictionary, optional (default=1.0) + Weight attributed to the immediate neighborhood. If not a scalar, the + dictionary must have an value for every node. + + max_iter : integer, optional (default=1000) + Maximum number of iterations in power method. + + tol : float, optional (default=1.0e-6) + Error tolerance used to check convergence in power method iteration. + + nstart : dictionary, optional + Starting value of Katz iteration for each node. + + normalized : bool, optional (default=True) + If True normalize the resulting values. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Katz centrality as the value. 
+ + Raises + ------ + NetworkXError + If the parameter `beta` is not a scalar but lacks a value for at least + one node + + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + Examples + -------- + >>> import math + >>> G = nx.path_graph(4) + >>> phi = (1 + math.sqrt(5)) / 2.0  # largest eigenvalue of adj matrix + >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01) + >>> for n, c in sorted(centrality.items()): + ...     print(f"{n} {c:.2f}") + 0 0.37 + 1 0.60 + 2 0.60 + 3 0.37 + + See Also + -------- + katz_centrality_numpy + eigenvector_centrality + eigenvector_centrality_numpy + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Katz centrality was introduced by [2]_. + + This algorithm uses the power method to find the eigenvector + corresponding to the largest eigenvalue of the adjacency matrix of ``G``. + The parameter ``alpha`` should be strictly less than the inverse of the largest + eigenvalue of the adjacency matrix for the algorithm to converge. + You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$, the largest + eigenvalue of the adjacency matrix. + The iteration will stop after ``max_iter`` iterations or an error tolerance of + ``number_of_nodes(G) * tol`` has been reached. + + When $\alpha = 1/\lambda_{\max}$ and $\beta=0$, Katz centrality is the same + as eigenvector centrality. + + For directed graphs this finds "left" eigenvectors which correspond + to the in-edges in the graph. For out-edges Katz centrality, + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Mark E. J. Newman: + Networks: An Introduction. + Oxford University Press, USA, 2010, p. 720. + .. [2] Leo Katz: + A New Status Index Derived from Sociometric Index. + Psychometrika 18(1):39–43, 1953 + https://link.springer.com/content/pdf/10.1007/BF02289026.pdf + """ + if len(G) == 0: + return {} + + nnodes = G.number_of_nodes() + + if nstart is None: + # choose starting vector with entries of 0 + x = {n: 0 for n in G} + else: + x = nstart + + try: + b = dict.fromkeys(G, float(beta)) + except (TypeError, ValueError, AttributeError) as err: + b = beta + if set(beta) != set(G): + raise nx.NetworkXError( + "beta dictionary must have a value for every node" + ) from err + + # make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = dict.fromkeys(xlast, 0) + # do the multiplication y^T = alpha * x^T A + beta + for n in x: + for nbr in G[n]: + x[nbr] += xlast[n] * G[n][nbr].get(weight, 1) + for n in x: + x[n] = alpha * x[n] + b[n] + + # check convergence + error = sum(abs(x[n] - xlast[n]) for n in x) + if error < nnodes * tol: + if normalized: + # normalize vector; hypot of a nonzero vector should never + # be zero, but guard against the degenerate case anyway + try: + s = 1.0 / math.hypot(*x.values()) + except ZeroDivisionError: + s = 1.0 + else: + s = 1 + for n in x: + x[n] *= s + return x + raise nx.PowerIterationFailedConvergence(max_iter) + + +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None): + r"""Compute the Katz centrality for the graph G. + + Katz centrality computes the centrality for a node based on the centrality + of its neighbors. It is a generalization of the eigenvector centrality. The + Katz centrality for node $i$ is + + ..
math:: + + x_i = \alpha \sum_{j} A_{ij} x_j + \beta, + + where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$. + + The parameter $\beta$ controls the initial centrality and + + .. math:: + + \alpha < \frac{1}{\lambda_{\max}}. + + Katz centrality computes the relative influence of a node within a + network by measuring the number of the immediate neighbors (first + degree nodes) and also all other nodes in the network that connect + to the node under consideration through these immediate neighbors. + + Extra weight can be provided to immediate neighbors through the + parameter $\beta$. Connections made with distant neighbors + are, however, penalized by an attenuation factor $\alpha$ which + should be strictly less than the inverse largest eigenvalue of the + adjacency matrix in order for the Katz centrality to be computed + correctly. More information is provided in [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + alpha : float + Attenuation factor + + beta : scalar or dictionary, optional (default=1.0) + Weight attributed to the immediate neighborhood. If not a scalar the + dictionary must have an value for every node. + + normalized : bool + If True normalize the resulting values. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Katz centrality as the value. + + Raises + ------ + NetworkXError + If the parameter `beta` is not a scalar but lacks a value for at least + one node + + Examples + -------- + >>> import math + >>> G = nx.path_graph(4) + >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix + >>> centrality = nx.katz_centrality_numpy(G, 1 / phi) + >>> for n, c in sorted(centrality.items()): + ... print(f"{n} {c:.2f}") + 0 0.37 + 1 0.60 + 2 0.60 + 3 0.37 + + See Also + -------- + katz_centrality + eigenvector_centrality_numpy + eigenvector_centrality + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Katz centrality was introduced by [2]_. + + This algorithm uses a direct linear solver to solve the above equation. + The parameter ``alpha`` should be strictly less than the inverse of largest + eigenvalue of the adjacency matrix for there to be a solution. + You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest + eigenvalue of the adjacency matrix. + + When $\alpha = 1/\lambda_{\max}$ and $\beta=0$, Katz centrality is the same + as eigenvector centrality. + + For directed graphs this finds "left" eigenvectors which corresponds + to the in-edges in the graph. For out-edges Katz centrality + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Mark E. J. Newman: + Networks: An Introduction. + Oxford University Press, USA, 2010, p. 173. + .. [2] Leo Katz: + A New Status Index Derived from Sociometric Index. 
+ Psychometrika 18(1):39–43, 1953 + https://link.springer.com/content/pdf/10.1007/BF02289026.pdf + """ + import numpy as np + + if len(G) == 0: + return {} + try: + nodelist = beta.keys() + if set(nodelist) != set(G): + raise nx.NetworkXError("beta dictionary must have a value for every node") + b = np.array(list(beta.values()), dtype=float) + except AttributeError: + nodelist = list(G) + try: + b = np.ones((len(nodelist), 1)) * beta + except (TypeError, ValueError, AttributeError) as err: + raise nx.NetworkXError("beta must be a number") from err + + A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight).todense().T + n = A.shape[0] + centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b).squeeze() + + # Normalize: rely on truediv to cast to float + norm = np.sign(sum(centrality)) * np.linalg.norm(centrality) if normalized else 1 + return dict(zip(nodelist, centrality / norm)) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/laplacian.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/laplacian.py new file mode 100644 index 0000000000000000000000000000000000000000..e0a9a6d517254b72b130474577688ceb3d02ad8c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/laplacian.py @@ -0,0 +1,146 @@ +""" +Laplacian centrality measures. +""" +import networkx as nx + +__all__ = ["laplacian_centrality"] + + +@nx._dispatch(edge_attrs="weight") +def laplacian_centrality( + G, normalized=True, nodelist=None, weight="weight", walk_type=None, alpha=0.95 +): + r"""Compute the Laplacian centrality for nodes in the graph `G`. + + The Laplacian Centrality of a node ``i`` is measured by the drop in the + Laplacian Energy after deleting node ``i`` from the graph. The Laplacian Energy + is the sum of the squared eigenvalues of a graph's Laplacian matrix. + + .. math:: + + C_L(u_i,G) = \frac{(\Delta E)_i}{E_L (G)} = \frac{E_L (G)-E_L (G_i)}{E_L (G)} + + E_L (G) = \sum_{i=1}^{n} \lambda_i^2 + + Where $E_L (G)$ is the Laplacian energy of graph `G`, + $E_L (G_i)$ is the Laplacian energy of graph `G` after deleting node ``i`` + and $\lambda_i$ are the eigenvalues of `G`'s Laplacian matrix. + This formula shows the normalized value. Without normalization, + the numerator on the right side is returned. + + Parameters + ---------- + G : graph + A networkx graph + + normalized : bool (default = True) + If True the centrality score for each node is the drop in Laplacian + energy divided by the Laplacian energy $E_L (G)$ of the full graph. + If False the centrality score for each node is the raw drop in Laplacian + energy when that node is removed. + + nodelist : list, optional (default = None) + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight: string or None, optional (default=`weight`) + Optional parameter `weight` to compute the Laplacian matrix. + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + Optional parameter `walk_type` used when calling + :func:`directed_laplacian_matrix `. + If None, the transition matrix is selected depending on the properties + of the graph. Otherwise can be `random`, `lazy`, or `pagerank`. + + alpha : real (default = 0.95) + Optional parameter `alpha` used when calling + :func:`directed_laplacian_matrix `. + (1 - alpha) is the teleportation probability used with pagerank. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Laplacian centrality as the value.
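# Hedged sanity check: the power-method katz_centrality above and the
# direct solver katz_centrality_numpy should agree (up to the iteration
# tolerance) whenever alpha is safely below 1/lambda_max.
import math

import networkx as nx

G = nx.path_graph(4)
phi = (1 + math.sqrt(5)) / 2  # lambda_max of this adjacency matrix
alpha = 0.9 / phi             # strictly inside the convergence region
c_iter = nx.katz_centrality(G, alpha=alpha)
c_solve = nx.katz_centrality_numpy(G, alpha=alpha)
assert all(abs(c_iter[n] - c_solve[n]) < 1e-4 for n in G)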
+ + Examples + -------- + >>> G = nx.Graph() + >>> edges = [(0, 1, 4), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2), (4, 5, 1)] + >>> G.add_weighted_edges_from(edges) + >>> sorted((v, f"{c:0.2f}") for v, c in laplacian_centrality(G).items()) + [(0, '0.70'), (1, '0.90'), (2, '0.28'), (3, '0.22'), (4, '0.26'), (5, '0.04')] + + Notes + ----- + The algorithm is implemented based on [1]_ with an extension to directed graphs + using the ``directed_laplacian_matrix`` function. + + Raises + ------ + NetworkXPointlessConcept + If the graph `G` is the null graph. + ZeroDivisionError + If the graph `G` has no edges (is empty) and normalization is requested. + + References + ---------- + .. [1] Qi, X., Fuller, E., Wu, Q., Wu, Y., and Zhang, C.-Q. (2012). + Laplacian centrality: A new centrality measure for weighted networks. + Information Sciences, 194:240-253. + https://math.wvu.edu/~cqzhang/Publication-files/my-paper/INS-2012-Laplacian-W.pdf + + See Also + -------- + :func:`~networkx.linalg.laplacianmatrix.directed_laplacian_matrix` + :func:`~networkx.linalg.laplacianmatrix.laplacian_matrix` + """ + import numpy as np + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXPointlessConcept("null graph has no centrality defined") + if G.size(weight=weight) == 0: + if normalized: + raise ZeroDivisionError("graph with no edges has zero full energy") + return {n: 0 for n in G} + + if nodelist is not None: + nodeset = set(G.nbunch_iter(nodelist)) + if len(nodeset) != len(nodelist): + raise nx.NetworkXError("nodelist has duplicate nodes or nodes not in G") + nodes = nodelist + [n for n in G if n not in nodeset] + else: + nodelist = nodes = list(G) + + if G.is_directed(): + lap_matrix = nx.directed_laplacian_matrix(G, nodes, weight, walk_type, alpha) + else: + lap_matrix = nx.laplacian_matrix(G, nodes, weight).toarray() + + full_energy = np.power(sp.linalg.eigh(lap_matrix, eigvals_only=True), 2).sum() + + # calculate laplacian centrality + laplace_centralities_dict = {} + for i, node in enumerate(nodelist): + # remove row and col i from lap_matrix + all_but_i = list(np.arange(lap_matrix.shape[0])) + all_but_i.remove(i) + A_2 = lap_matrix[all_but_i, :][:, all_but_i] + + # Adjust diagonal for removed row + new_diag = lap_matrix.diagonal() - abs(lap_matrix[:, i]) + np.fill_diagonal(A_2, new_diag[all_but_i]) + + if len(all_but_i) > 0: # catches degenerate case of single node + new_energy = np.power(sp.linalg.eigh(A_2, eigvals_only=True), 2).sum() + else: + new_energy = 0.0 + + lapl_cent = full_energy - new_energy + if normalized: + lapl_cent = lapl_cent / full_energy + + laplace_centralities_dict[node] = lapl_cent + + return laplace_centralities_dict diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/load.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/load.py new file mode 100644 index 0000000000000000000000000000000000000000..9a81cc43282d2cdd19fb365d6265c3d128faddc9 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/load.py @@ -0,0 +1,199 @@ +"""Load centrality.""" +from operator import itemgetter + +import networkx as nx + +__all__ = ["load_centrality", "edge_load_centrality"] + + +@nx._dispatch(edge_attrs="weight") +def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True, weight=None): + """Compute load centrality for nodes. + + The load centrality of a node is the fraction of all shortest + paths that pass through that node. + + Parameters + ---------- + G : graph + A networkx graph. 
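# Illustrative check of the normalization convention documented above:
# with normalized=False, laplacian_centrality returns the raw drop in
# Laplacian energy, and the normalized value is that drop over E_L(G).
import networkx as nx

G = nx.path_graph(4)
raw = nx.laplacian_centrality(G, normalized=False)
rel = nx.laplacian_centrality(G)  # normalized (the default)
full_energy = raw[0] / rel[0]     # recovers E_L(G), identical for every node
assert all(abs(raw[n] / full_energy - rel[n]) < 1e-9 for n in G)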
+ + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + cutoff : bool, optional (default=None) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with centrality as the value. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Load centrality is slightly different than betweenness. It was originally + introduced by [2]_. For this load algorithm see [1]_. + + References + ---------- + .. [1] Mark E. J. Newman: + Scientific collaboration networks. II. + Shortest paths, weighted networks, and centrality. + Physical Review E 64, 016132, 2001. + http://journals.aps.org/pre/abstract/10.1103/PhysRevE.64.016132 + .. [2] Kwang-Il Goh, Byungnam Kahng and Doochul Kim + Universal behavior of Load Distribution in Scale-Free Networks. + Physical Review Letters 87(27):1–4, 2001. + https://doi.org/10.1103/PhysRevLett.87.278701 + """ + if v is not None: # only one node + betweenness = 0.0 + for source in G: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + betweenness += ubetween[v] if v in ubetween else 0 + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + betweenness *= 1.0 / ((order - 1) * (order - 2)) + else: + betweenness = {}.fromkeys(G, 0.0) + for source in betweenness: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + for vk in ubetween: + betweenness[vk] += ubetween[vk] + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + scale = 1.0 / ((order - 1) * (order - 2)) + for v in betweenness: + betweenness[v] *= scale + return betweenness # all nodes + + +def _node_betweenness(G, source, cutoff=False, normalized=True, weight=None): + """Node betweenness_centrality helper: + + See betweenness_centrality for what you probably want. + This actually computes "load" and not betweenness. + See https://networkx.lanl.gov/ticket/103 + + This calculates the load of each node for paths from a single source. + (The fraction of number of shortests paths from source that go + through each node.) + + To get the load for a node you need to do all-pairs shortest paths. + + If weight is not None then use Dijkstra for finding shortest paths. + """ + # get the predecessor and path length data + if weight is None: + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + else: + (pred, length) = nx.dijkstra_predecessor_and_distance(G, source, cutoff, weight) + + # order the nodes by path length + onodes = [(l, vert) for (vert, l) in length.items()] + onodes.sort() + onodes[:] = [vert for (l, vert) in onodes if l > 0] + + # initialize betweenness + between = {}.fromkeys(length, 1.0) + + while onodes: + v = onodes.pop() + if v in pred: + num_paths = len(pred[v]) # Discount betweenness if more than + for x in pred[v]: # one shortest path. 
+ if x == source: # stop if hit source because all remaining v + break # also have pred[v]==[source] + between[x] += between[v] / num_paths + # remove source + for v in between: + between[v] -= 1 + # rescale to be between 0 and 1 + if normalized: + l = len(between) + if l > 2: + # scale by 1/the number of possible paths + scale = 1 / ((l - 1) * (l - 2)) + for v in between: + between[v] *= scale + return between + + +load_centrality = newman_betweenness_centrality + + +@nx._dispatch +def edge_load_centrality(G, cutoff=False): + """Compute edge load. + + WARNING: This concept of edge load has not been analysed + or discussed outside of NetworkX that we know of. + It is based loosely on load_centrality in the sense that + it counts the number of shortest paths which cross each edge. + This function is for demonstration and testing purposes. + + Parameters + ---------- + G : graph + A networkx graph + + cutoff : bool, optional (default=False) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + A dict keyed by edge 2-tuple to the number of shortest paths + which use that edge. Where more than one path is shortest + the count is divided equally among paths. + """ + betweenness = {} + for u, v in G.edges(): + betweenness[(u, v)] = 0.0 + betweenness[(v, u)] = 0.0 + + for source in G: + ubetween = _edge_betweenness(G, source, cutoff=cutoff) + for e, ubetweenv in ubetween.items(): + betweenness[e] += ubetweenv # cumulative total + return betweenness + + +def _edge_betweenness(G, source, nodes=None, cutoff=False): + """Edge betweenness helper.""" + # get the predecessor data + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + # order the nodes by path length + onodes = [n for n, d in sorted(length.items(), key=itemgetter(1))] + # initialize betweenness, doesn't account for any edge weights + between = {} + for u, v in G.edges(nodes): + between[(u, v)] = 1.0 + between[(v, u)] = 1.0 + + while onodes: # work through all paths + v = onodes.pop() + if v in pred: + # Discount betweenness if more than one shortest path. + num_paths = len(pred[v]) + for w in pred[v]: + if w in pred: + # Discount betweenness, mult path + num_paths = len(pred[w]) + for x in pred[w]: + between[(w, x)] += between[(v, w)] / num_paths + between[(x, w)] += between[(w, v)] / num_paths + return between diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/percolation.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/percolation.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5d5ce6d7ddc0f81fbbebc7512901dc82d858ef --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/percolation.py @@ -0,0 +1,128 @@ +"""Percolation centrality measures.""" + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = ["percolation_centrality"] + + +@nx._dispatch(node_attrs="attribute", edge_attrs="weight") +def percolation_centrality(G, attribute="percolation", states=None, weight=None): + r"""Compute the percolation centrality for nodes. + + Percolation centrality of a node $v$, at a given time, is defined + as the proportion of ‘percolated paths’ that go through that node. + + This measure quantifies relative impact of nodes based on their + topological connectivity, as well as their percolation states. 
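# Illustrative sketch: on a tree shortest paths are unique, so the load
# centrality computed above coincides with betweenness centrality.
import networkx as nx

G = nx.star_graph(4)  # hub 0 with leaves 1..4
load = nx.load_centrality(G)
btw = nx.betweenness_centrality(G)
assert all(abs(load[n] - btw[n]) < 1e-12 for n in G)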
+ + Percolation states of nodes are used to depict network percolation + scenarios (such as during infection transmission in a social network + of individuals, spreading of computer viruses on computer networks, or + transmission of disease over a network of towns) over time. In this + measure usually the percolation state is expressed as a decimal + between 0.0 and 1.0. + + When all nodes are in the same percolated state this measure is + equivalent to betweenness centrality. + + Parameters + ---------- + G : graph + A NetworkX graph. + + attribute : None or string, optional (default='percolation') + Name of the node attribute to use for percolation state, used + if `states` is None. If a node does not set the attribute the + state of that node will be set to the default value of 1. + If all nodes do not have the attribute all nodes will be set to + 1 and the centrality measure will be equivalent to betweenness centrality. + + states : None or dict, optional (default=None) + Specify percolation states for the nodes, nodes as keys states + as values. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + + Returns + ------- + nodes : dictionary + Dictionary of nodes with percolation centrality as the value. + + See Also + -------- + betweenness_centrality + + Notes + ----- + The algorithm is from Mahendra Piraveenan, Mikhail Prokopenko, and + Liaquat Hossain [1]_ + Pair dependencies are calculated and accumulated using [2]_ + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] Mahendra Piraveenan, Mikhail Prokopenko, Liaquat Hossain + Percolation Centrality: Quantifying Graph-Theoretic Impact of Nodes + during Percolation in Networks + http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053095 + .. [2] Ulrik Brandes: + A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. 
+ https://doi.org/10.1080/0022250X.2001.9990249 + """ + percolation = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + + nodes = G + + if states is None: + states = nx.get_node_attributes(nodes, attribute, default=1) + + # sum of all percolation states + p_sigma_x_t = 0.0 + for v in states.values(): + p_sigma_x_t += v + + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + # accumulation + percolation = _accumulate_percolation( + percolation, S, P, sigma, s, states, p_sigma_x_t + ) + + n = len(G) + + for v in percolation: + percolation[v] *= 1 / (n - 2) + + return percolation + + +def _accumulate_percolation(percolation, S, P, sigma, s, states, p_sigma_x_t): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + # percolation weight + pw_s_w = states[s] / (p_sigma_x_t - states[w]) + percolation[w] += delta[w] * pw_s_w + return percolation diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/reaching.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/reaching.py new file mode 100644 index 0000000000000000000000000000000000000000..7b9eac564acc0dcde38409007e9df38863ee24de --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/reaching.py @@ -0,0 +1,206 @@ +"""Functions for computing reaching centrality of a node or a graph.""" + +import networkx as nx +from networkx.utils import pairwise + +__all__ = ["global_reaching_centrality", "local_reaching_centrality"] + + +def _average_weight(G, path, weight=None): + """Returns the average weight of an edge in a weighted path. + + Parameters + ---------- + G : graph + A networkx graph. + + path: list + A list of vertices that define the path. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. Then the average weight of an edge + is assumed to be the multiplicative inverse of the length of the path. + Otherwise holds the name of the edge attribute used as weight. + """ + path_length = len(path) - 1 + if path_length <= 0: + return 0 + if weight is None: + return 1 / path_length + total_weight = sum(G.edges[i, j][weight] for i, j in pairwise(path)) + return total_weight / path_length + + +@nx._dispatch(edge_attrs="weight") +def global_reaching_centrality(G, weight=None, normalized=True): + """Returns the global reaching centrality of a directed graph. + + The *global reaching centrality* of a weighted directed graph is the + average over all nodes of the difference between the local reaching + centrality of the node and the greatest local reaching centrality of + any node in the graph [1]_. For more information on the local + reaching centrality, see :func:`local_reaching_centrality`. + Informally, the local reaching centrality is the proportion of the + graph that is reachable from the neighbors of the node. + + Parameters + ---------- + G : DiGraph + A networkx DiGraph. + + weight : None or string, optional (default=None) + Attribute to use for edge weights. If ``None``, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. + + Returns + ------- + h : float + The global reaching centrality of the graph. 
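# The docstring above notes that percolation centrality reduces to
# betweenness centrality when every node shares the same percolation
# state; a small hedged check of that claim.
import networkx as nx

G = nx.path_graph(5)
states = {n: 0.5 for n in G}  # identical state everywhere
p = nx.percolation_centrality(G, states=states)
b = nx.betweenness_centrality(G)  # normalized, like percolation
assert all(abs(p[n] - b[n]) < 1e-12 for n in G)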
+ + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> G.add_edge(1, 3) + >>> nx.global_reaching_centrality(G) + 1.0 + >>> G.add_edge(3, 2) + >>> nx.global_reaching_centrality(G) + 0.75 + + See also + -------- + local_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. + https://doi.org/10.1371/journal.pone.0033799 + """ + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + total_weight = G.size(weight=weight) + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + + # If provided, weights must be interpreted as connection strength + # (so higher weights are more likely to be chosen). However, the + # shortest path algorithms in NetworkX assume the provided "weight" + # is actually a distance (so edges with higher weight are less + # likely to be chosen). Therefore we need to invert the weights when + # computing shortest paths. + # + # If weight is None, we leave it as-is so that the shortest path + # algorithm can use a faster, unweighted algorithm. + if weight is not None: + + def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + shortest_paths = nx.shortest_path(G, weight=as_distance) + else: + shortest_paths = nx.shortest_path(G) + + centrality = local_reaching_centrality + # TODO This can be trivially parallelized. + lrc = [ + centrality(G, node, paths=paths, weight=weight, normalized=normalized) + for node, paths in shortest_paths.items() + ] + + max_lrc = max(lrc) + return sum(max_lrc - c for c in lrc) / (len(G) - 1) + + +@nx._dispatch(edge_attrs="weight") +def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True): + """Returns the local reaching centrality of a node in a directed + graph. + + The *local reaching centrality* of a node in a directed graph is the + proportion of other nodes reachable from that node [1]_. + + Parameters + ---------- + G : DiGraph + A NetworkX DiGraph. + + v : node + A node in the directed graph `G`. + + paths : dictionary (default=None) + If this is not `None` it must be a dictionary representation + of single-source shortest paths, as computed by, for example, + :func:`networkx.shortest_path` with source node `v`. Use this + keyword argument if you intend to invoke this function many + times but don't want the paths to be recomputed each time. + + weight : None or string, optional (default=None) + Attribute to use for edge weights. If `None`, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. + + Returns + ------- + h : float + The local reaching centrality of the node ``v`` in the graph + ``G``. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from([(1, 2), (1, 3)]) + >>> nx.local_reaching_centrality(G, 3) + 0.0 + >>> G.add_edge(3, 2) + >>> nx.local_reaching_centrality(G, 3) + 0.5 + + See also + -------- + global_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. 
+ https://doi.org/10.1371/journal.pone.0033799 + """ + if paths is None: + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + total_weight = G.size(weight=weight) + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + if weight is not None: + # Interpret weights as lengths. + def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + paths = nx.shortest_path(G, source=v, weight=as_distance) + else: + paths = nx.shortest_path(G, source=v) + # If the graph is unweighted, simply return the proportion of nodes + # reachable from the source node ``v``. + if weight is None and G.is_directed(): + return (len(paths) - 1) / (len(G) - 1) + if normalized and weight is not None: + norm = G.size(weight=weight) / G.size() + else: + norm = 1 + # TODO This can be trivially parallelized. + avgw = (_average_weight(G, path, weight=weight) for path in paths.values()) + sum_avg_weight = sum(avgw) / norm + return sum_avg_weight / (len(G) - 1) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/second_order.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/second_order.py new file mode 100644 index 0000000000000000000000000000000000000000..4bdb1f52141223f43d8e5eb6c0a4fddbd7e58e08 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/second_order.py @@ -0,0 +1,138 @@ +"""Copyright (c) 2015 – Thomson Licensing, SAS + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +* Neither the name of Thomson Licensing, or Technicolor, nor the names +of its contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +# Authors: Erwan Le Merrer (erwan.lemerrer@technicolor.com) + +__all__ = ["second_order_centrality"] + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def second_order_centrality(G, weight="weight"): + """Compute the second order centrality for nodes of G. 
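# Hierarchy intuition for the two reaching-centrality functions above
# (illustrative): GRC is high on an out-tree, where the root dominates,
# and exactly zero on a directed cycle, where no node stands out.
import networkx as nx

tree = nx.DiGraph([(0, 1), (0, 2), (1, 3), (1, 4)])
cycle = nx.cycle_graph(5, create_using=nx.DiGraph)
print(nx.global_reaching_centrality(tree))   # 0.875
print(nx.global_reaching_centrality(cycle))  # 0.0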
+ + The second order centrality of a given node is the standard deviation of + the return times to that node of a perpetual random walk on G. + + Parameters + ---------- + G : graph + A NetworkX connected and undirected graph. + + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + + Returns + ------- + nodes : dictionary + Dictionary keyed by node with second order centrality as the value. + + Examples + -------- + >>> G = nx.star_graph(10) + >>> soc = nx.second_order_centrality(G) + >>> print(sorted(soc.items(), key=lambda x: x[1])[0][0])  # pick first id + 0 + + Raises + ------ + NetworkXException + If the graph G is empty, not connected, or has negative weights. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Lower values of second order centrality indicate higher centrality. + + The algorithm is from Kermarrec, Le Merrer, Sericola and Trédan [1]_. + + This code implements the analytical version of the algorithm, i.e., + there is no simulation of a random walk process involved. The random walk + is here unbiased (corresponding to eq 6 of the paper [1]_), thus the + centrality values are the standard deviations for random walk return times + on the transformed input graph G (equal in-degree at each node by adding + self-loops). + + Complexity of this implementation, made to run locally on a single machine, + is O(n^3), with n the size of G, which makes it viable only for small + graphs. + + References + ---------- + .. [1] Anne-Marie Kermarrec, Erwan Le Merrer, Bruno Sericola, Gilles Trédan + "Second order centrality: Distributed assessment of nodes criticity in + complex networks", Elsevier Computer Communications 34(5):619-628, 2011. + """ + import numpy as np + + n = len(G) + + if n == 0: + raise nx.NetworkXException("Empty graph.") + if not nx.is_connected(G): + raise nx.NetworkXException("Non connected graph.") + if any(d.get(weight, 0) < 0 for u, v, d in G.edges(data=True)): + raise nx.NetworkXException("Graph has negative edge weights.") + + # balancing G for Metropolis-Hastings random walks + G = nx.DiGraph(G) + in_deg = dict(G.in_degree(weight=weight)) + d_max = max(in_deg.values()) + for i, deg in in_deg.items(): + if deg < d_max: + G.add_edge(i, i, weight=d_max - deg) + + P = nx.to_numpy_array(G) + P /= P.sum(axis=1)[:, np.newaxis]  # to transition probability matrix + + def _Qj(P, j): + P = P.copy() + P[:, j] = 0 + return P + + M = np.empty([n, n]) + + for i in range(n): + M[:, i] = np.linalg.solve( + np.identity(n) - _Qj(P, i), np.ones([n, 1])[:, 0] + )  # eq 3 + + return dict( + zip(G.nodes, [np.sqrt(2 * np.sum(M[:, i]) - n * (n + 1)) for i in range(n)]) + )  # eq 6 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/subgraph_alg.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/subgraph_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..c615b4892014e9a180eb6ff6028988a571284bc4 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/subgraph_alg.py @@ -0,0 +1,340 @@ +""" +Subgraph centrality and communicability betweenness.
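# A symmetry sanity check for second_order_centrality above (sketch
# only): on a vertex-transitive graph such as a cycle, every node must
# get the same random-walk return-time deviation.
import networkx as nx

G = nx.cycle_graph(6)
soc = nx.second_order_centrality(G)
vals = sorted(soc.values())
assert vals[-1] - vals[0] < 1e-8  # identical up to round-off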
+""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "subgraph_centrality_exp", + "subgraph_centrality", + "communicability_betweenness_centrality", + "estrada_index", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def subgraph_centrality_exp(G): + r"""Returns the subgraph centrality for each node of G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes:dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm exponentiates the adjacency matrix. + + The subgraph centrality of a node `u` in G can be found using + the matrix exponential of the adjacency matrix of G [1]_, + + .. math:: + + SC(u)=(e^A)_{uu} . + + References + ---------- + .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, + "Subgraph centrality in complex networks", + Physical Review E 71, 056103 (2005). + https://arxiv.org/abs/cond-mat/0504730 + + Examples + -------- + (Example from [1]_) + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... (2, 3), + ... (2, 8), + ... (3, 4), + ... (3, 6), + ... (4, 5), + ... (4, 7), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... ] + ... ) + >>> sc = nx.subgraph_centrality_exp(G) + >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)]) + ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] + """ + # alternative implementation that calculates the matrix exponential + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + expA = sp.linalg.expm(A) + # convert diagonal to dictionary keyed by node + sc = dict(zip(nodelist, map(float, expA.diagonal()))) + return sc + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def subgraph_centrality(G): + r"""Returns subgraph centrality for each node in G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality_exp: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm computes eigenvalues and eigenvectors + of the adjacency matrix. + + Subgraph centrality of a node `u` in G can be found using + a spectral decomposition of the adjacency matrix [1]_, + + .. math:: + + SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}}, + + where `v_j` is an eigenvector of the adjacency matrix `A` of G + corresponding to the eigenvalue `\lambda_j`. + + Examples + -------- + (Example from [1]_) + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... (2, 3), + ... 
(2, 8), + ... (3, 4), + ... (3, 6), + ... (4, 5), + ... (4, 7), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... ] + ... ) + >>> sc = nx.subgraph_centrality(G) + >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)]) + ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] + + References + ---------- + .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, + "Subgraph centrality in complex networks", + Physical Review E 71, 056103 (2005). + https://arxiv.org/abs/cond-mat/0504730 + + """ + import numpy as np + + nodelist = list(G)  # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[np.nonzero(A)] = 1 + w, v = np.linalg.eigh(A) + vsquare = np.array(v) ** 2 + expw = np.exp(w) + xg = vsquare @ expw + # convert vector to dictionary keyed by node + sc = dict(zip(nodelist, map(float, xg))) + return sc + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def communicability_betweenness_centrality(G): + r"""Returns the communicability betweenness for all nodes of G. + + Communicability betweenness measure makes use of the number of walks + connecting every pair of nodes as the basis of a betweenness centrality + measure. + + Parameters + ---------- + G: graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with communicability betweenness as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + Notes + ----- + Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges, + and `A` denote the adjacency matrix of `G`. + + Let `G(r)=(V,E(r))` be the graph resulting from + removing all edges connected to node `r` but not the node itself. + + The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros + only in row and column `r`. + + The subgraph betweenness of a node `r` is [1]_ + + .. math:: + + \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}}, + p\neq q, q\neq r, + + where + `G_{prq}=(e^{A})_{pq} - (e^{A+E(r)})_{pq}` is the number of walks + involving node `r`, + `G_{pq}=(e^{A})_{pq}` is the number of walks starting + at node `p` and ending at node `q`, + and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the + number of terms in the sum. + + The resulting `\omega_{r}` takes values between zero and one. + The lower bound cannot be attained for a connected + graph, and the upper bound is attained in the star graph. + + References + ---------- + .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano, + "Communicability Betweenness in Complex Networks" + Physica A 388 (2009) 764-774.
+ https://arxiv.org/abs/0905.4102 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> cbc = nx.communicability_betweenness_centrality(G) + >>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)]) + ['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03'] + """ + import numpy as np + import scipy as sp + + nodelist = list(G)  # ordering of nodes in matrix + n = len(nodelist) + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[np.nonzero(A)] = 1 + expA = sp.linalg.expm(A) + mapping = dict(zip(nodelist, range(n))) + cbc = {} + for v in G: + # remove row and col of node v + i = mapping[v] + row = A[i, :].copy() + col = A[:, i].copy() + A[i, :] = 0 + A[:, i] = 0 + B = (expA - sp.linalg.expm(A)) / expA + # zero out row/col of node v and the diagonal before summing + B[i, :] = 0 + B[:, i] = 0 + B -= np.diag(np.diag(B)) + cbc[v] = B.sum() + # put row and col back + A[i, :] = row + A[:, i] = col + # rescale when more than two nodes + order = len(cbc) + if order > 2: + scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0)) + for v in cbc: + cbc[v] *= scale + return cbc + + +@nx._dispatch +def estrada_index(G): + r"""Returns the Estrada index of the graph G. + + The Estrada Index is a topological index of folding or 3D "compactness" ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + estrada index: float + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + Notes + ----- + Let `G=(V,E)` be a simple undirected graph with `n` nodes and let + `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}` + be a non-decreasing ordering of the eigenvalues of its adjacency + matrix `A`. The Estrada index is ([1]_, [2]_) + + .. math:: + EE(G)=\sum_{j=1}^n e^{\lambda_j}. + + References + ---------- + .. [1] E. Estrada, "Characterization of 3D molecular structure", + Chem. Phys. Lett. 319, 713 (2000). + https://doi.org/10.1016/S0009-2614(00)00158-5 + .. [2] José Antonio de la Peña, Ivan Gutman, Juan Rada, + "Estimating the Estrada index", + Linear Algebra and its Applications. 427, 1 (2007).
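# The two subgraph-centrality routines above take different linear-algebra
# routes (spectral decomposition vs. matrix exponential) but should agree,
# and estrada_index below is simply their sum; an illustrative check.
import networkx as nx

G = nx.karate_club_graph()
sc_spec = nx.subgraph_centrality(G)
sc_expm = nx.subgraph_centrality_exp(G)
assert all(abs(sc_spec[n] - sc_expm[n]) < 1e-6 for n in G)
assert abs(nx.estrada_index(G) - sum(sc_spec.values())) < 1e-6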
+ https://doi.org/10.1016/j.laa.2007.06.020 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> ei = nx.estrada_index(G) + >>> print(f"{ei:0.5}") + 20.55 + """ + return sum(subgraph_centrality(G).values()) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..4c059cf980666f7e14a80929f84c80bc38749432 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py @@ -0,0 +1,780 @@ +import pytest + +import networkx as nx + + +def weighted_G(): + G = nx.Graph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + return G + + +class TestBetweennessCentrality: + def test_K5(self): + """Betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight=None, normalized=False) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_K5_endpoints(self): + """Betweenness centrality: K5 endpoints""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + b_answer = {0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + b_answer = {0: 0.4, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3_normalized(self): + """Betweenness centrality: P3 normalized""" + G = nx.path_graph(3) + b = nx.betweenness_centrality(G, weight=None, normalized=True) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Betweenness centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_sample_from_P3(self): + """Betweenness centrality: P3 sample""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, k=3, weight=None, normalized=False, seed=1) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.betweenness_centrality(G, k=2, weight=None, normalized=False, seed=1) + # python versions give different results with same seed + b_approx1 = {0: 0.0, 1: 1.5, 2: 0.0} + b_approx2 = {0: 0.0, 1: 0.75, 2: 0.0} + for n in sorted(G): + assert b[n] in (b_approx1[n], b_approx2[n]) + + def test_P3_endpoints(self): + """Betweenness centrality: P3 endpoints""" + G = nx.path_graph(3) + b_answer = {0: 2.0, 1: 3.0, 2: 2.0} + b = 
nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b_answer = {0: 2 / 3, 1: 1.0, 2: 2 / 3} + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_krackhardt_kite_graph(self): + """Betweenness centrality: Krackhardt kite graph""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_krackhardt_kite_graph_normalized(self): + """Betweenness centrality: Krackhardt kite graph normalized""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_florentine_families_graph(self): + """Betweenness centrality: Florentine families graph""" + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_les_miserables_graph(self): + """Betweenness centrality: Les Miserables graph""" + G = nx.les_miserables_graph() + b_answer = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.570, + "Labarre": 0.000, + "Marguerite": 0.000, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.041, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.130, + "MmeThenardier": 0.029, + "Thenardier": 0.075, + "Cosette": 0.024, + "Javert": 0.054, + "Fauchelevent": 0.026, + "Bamatabois": 0.008, + "Perpetue": 0.000, + "Simplice": 0.009, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.007, + "Boulatruelle": 0.000, + "Eponine": 0.011, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.165, + "Gillenormand": 0.020, + "Magnon": 0.000, + "MlleGillenormand": 0.048, + "MmePontmercy": 0.000, + "MlleVaubois": 0.000, + "LtGillenormand": 0.000, + "Marius": 0.132, + "BaronessT": 0.000, + "Mabeuf": 0.028, + "Enjolras": 0.043, + "Combeferre": 0.001, + "Prouvaire": 0.000, + "Feuilly": 0.001, + "Courfeyrac": 0.005, + "Bahorel": 0.002, + "Bossuet": 0.031, + "Joly": 0.002, + "Grantaire": 0.000, + "MotherPlutarch": 0.000, + "Gueulemer": 0.005, + "Babet": 0.005, + 
"Claquesous": 0.005, + "Montparnasse": 0.004, + "Toussaint": 0.000, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.000, + "MmeHucheloup": 0.000, + } + + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_ladder_graph(self): + """Betweenness centrality: Ladder graph""" + G = nx.Graph() # ladder_graph(3) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667} + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_disconnected_path(self): + """Betweenness centrality: disconnected path""" + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5, 6]) + b_answer = {0: 0, 1: 1, 2: 0, 3: 0, 4: 2, 5: 2, 6: 0} + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_disconnected_path_endpoints(self): + """Betweenness centrality: disconnected path endpoints""" + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5, 6]) + b_answer = {0: 2, 1: 3, 2: 2, 3: 3, 4: 5, 5: 5, 6: 3} + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n] / 21, abs=1e-7) + + def test_directed_path(self): + """Betweenness centrality: directed path""" + G = nx.DiGraph() + nx.add_path(G, [0, 1, 2]) + b = nx.betweenness_centrality(G, weight=None, normalized=False) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_directed_path_normalized(self): + """Betweenness centrality: directed path normalized""" + G = nx.DiGraph() + nx.add_path(G, [0, 1, 2]) + b = nx.betweenness_centrality(G, weight=None, normalized=True) + b_answer = {0: 0.0, 1: 0.5, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestWeightedBetweennessCentrality: + def test_K5(self): + """Weighted betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3_normalized(self): + """Weighted betweenness centrality: P3 normalized""" + G = nx.path_graph(3) + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Weighted betweenness centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_krackhardt_kite_graph(self): + """Weighted betweenness centrality: Krackhardt kite graph""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + for b in b_answer: + b_answer[b] /= 2 
+ + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_krackhardt_kite_graph_normalized(self): + """Weighted betweenness centrality: + Krackhardt kite graph normalized + """ + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_florentine_families_graph(self): + """Weighted betweenness centrality: + Florentine families graph""" + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_les_miserables_graph(self): + """Weighted betweenness centrality: Les Miserables graph""" + G = nx.les_miserables_graph() + b_answer = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.454, + "Labarre": 0.000, + "Marguerite": 0.009, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.066, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.114, + "MmeThenardier": 0.046, + "Thenardier": 0.129, + "Cosette": 0.075, + "Javert": 0.193, + "Fauchelevent": 0.026, + "Bamatabois": 0.080, + "Perpetue": 0.000, + "Simplice": 0.001, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.023, + "Boulatruelle": 0.000, + "Eponine": 0.023, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.285, + "Gillenormand": 0.024, + "Magnon": 0.005, + "MlleGillenormand": 0.036, + "MmePontmercy": 0.005, + "MlleVaubois": 0.000, + "LtGillenormand": 0.015, + "Marius": 0.072, + "BaronessT": 0.004, + "Mabeuf": 0.089, + "Enjolras": 0.003, + "Combeferre": 0.000, + "Prouvaire": 0.000, + "Feuilly": 0.004, + "Courfeyrac": 0.001, + "Bahorel": 0.007, + "Bossuet": 0.028, + "Joly": 0.000, + "Grantaire": 0.036, + "MotherPlutarch": 0.000, + "Gueulemer": 0.025, + "Babet": 0.015, + "Claquesous": 0.042, + "Montparnasse": 0.050, + "Toussaint": 0.011, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.002, + "MmeHucheloup": 0.034, + } + + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_ladder_graph(self): + """Weighted betweenness centrality: Ladder graph""" + G = nx.Graph() # ladder_graph(3) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667} + for b in b_answer: + b_answer[b] /= 2 + b = 
nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_G(self): + """Weighted betweenness centrality: G""" + G = weighted_G() + b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G2(self): + """Weighted betweenness centrality: G2""" + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + + b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0} + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G3(self): + """Weighted betweenness centrality: G3""" + G = nx.MultiGraph(weighted_G()) + es = list(G.edges(data=True))[::2] # duplicate every other edge + G.add_edges_from(es) + b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G4(self): + """Weighted betweenness centrality: G4""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("s", "x", 6), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("x", "y", 3), + ("y", "s", 7), + ("y", "v", 6), + ("y", "v", 6), + ] + ) + + b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0} + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestEdgeBetweennessCentrality: + def test_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = dict.fromkeys(G.edges(), 1) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = dict.fromkeys(G.edges(), 1 / 10) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7) + + def test_balanced_tree(self): + """Edge betweenness centrality: balanced tree""" + G = 
nx.balanced_tree(r=2, h=2) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestWeightedEdgeBetweennessCentrality: + def test_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = dict.fromkeys(G.edges(), 1) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_balanced_tree(self): + """Edge betweenness centrality: balanced tree""" + G = nx.balanced_tree(r=2, h=2) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Edge betweenness centrality: weighted""" + eList = [ + (0, 1, 5), + (0, 2, 4), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 4, 3), + (2, 4, 5), + (3, 4, 4), + ] + G = nx.Graph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = { + (0, 1): 0.0, + (0, 2): 1.0, + (0, 3): 2.0, + (0, 4): 1.0, + (1, 2): 2.0, + (1, 3): 3.5, + (1, 4): 1.5, + (2, 4): 1.0, + (3, 4): 0.5, + } + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_weighted_graph(self): + """Edge betweenness centrality: normalized weighted""" + eList = [ + (0, 1, 5), + (0, 2, 4), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 4, 3), + (2, 4, 5), + (3, 4, 4), + ] + G = nx.Graph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True) + b_answer = { + (0, 1): 0.0, + (0, 2): 1.0, + (0, 3): 2.0, + (0, 4): 1.0, + (1, 2): 2.0, + (1, 3): 3.5, + (1, 4): 1.5, + (2, 4): 1.0, + (3, 4): 0.5, + } + norm = len(G) * (len(G) - 1) / 2 + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7) + + def test_weighted_multigraph(self): + """Edge betweenness centrality: weighted multigraph""" + eList = [ + (0, 1, 5), + (0, 1, 4), + (0, 2, 4), + (0, 3, 3), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 3, 2), + (1, 4, 3), + (1, 4, 4), + (2, 4, 5), + (3, 4, 4), + (3, 4, 4), + ] + G = nx.MultiGraph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = { + (0, 1, 0): 0.0, + (0, 1, 1): 0.5, + (0, 2, 0): 1.0, + (0, 3, 0): 0.75, + (0, 3, 1): 0.75, + (0, 4, 0): 1.0, + (1, 2, 0): 2.0, + (1, 3, 0): 3.0, + (1, 3, 1): 0.0, + (1, 4, 0): 1.5, + (1, 4, 1): 0.0, + (2, 4, 0): 1.0, + (3, 4, 0): 0.25, + (3, 4, 1): 0.25, + } + for n in sorted(G.edges(keys=True)): + assert b[n] == 
pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_weighted_multigraph(self): + """Edge betweenness centrality: normalized weighted multigraph""" + eList = [ + (0, 1, 5), + (0, 1, 4), + (0, 2, 4), + (0, 3, 3), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 3, 2), + (1, 4, 3), + (1, 4, 4), + (2, 4, 5), + (3, 4, 4), + (3, 4, 4), + ] + G = nx.MultiGraph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True) + b_answer = { + (0, 1, 0): 0.0, + (0, 1, 1): 0.5, + (0, 2, 0): 1.0, + (0, 3, 0): 0.75, + (0, 3, 1): 0.75, + (0, 4, 0): 1.0, + (1, 2, 0): 2.0, + (1, 3, 0): 3.0, + (1, 3, 1): 0.0, + (1, 4, 0): 1.5, + (1, 4, 1): 0.0, + (2, 4, 0): 1.0, + (3, 4, 0): 0.25, + (3, 4, 1): 0.25, + } + norm = len(G) * (len(G) - 1) / 2 + for n in sorted(G.edges(keys=True)): + assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..a35a401a28e31d279c0d715f79f8a7cc5738050f --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py @@ -0,0 +1,340 @@ +import pytest + +import networkx as nx + + +class TestSubsetBetweennessCentrality: + def test_K5(self): + """Betweenness Centrality Subset: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[1, 3], weight=None + ) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_directed(self): + """Betweenness Centrality Subset: P5 directed""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1, 2: 1, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5(self): + """Betweenness Centrality Subset: P5""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_multiple_target(self): + """Betweenness Centrality Subset: P5 multiple target""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1, 2: 1, 3: 0.5, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box(self): + """Betweenness Centrality Subset: box""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + b_answer = {0: 0, 1: 0.25, 2: 0.25, 3: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path(self): + """Betweenness Centrality Subset: box and path""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)]) + b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path2(self): + """Betweenness 
Centrality Subset: box and path multiple target""" + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)]) + b_answer = {0: 0, 1: 1.0, 2: 0.5, 20: 0.5, 3: 0.5, 4: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_diamond_multi_path(self): + """Betweenness Centrality Subset: Diamond Multi Path""" + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 10), + (10, 11), + (11, 12), + (12, 9), + (2, 6), + (3, 6), + (4, 6), + (5, 7), + (7, 8), + (6, 8), + (8, 9), + ] + ) + b = nx.betweenness_centrality_subset(G, sources=[1], targets=[9], weight=None) + + expected_b = { + 1: 0, + 2: 1.0 / 10, + 3: 1.0 / 10, + 4: 1.0 / 10, + 5: 1.0 / 10, + 6: 3.0 / 10, + 7: 1.0 / 10, + 8: 4.0 / 10, + 9: 0, + 10: 1.0 / 10, + 11: 1.0 / 10, + 12: 1.0 / 10, + } + + for n in sorted(G): + assert b[n] == pytest.approx(expected_b[n], abs=1e-7) + + def test_normalized_p2(self): + """ + Betweenness Centrality Subset: Normalized P2 + if n <= 2: no normalization, betweenness centrality should be 0 for all nodes. + """ + G = nx.Graph() + nx.add_path(G, range(2)) + b_answer = {0: 0, 1: 0.0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[1], normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P5_directed(self): + """Betweenness Centrality Subset: Normalized Directed P5""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1.0 / 12.0, 2: 1.0 / 12.0, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3], normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Betweenness Centrality Subset: Weighted Graph""" + G = nx.DiGraph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + b_answer = {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[5], normalized=False, weight="weight" + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestEdgeSubsetBetweennessCentrality: + def test_K5(self): + """Edge betweenness subset centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[1, 3], weight=None + ) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 3)] = b_answer[(0, 1)] = 0.5 + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_directed(self): + """Edge betweenness subset centrality: P5 directed""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5(self): + """Edge betweenness subset centrality: P5""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5 + b = 
nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_multiple_target(self): + """Edge betweenness subset centrality: P5 multiple target""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box(self): + """Edge betweenness subset centrality: box""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(0, 2)] = 0.25 + b_answer[(1, 3)] = b_answer[(2, 3)] = 0.25 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path(self): + """Edge betweenness subset centrality: box and path""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(0, 2)] = 0.5 + b_answer[(1, 3)] = b_answer[(2, 3)] = 0.5 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path2(self): + """Edge betweenness subset centrality: box and path multiple target""" + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = 1.0 + b_answer[(1, 20)] = b_answer[(3, 20)] = 0.5 + b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_diamond_multi_path(self): + """Edge betweenness subset centrality: Diamond Multi Path""" + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 10), + (10, 11), + (11, 12), + (12, 9), + (2, 6), + (3, 6), + (4, 6), + (5, 7), + (7, 8), + (6, 8), + (8, 9), + ] + ) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(8, 9)] = 0.4 + b_answer[(6, 8)] = b_answer[(7, 8)] = 0.2 + b_answer[(2, 6)] = b_answer[(3, 6)] = b_answer[(4, 6)] = 0.2 / 3.0 + b_answer[(1, 2)] = b_answer[(1, 3)] = b_answer[(1, 4)] = 0.2 / 3.0 + b_answer[(5, 7)] = 0.2 + b_answer[(1, 5)] = 0.2 + b_answer[(9, 12)] = 0.1 + b_answer[(11, 12)] = b_answer[(10, 11)] = b_answer[(1, 10)] = 0.1 + b = nx.edge_betweenness_centrality_subset( + G, sources=[1], targets=[9], weight=None + ) + for n in G.edges(): + sort_n = tuple(sorted(n)) + assert b[n] == pytest.approx(b_answer[sort_n], abs=1e-7) + + def test_normalized_p1(self): + """ + Edge betweenness subset centrality: P1 + if n <= 1: no normalization b=0 for all nodes + """ + G = nx.Graph() + nx.add_path(G, range(1)) + b_answer = dict.fromkeys(G.edges(), 0) + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[0], normalized=True, weight=None + ) + for n in G.edges(): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P5_directed(self): + """Edge betweenness subset centrality: Normalized Directed P5""" + G 
= nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.05 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], normalized=True, weight=None + ) + for n in G.edges(): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Edge betweenness subset centrality: Weighted Graph""" + G = nx.DiGraph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 2)] = b_answer[(2, 4)] = b_answer[(4, 5)] = 0.5 + b_answer[(0, 3)] = b_answer[(3, 5)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[5], normalized=False, weight="weight" + ) + for n in G.edges(): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..d274206a7d6dffb6d9101378370210dd0cb8e01f --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py @@ -0,0 +1,306 @@ +""" +Tests for closeness centrality. +""" +import pytest + +import networkx as nx + + +class TestClosenessCentrality: + @classmethod + def setup_class(cls): + cls.K = nx.krackhardt_kite_graph() + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + + cls.C4 = nx.cycle_graph(4) + cls.T = nx.balanced_tree(r=2, h=2) + cls.Gb = nx.Graph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + + F = nx.florentine_families_graph() + cls.F = F + + cls.LM = nx.les_miserables_graph() + + # Create random undirected, unweighted graph for testing incremental version + cls.undirected_G = nx.fast_gnp_random_graph(n=100, p=0.6, seed=123) + cls.undirected_G_cc = nx.closeness_centrality(cls.undirected_G) + + def test_wf_improved(self): + G = nx.union(self.P4, nx.path_graph([4, 5, 6])) + c = nx.closeness_centrality(G) + cwf = nx.closeness_centrality(G, wf_improved=False) + res = {0: 0.25, 1: 0.375, 2: 0.375, 3: 0.25, 4: 0.222, 5: 0.333, 6: 0.222} + wf_res = {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5, 4: 0.667, 5: 1.0, 6: 0.667} + for n in G: + assert c[n] == pytest.approx(res[n], abs=1e-3) + assert cwf[n] == pytest.approx(wf_res[n], abs=1e-3) + + def test_digraph(self): + G = nx.path_graph(3, create_using=nx.DiGraph()) + c = nx.closeness_centrality(G) + cr = nx.closeness_centrality(G.reverse()) + d = {0: 0.0, 1: 0.500, 2: 0.667} + dr = {0: 0.667, 1: 0.500, 2: 0.0} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + assert cr[n] == pytest.approx(dr[n], abs=1e-3) + + def test_k5_closeness(self): + c = nx.closeness_centrality(self.K5) + d = {0: 1.000, 1: 1.000, 2: 1.000, 3: 1.000, 4: 1.000} + for n in sorted(self.K5): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_closeness(self): + c = nx.closeness_centrality(self.P3) + d = {0: 0.667, 1: 1.000, 2: 0.667} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_krackhardt_closeness(self): + c = nx.closeness_centrality(self.K) + d = { + 0: 0.529, + 1: 
0.529, + 2: 0.500, + 3: 0.600, + 4: 0.500, + 5: 0.643, + 6: 0.643, + 7: 0.600, + 8: 0.429, + 9: 0.310, + } + for n in sorted(self.K): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_florentine_families_closeness(self): + c = nx.closeness_centrality(self.F) + d = { + "Acciaiuoli": 0.368, + "Albizzi": 0.483, + "Barbadori": 0.4375, + "Bischeri": 0.400, + "Castellani": 0.389, + "Ginori": 0.333, + "Guadagni": 0.467, + "Lamberteschi": 0.326, + "Medici": 0.560, + "Pazzi": 0.286, + "Peruzzi": 0.368, + "Ridolfi": 0.500, + "Salviati": 0.389, + "Strozzi": 0.4375, + "Tornabuoni": 0.483, + } + for n in sorted(self.F): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_les_miserables_closeness(self): + c = nx.closeness_centrality(self.LM) + d = { + "Napoleon": 0.302, + "Myriel": 0.429, + "MlleBaptistine": 0.413, + "MmeMagloire": 0.413, + "CountessDeLo": 0.302, + "Geborand": 0.302, + "Champtercier": 0.302, + "Cravatte": 0.302, + "Count": 0.302, + "OldMan": 0.302, + "Valjean": 0.644, + "Labarre": 0.394, + "Marguerite": 0.413, + "MmeDeR": 0.394, + "Isabeau": 0.394, + "Gervais": 0.394, + "Listolier": 0.341, + "Tholomyes": 0.392, + "Fameuil": 0.341, + "Blacheville": 0.341, + "Favourite": 0.341, + "Dahlia": 0.341, + "Zephine": 0.341, + "Fantine": 0.461, + "MmeThenardier": 0.461, + "Thenardier": 0.517, + "Cosette": 0.478, + "Javert": 0.517, + "Fauchelevent": 0.402, + "Bamatabois": 0.427, + "Perpetue": 0.318, + "Simplice": 0.418, + "Scaufflaire": 0.394, + "Woman1": 0.396, + "Judge": 0.404, + "Champmathieu": 0.404, + "Brevet": 0.404, + "Chenildieu": 0.404, + "Cochepaille": 0.404, + "Pontmercy": 0.373, + "Boulatruelle": 0.342, + "Eponine": 0.396, + "Anzelma": 0.352, + "Woman2": 0.402, + "MotherInnocent": 0.398, + "Gribier": 0.288, + "MmeBurgon": 0.344, + "Jondrette": 0.257, + "Gavroche": 0.514, + "Gillenormand": 0.442, + "Magnon": 0.335, + "MlleGillenormand": 0.442, + "MmePontmercy": 0.315, + "MlleVaubois": 0.308, + "LtGillenormand": 0.365, + "Marius": 0.531, + "BaronessT": 0.352, + "Mabeuf": 0.396, + "Enjolras": 0.481, + "Combeferre": 0.392, + "Prouvaire": 0.357, + "Feuilly": 0.392, + "Courfeyrac": 0.400, + "Bahorel": 0.394, + "Bossuet": 0.475, + "Joly": 0.394, + "Grantaire": 0.358, + "MotherPlutarch": 0.285, + "Gueulemer": 0.463, + "Babet": 0.463, + "Claquesous": 0.452, + "Montparnasse": 0.458, + "Toussaint": 0.402, + "Child1": 0.342, + "Child2": 0.342, + "Brujon": 0.380, + "MmeHucheloup": 0.353, + } + for n in sorted(self.LM): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_closeness(self): + edges = [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + XG = nx.Graph() + XG.add_weighted_edges_from(edges) + c = nx.closeness_centrality(XG, distance="weight") + d = {"y": 0.200, "x": 0.286, "s": 0.138, "u": 0.235, "v": 0.200} + for n in sorted(XG): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + # + # Tests for incremental closeness centrality. 
+ # + @staticmethod + def pick_add_edge(g): + u = nx.utils.arbitrary_element(g) + possible_nodes = set(g.nodes()) + neighbors = list(g.neighbors(u)) + [u] + possible_nodes.difference_update(neighbors) + v = nx.utils.arbitrary_element(possible_nodes) + return (u, v) + + @staticmethod + def pick_remove_edge(g): + u = nx.utils.arbitrary_element(g) + possible_nodes = list(g.neighbors(u)) + v = nx.utils.arbitrary_element(possible_nodes) + return (u, v) + + def test_directed_raises(self): + with pytest.raises(nx.NetworkXNotImplemented): + dir_G = nx.gn_graph(n=5) + prev_cc = None + edge = self.pick_add_edge(dir_G) + insert = True + nx.incremental_closeness_centrality(dir_G, edge, prev_cc, insert) + + def test_wrong_size_prev_cc_raises(self): + with pytest.raises(nx.NetworkXError): + G = self.undirected_G.copy() + edge = self.pick_add_edge(G) + insert = True + prev_cc = self.undirected_G_cc.copy() + prev_cc.pop(0) + nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + + def test_wrong_nodes_prev_cc_raises(self): + with pytest.raises(nx.NetworkXError): + G = self.undirected_G.copy() + edge = self.pick_add_edge(G) + insert = True + prev_cc = self.undirected_G_cc.copy() + num_nodes = len(prev_cc) + prev_cc.pop(0) + prev_cc[num_nodes] = 0.5 + nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + + def test_zero_centrality(self): + G = nx.path_graph(3) + prev_cc = nx.closeness_centrality(G) + edge = self.pick_remove_edge(G) + test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=False) + G.remove_edges_from([edge]) + real_cc = nx.closeness_centrality(G) + shared_items = set(test_cc.items()) & set(real_cc.items()) + assert len(shared_items) == len(real_cc) + assert 0 in test_cc.values() + + def test_incremental(self): + # Check that incremental and regular give same output + G = self.undirected_G.copy() + prev_cc = None + for i in range(5): + if i % 2 == 0: + # Remove an edge + insert = False + edge = self.pick_remove_edge(G) + else: + # Add an edge + insert = True + edge = self.pick_add_edge(G) + + # start = timeit.default_timer() + test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + # inc_elapsed = (timeit.default_timer() - start) + # print(f"incremental time: {inc_elapsed}") + + if insert: + G.add_edges_from([edge]) + else: + G.remove_edges_from([edge]) + + # start = timeit.default_timer() + real_cc = nx.closeness_centrality(G) + # reg_elapsed = (timeit.default_timer() - start) + # print(f"regular time: {reg_elapsed}") + # Example output: + # incremental time: 0.208 + # regular time: 0.276 + # incremental time: 0.00683 + # regular time: 0.260 + # incremental time: 0.0224 + # regular time: 0.278 + # incremental time: 0.00804 + # regular time: 0.208 + # incremental time: 0.00947 + # regular time: 0.188 + + assert set(test_cc.items()) == set(real_cc.items()) + + prev_cc = test_cc diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..4e3d4385c9b266975140d49b739d09fbd449d8a6 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py @@ -0,0 +1,197 @@ +import pytest + +import networkx as nx +from networkx import approximate_current_flow_betweenness_centrality as approximate_cfbc +from networkx import 
edge_current_flow_betweenness_centrality as edge_current_flow + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + G.add_edge(0, 1, weight=0.5, other=0.3) + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + wb_answer = {0: 0.2222222, 1: 0.2222222, 2: 0.30555555, 3: 0.30555555} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="weight") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + wb_answer = {0: 0.2051282, 1: 0.2051282, 2: 0.33974358, 3: 0.33974358} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="other") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4_normalized(self): + """Betweenness centrality: P4 normalized""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0, 1: 2.0 / 3, 2: 2.0 / 3, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=False) + b_answer = {0: 0, 1: 2, 2: 2, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Betweenness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {"a": 1.0, "b": 0.0, "c": 0.0, "d": 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_solvers2(self): + """Betweenness centrality: alternate solvers""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestApproximateFlowBetweennessCentrality: + def test_K4_normalized(self): + "Approximate current-flow betweenness centrality: K4 normalized" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_K4(self): + "Approximate current-flow betweenness centrality: K4" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=False) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=False, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon * len(G) ** 2) + + def test_star(self): + "Approximate current-flow betweenness centrality: star" + G = nx.Graph() + nx.add_star(G, ["a", 
"b", "c", "d"]) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_grid(self): + "Approximate current-flow betweenness centrality: 2d grid" + G = nx.grid_2d_graph(4, 4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_seed(self): + G = nx.complete_graph(4) + b = approximate_cfbc(G, normalized=False, epsilon=0.05, seed=1) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + np.testing.assert_allclose(b[n], b_answer[n], atol=0.1) + + def test_solvers(self): + "Approximate current-flow betweenness centrality: solvers" + G = nx.complete_graph(4) + epsilon = 0.1 + for solver in ["full", "lu", "cg"]: + b = approximate_cfbc( + G, normalized=False, solver=solver, epsilon=0.5 * epsilon + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + np.testing.assert_allclose(b[n], b_answer[n], atol=epsilon) + + def test_lower_kmax(self): + G = nx.complete_graph(4) + with pytest.raises(nx.NetworkXError, match="Increase kmax or epsilon"): + nx.approximate_current_flow_betweenness_centrality(G, kmax=4) + + +class TestWeightedFlowBetweennessCentrality: + pass + + +class TestEdgeFlowBetweennessCentrality: + def test_K4(self): + """Edge flow betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow(G, normalized=True) + b_answer = dict.fromkeys(G.edges(), 0.25) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_K4_normalized(self): + """Edge flow betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = dict.fromkeys(G.edges(), 0.75) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_C4(self): + """Edge flow betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = {(0, 1): 1.25, (0, 3): 1.25, (1, 2): 1.25, (2, 3): 1.25} + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = {(0, 1): 1.5, (1, 2): 2.0, (2, 3): 1.5} + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + +@pytest.mark.parametrize( + "centrality_func", + ( + nx.current_flow_betweenness_centrality, + nx.edge_current_flow_betweenness_centrality, + nx.approximate_current_flow_betweenness_centrality, + ), +) +def test_unconnected_graphs_betweenness_centrality(centrality_func): + G = nx.Graph([(1, 2), (3, 4)]) + G.add_node(5) + with pytest.raises(nx.NetworkXError, match="Graph not connected"): + centrality_func(G) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..7b1611b07bbf890f5e45bba7a42c298bd8f4e749 --- /dev/null +++ 
b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py @@ -0,0 +1,147 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx import edge_current_flow_betweenness_centrality as edge_current_flow +from networkx import ( + edge_current_flow_betweenness_centrality_subset as edge_current_flow_subset, +) + + +class TestFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # test weighted network + G.add_edge(0, 1, weight=0.5, other=0.3) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True, weight="other" + ) + b_answer = nx.current_flow_betweenness_centrality( + G, normalized=True, weight="other" + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4_normalized(self): + """Betweenness centrality: P4 normalized""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Betweenness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +# class TestWeightedFlowBetweennessCentrality(): +# pass + + +class TestEdgeFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=False) + b_answer = 
edge_current_flow(G, normalized=False) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + # test weighted network + G.add_edge(0, 1, weight=0.5, other=0.3) + b = edge_current_flow_subset(G, list(G), list(G), normalized=False, weight=None) + # weight is None => same as unweighted network + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + b = edge_current_flow_subset(G, list(G), list(G), normalized=False) + b_answer = edge_current_flow(G, normalized=False) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + b = edge_current_flow_subset( + G, list(G), list(G), normalized=False, weight="other" + ) + b_answer = edge_current_flow(G, normalized=False, weight="other") + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py new file mode 100644 index 0000000000000000000000000000000000000000..2528d622855938b8f569d4fb33309ebed1dbd7c8 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py @@ -0,0 +1,43 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +class TestFlowClosenessCentrality: + def test_K4(self): + """Closeness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_closeness_centrality(G) + b_answer = {0: 2.0 / 3, 1: 2.0 / 3, 2: 2.0 / 3, 3: 2.0 / 3} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Closeness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_closeness_centrality(G) + b_answer = {0: 1.0 / 6, 1: 1.0 / 4, 2: 1.0 / 4, 3: 1.0 / 6} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Closeness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_closeness_centrality(G) + b_answer = {"a": 1.0 / 3, "b": 0.6 / 3, "c": 0.6 / 3, "d": 0.6 / 3} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_current_flow_closeness_centrality_not_connected(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + with pytest.raises(nx.NetworkXError): + nx.current_flow_closeness_centrality(G) + + +class TestWeightedFlowClosenessCentrality: + pass diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py new file mode 100644 index 
0000000000000000000000000000000000000000..f3f6c39d3bd58d243627c9f33a088e4f4e37d3bb --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py @@ -0,0 +1,144 @@ +""" + Unit tests for degree centrality. +""" + +import pytest + +import networkx as nx + + +class TestDegreeCentrality: + def setup_method(self): + self.K = nx.krackhardt_kite_graph() + self.P3 = nx.path_graph(3) + self.K5 = nx.complete_graph(5) + + F = nx.Graph() # Florentine families + F.add_edge("Acciaiuoli", "Medici") + F.add_edge("Castellani", "Peruzzi") + F.add_edge("Castellani", "Strozzi") + F.add_edge("Castellani", "Barbadori") + F.add_edge("Medici", "Barbadori") + F.add_edge("Medici", "Ridolfi") + F.add_edge("Medici", "Tornabuoni") + F.add_edge("Medici", "Albizzi") + F.add_edge("Medici", "Salviati") + F.add_edge("Salviati", "Pazzi") + F.add_edge("Peruzzi", "Strozzi") + F.add_edge("Peruzzi", "Bischeri") + F.add_edge("Strozzi", "Ridolfi") + F.add_edge("Strozzi", "Bischeri") + F.add_edge("Ridolfi", "Tornabuoni") + F.add_edge("Tornabuoni", "Guadagni") + F.add_edge("Albizzi", "Ginori") + F.add_edge("Albizzi", "Guadagni") + F.add_edge("Bischeri", "Guadagni") + F.add_edge("Guadagni", "Lamberteschi") + self.F = F + + G = nx.DiGraph() + G.add_edge(0, 5) + G.add_edge(1, 5) + G.add_edge(2, 5) + G.add_edge(3, 5) + G.add_edge(4, 5) + G.add_edge(5, 6) + G.add_edge(5, 7) + G.add_edge(5, 8) + self.G = G + + def test_degree_centrality_1(self): + d = nx.degree_centrality(self.K5) + exact = dict(zip(range(5), [1] * 5)) + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_degree_centrality_2(self): + d = nx.degree_centrality(self.P3) + exact = {0: 0.5, 1: 1, 2: 0.5} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_degree_centrality_3(self): + d = nx.degree_centrality(self.K) + exact = { + 0: 0.444, + 1: 0.444, + 2: 0.333, + 3: 0.667, + 4: 0.333, + 5: 0.556, + 6: 0.556, + 7: 0.333, + 8: 0.222, + 9: 0.111, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7) + + def test_degree_centrality_4(self): + d = nx.degree_centrality(self.F) + names = sorted(self.F.nodes()) + dcs = [ + 0.071, + 0.214, + 0.143, + 0.214, + 0.214, + 0.071, + 0.286, + 0.071, + 0.429, + 0.071, + 0.214, + 0.214, + 0.143, + 0.286, + 0.214, + ] + exact = dict(zip(names, dcs)) + for n, dc in d.items(): + assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7) + + def test_indegree_centrality(self): + d = nx.in_degree_centrality(self.G) + exact = { + 0: 0.0, + 1: 0.0, + 2: 0.0, + 3: 0.0, + 4: 0.0, + 5: 0.625, + 6: 0.125, + 7: 0.125, + 8: 0.125, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_outdegree_centrality(self): + d = nx.out_degree_centrality(self.G) + exact = { + 0: 0.125, + 1: 0.125, + 2: 0.125, + 3: 0.125, + 4: 0.125, + 5: 0.375, + 6: 0.0, + 7: 0.0, + 8: 0.0, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_small_graph_centrality(self): + G = nx.empty_graph(create_using=nx.DiGraph) + assert {} == nx.degree_centrality(G) + assert {} == nx.out_degree_centrality(G) + assert {} == nx.in_degree_centrality(G) + + G = nx.empty_graph(1, create_using=nx.DiGraph) + assert {0: 1} == nx.degree_centrality(G) + assert {0: 1} == nx.out_degree_centrality(G) + assert {0: 1} == nx.in_degree_centrality(G) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py 
b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py new file mode 100644 index 0000000000000000000000000000000000000000..05de1c43659a44f2dbf45368bf2ee552dd61dd78 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py @@ -0,0 +1,73 @@ +import networkx as nx + + +def small_ego_G(): + """The sample network from https://arxiv.org/pdf/1310.6753v1.pdf""" + edges = [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), + ("b", "e"), + ("b", "f"), + ("c", "d"), + ("c", "f"), + ("c", "h"), + ("d", "f"), + ("e", "f"), + ("f", "h"), + ("h", "j"), + ("h", "k"), + ("i", "j"), + ("i", "k"), + ("j", "k"), + ("u", "a"), + ("u", "b"), + ("u", "c"), + ("u", "d"), + ("u", "e"), + ("u", "f"), + ("u", "g"), + ("u", "h"), + ("u", "i"), + ("u", "j"), + ("u", "k"), + ] + G = nx.Graph() + G.add_edges_from(edges) + + return G + + +class TestDispersion: + def test_article(self): + """our algorithm matches article's""" + G = small_ego_G() + disp_uh = nx.dispersion(G, "u", "h", normalized=False) + disp_ub = nx.dispersion(G, "u", "b", normalized=False) + assert disp_uh == 4 + assert disp_ub == 1 + + def test_results_length(self): + """there is a result for every node""" + G = small_ego_G() + disp = nx.dispersion(G) + disp_Gu = nx.dispersion(G, "u") + disp_uv = nx.dispersion(G, "u", "h") + assert len(disp) == len(G) + assert len(disp_Gu) == len(G) - 1 + assert isinstance(disp_uv, float) + + def test_dispersion_v_only(self): + G = small_ego_G() + disp_G_h = nx.dispersion(G, v="h", normalized=False) + disp_G_h_normalized = nx.dispersion(G, v="h", normalized=True) + assert disp_G_h == {"c": 0, "f": 0, "j": 0, "k": 0, "u": 4} + assert disp_G_h_normalized == {"c": 0.0, "f": 0.0, "j": 0.0, "k": 0.0, "u": 1.0} + + def test_impossible_things(self): + G = nx.karate_club_graph() + disp = nx.dispersion(G) + for u in disp: + for v in disp[u]: + assert disp[u][v] >= 0 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..b8620056a94995100fae72a66bb7e0558aae953b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py @@ -0,0 +1,175 @@ +import math + +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +import networkx as nx + + +class TestEigenvectorCentrality: + def test_K5(self): + """Eigenvector centrality: K5""" + G = nx.complete_graph(5) + b = nx.eigenvector_centrality(G) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + nstart = {n: 1 for n in G} + b = nx.eigenvector_centrality(G, nstart=nstart) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3(self): + """Eigenvector centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.5, 1: 0.7071, 2: 0.5} + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + b = nx.eigenvector_centrality(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_P3_unweighted(self): + """Eigenvector centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.5, 1: 0.7071, 2: 0.5} 
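+        # Note (added for clarity): these expected values come from the adjacency
+        # spectrum of P3, whose leading eigenvalue sqrt(2) has unit eigenvector
+        # (1, sqrt(2), 1) / 2 = (0.5, 0.70710678..., 0.5).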
+ b = nx.eigenvector_centrality_numpy(G, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_maxiter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + G = nx.path_graph(3) + nx.eigenvector_centrality(G, max_iter=0) + + +class TestEigenvectorCentralityDirected: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + + edges = [ + (1, 2), + (1, 3), + (2, 4), + (3, 2), + (3, 5), + (4, 2), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (5, 8), + (6, 8), + (7, 1), + (7, 5), + (7, 8), + (8, 6), + (8, 7), + ] + + G.add_edges_from(edges, weight=2.0) + cls.G = G.reverse() + cls.G.evc = [ + 0.25368793, + 0.19576478, + 0.32817092, + 0.40430835, + 0.48199885, + 0.15724483, + 0.51346196, + 0.32475403, + ] + + H = nx.DiGraph() + + edges = [ + (1, 2), + (1, 3), + (2, 4), + (3, 2), + (3, 5), + (4, 2), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (5, 8), + (6, 8), + (7, 1), + (7, 5), + (7, 8), + (8, 6), + (8, 7), + ] + + G.add_edges_from(edges) + cls.H = G.reverse() + cls.H.evc = [ + 0.25368793, + 0.19576478, + 0.32817092, + 0.40430835, + 0.48199885, + 0.15724483, + 0.51346196, + 0.32475403, + ] + + def test_eigenvector_centrality_weighted(self): + G = self.G + p = nx.eigenvector_centrality(G) + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-4) + + def test_eigenvector_centrality_weighted_numpy(self): + G = self.G + p = nx.eigenvector_centrality_numpy(G) + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_eigenvector_centrality_unweighted(self): + G = self.H + p = nx.eigenvector_centrality(G) + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-4) + + def test_eigenvector_centrality_unweighted_numpy(self): + G = self.H + p = nx.eigenvector_centrality_numpy(G) + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestEigenvectorCentralityExceptions: + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality(nx.MultiGraph()) + + def test_multigraph_numpy(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality_numpy(nx.MultiGraph()) + + def test_empty(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality(nx.Graph()) + + def test_empty_numpy(self): + with pytest.raises(nx.NetworkXException): + nx.eigenvector_centrality_numpy(nx.Graph()) + + def test_zero_nstart(self): + G = nx.Graph([(1, 2), (1, 3), (2, 3)]) + with pytest.raises( + nx.NetworkXException, match="initial vector cannot have all zero values" + ): + nx.eigenvector_centrality(G, nstart={v: 0 for v in G}) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_group.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_group.py new file mode 100644 index 0000000000000000000000000000000000000000..3f5559dcd73a268c28b513678b1fe3dd058220cb --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_group.py @@ -0,0 +1,278 @@ +""" +Tests for Group Centrality Measures +""" + + +import pytest + +import networkx as nx + + +class TestGroupBetweennessCentrality: + def test_group_betweenness_single_node(self): + """ + Group betweenness centrality for single node group + """ + G = nx.path_graph(5) + C = [1] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=False, endpoints=False + ) + b_answer = 3.0 + assert b == b_answer + + def 
test_group_betweenness_with_endpoints(self): + """ + Group betweenness centrality for a single node group, with endpoints + """ + G = nx.path_graph(5) + C = [1] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=False, endpoints=True + ) + b_answer = 7.0 + assert b == b_answer + + def test_group_betweenness_normalized(self): + """ + Group betweenness centrality for a group with more than + one node, normalized + """ + G = nx.path_graph(5) + C = [1, 3] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=True, endpoints=False + ) + b_answer = 1.0 + assert b == b_answer + + def test_two_group_betweenness_value_zero(self): + """ + Group betweenness centrality for two groups, one of which has value 0 + """ + G = nx.cycle_graph(7) + C = [[0, 1, 6], [0, 1, 5]] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = [0.0, 3.0] + assert b == b_answer + + def test_group_betweenness_value_zero(self): + """ + Group betweenness centrality value of 0 + """ + G = nx.cycle_graph(6) + C = [0, 1, 5] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_disconnected_graph(self): + """ + Group betweenness centrality in a disconnected graph + """ + G = nx.path_graph(5) + G.remove_edge(0, 1) + C = [1] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_node_not_in_graph(self): + """ + Node(s) in C not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.group_betweenness_centrality(nx.path_graph(5), [4, 7, 8]) + + def test_group_betweenness_directed_weighted(self): + """ + Group betweenness centrality in a directed and weighted graph + """ + G = nx.DiGraph() + G.add_edge(1, 0, weight=1) + G.add_edge(0, 2, weight=2) + G.add_edge(1, 2, weight=3) + G.add_edge(3, 1, weight=4) + G.add_edge(2, 3, weight=1) + G.add_edge(4, 3, weight=6) + G.add_edge(2, 4, weight=7) + C = [1, 2] + b = nx.group_betweenness_centrality(G, C, weight="weight", normalized=False) + b_answer = 5.0 + assert b == b_answer + + +class TestProminentGroup: + np = pytest.importorskip("numpy") + pd = pytest.importorskip("pandas") + + def test_prominent_group_single_node(self): + """ + Prominent group for single node + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, endpoints=False) + b_answer, g_answer = 4.0, [2] + assert b == b_answer and g == g_answer + + def test_prominent_group_with_c(self): + """ + Prominent group without some nodes + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, C=[2]) + b_answer, g_answer = 3.0, [1] + assert b == b_answer and g == g_answer + + def test_prominent_group_normalized_endpoints(self): + """ + Prominent group with normalized result, with endpoints + """ + G = nx.cycle_graph(7) + k = 2 + b, g = nx.prominent_group(G, k, normalized=True, endpoints=True) + b_answer, g_answer = 1.7, [2, 5] + assert b == b_answer and g == g_answer + + def test_prominent_group_disconnected_graph(self): + """ + Prominent group of disconnected graph + """ + G = nx.path_graph(6) + G.remove_edge(0, 1) + k = 1 + b, g = nx.prominent_group(G, k, weight=None, normalized=False) + b_answer, g_answer = 4.0, [3] + assert b == b_answer and g == g_answer + + def test_prominent_group_node_not_in_graph(self): + """ + Node(s) in C not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + 
nx.prominent_group(nx.path_graph(5), 1, C=[10]) + + def test_prominent_group_directed_weighted(self): + """ + Prominent group in a directed and weighted graph + """ + G = nx.DiGraph() + G.add_edge(1, 0, weight=1) + G.add_edge(0, 2, weight=2) + G.add_edge(1, 2, weight=3) + G.add_edge(3, 1, weight=4) + G.add_edge(2, 3, weight=1) + G.add_edge(4, 3, weight=6) + G.add_edge(2, 4, weight=7) + k = 2 + b, g = nx.prominent_group(G, k, weight="weight", normalized=False) + b_answer, g_answer = 5.0, [1, 2] + assert b == b_answer and g == g_answer + + def test_prominent_group_greedy_algorithm(self): + """ + Prominent group found with the greedy algorithm + """ + G = nx.cycle_graph(7) + k = 2 + b, g = nx.prominent_group(G, k, normalized=True, endpoints=True, greedy=True) + b_answer, g_answer = 1.7, [6, 3] + assert b == b_answer and g == g_answer + + +class TestGroupClosenessCentrality: + def test_group_closeness_single_node(self): + """ + Group closeness centrality for a single node group + """ + G = nx.path_graph(5) + c = nx.group_closeness_centrality(G, [1]) + c_answer = nx.closeness_centrality(G, 1) + assert c == c_answer + + def test_group_closeness_disconnected(self): + """ + Group closeness centrality for a disconnected graph + """ + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + c = nx.group_closeness_centrality(G, [1, 2]) + c_answer = 0 + assert c == c_answer + + def test_group_closeness_multiple_node(self): + """ + Group closeness centrality for a group with more than + one node + """ + G = nx.path_graph(4) + c = nx.group_closeness_centrality(G, [1, 2]) + c_answer = 1 + assert c == c_answer + + def test_group_closeness_node_not_in_graph(self): + """ + Node(s) in S not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.group_closeness_centrality(nx.path_graph(5), [6, 7, 8]) + + +class TestGroupDegreeCentrality: + def test_group_degree_centrality_single_node(self): + """ + Group degree centrality for a single node group + """ + G = nx.path_graph(4) + d = nx.group_degree_centrality(G, [1]) + d_answer = nx.degree_centrality(G)[1] + assert d == d_answer + + def test_group_degree_centrality_multiple_node(self): + """ + Group degree centrality for a group with more than + one node + """ + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_degree_centrality(G, [1, 2]) + d_answer = 1 + assert d == d_answer + + def test_group_in_degree_centrality(self): + """ + Group in-degree centrality in a DiGraph + """ + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_in_degree_centrality(G, [1, 2]) + d_answer = 0 + assert d == d_answer + + def test_group_out_degree_centrality(self): + """ + Group out-degree centrality in a DiGraph + """ + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + G.add_edges_from( + [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)] + ) + d = nx.group_out_degree_centrality(G, [1, 2]) + d_answer = 1 + assert d == d_answer + + def test_group_degree_centrality_node_not_in_graph(self): + """ + Node(s) in S not in graph, raises NetworkXError + """ + with pytest.raises(nx.NetworkXError): + nx.group_degree_centrality(nx.path_graph(5), [6, 7, 8]) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py 
b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..450356ea970565eaee7612eb4c8c2d5364af50d7 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py @@ -0,0 +1,115 @@ +""" +Tests for harmonic centrality. +""" +import pytest + +import networkx as nx +from networkx.algorithms.centrality import harmonic_centrality + + +class TestHarmonicCentrality: + @classmethod + def setup_class(cls): + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + + cls.C4 = nx.cycle_graph(4) + cls.C4_directed = nx.cycle_graph(4, create_using=nx.DiGraph) + + cls.C5 = nx.cycle_graph(5) + + cls.T = nx.balanced_tree(r=2, h=2) + + cls.Gb = nx.DiGraph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (0, 4), (2, 1), (2, 3), (4, 3)]) + + def test_p3_harmonic(self): + c = harmonic_centrality(self.P3) + d = {0: 1.5, 1: 2, 2: 1.5} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_harmonic(self): + c = harmonic_centrality(self.P4) + d = {0: 1.8333333, 1: 2.5, 2: 2.5, 3: 1.8333333} + for n in sorted(self.P4): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_clique_complete(self): + c = harmonic_centrality(self.K5) + d = {0: 4, 1: 4, 2: 4, 3: 4, 4: 4} + for n in sorted(self.K5): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_cycle_C4(self): + c = harmonic_centrality(self.C4) + d = {0: 2.5, 1: 2.5, 2: 2.5, 3: 2.5} + for n in sorted(self.C4): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_cycle_C5(self): + c = harmonic_centrality(self.C5) + d = {0: 3, 1: 3, 2: 3, 3: 3, 4: 3} + for n in sorted(self.C5): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_bal_tree(self): + c = harmonic_centrality(self.T) + d = {0: 4.0, 1: 4.1666, 2: 4.1666, 3: 2.8333, 4: 2.8333, 5: 2.8333, 6: 2.8333} + for n in sorted(self.T): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_exampleGraph(self): + c = harmonic_centrality(self.Gb) + d = {0: 0, 1: 2, 2: 1, 3: 2.5, 4: 1} + for n in sorted(self.Gb): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_harmonic(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("a", "b", 10), + ("d", "c", 5), + ("a", "c", 1), + ("e", "f", 2), + ("f", "c", 1), + ("a", "f", 3), + ] + ) + c = harmonic_centrality(XG, distance="weight") + d = {"a": 0, "b": 0.1, "c": 2.533, "d": 0, "e": 0, "f": 0.83333} + for n in sorted(XG): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_empty(self): + G = nx.DiGraph() + c = harmonic_centrality(G, distance="weight") + d = {} + assert c == d + + def test_singleton(self): + G = nx.DiGraph() + G.add_node(0) + c = harmonic_centrality(G, distance="weight") + d = {0: 0} + assert c == d + + def test_cycle_c4_directed(self): + c = harmonic_centrality(self.C4_directed, nbunch=[0, 1], sources=[1, 2]) + d = {0: 0.833, 1: 0.333} + for n in [0, 1]: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_harmonic_subset(self): + c = harmonic_centrality(self.P3, sources=[0, 1]) + d = {0: 1, 1: 1, 2: 1.5} + for n in self.P3: + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_harmonic_subset(self): + c = harmonic_centrality(self.P4, nbunch=[2, 3], sources=[0, 1]) + d = {2: 1.5, 3: 0.8333333} + for n in [2, 3]: + assert c[n] == pytest.approx(d[n], abs=1e-3) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py 
b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..0927f00bc5c31ad1134dae0c8f59367baed67bb6 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py @@ -0,0 +1,345 @@ +import math + +import pytest + +import networkx as nx + + +class TestKatzCentrality: + def test_K5(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + nstart = {n: 1 for n in G} + b = nx.katz_centrality(G, alpha, nstart=nstart) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Katz centrality: P3""" + alpha = 0.1 + G = nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_maxiter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + nx.katz_centrality(nx.path_graph(3), 0.1, max_iter=0) + + def test_beta_as_scalar(self): + alpha = 0.1 + beta = 0.1 + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_beta_as_dict(self): + alpha = 0.1 + beta = {0: 1.0, 1: 1.0, 2: 1.0} + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_multiple_alpha(self): + alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6] + for alpha in alpha_list: + b_answer = { + 0.1: { + 0: 0.5598852584152165, + 1: 0.6107839182711449, + 2: 0.5598852584152162, + }, + 0.2: { + 0: 0.5454545454545454, + 1: 0.6363636363636365, + 2: 0.5454545454545454, + }, + 0.3: { + 0: 0.5333964609104419, + 1: 0.6564879518897746, + 2: 0.5333964609104419, + }, + 0.4: { + 0: 0.5232045649263551, + 1: 0.6726915834767423, + 2: 0.5232045649263551, + }, + 0.5: { + 0: 0.5144957746691622, + 1: 0.6859943117075809, + 2: 0.5144957746691622, + }, + 0.6: { + 0: 0.5069794004195823, + 1: 0.6970966755769258, + 2: 0.5069794004195823, + }, + } + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.katz_centrality(nx.MultiGraph(), 0.1) + + def test_empty(self): + e = nx.katz_centrality(nx.Graph(), 0.1) + assert e == {} + + def test_bad_beta(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + beta = {0: 77} + nx.katz_centrality(G, 0.1, beta=beta) + + def test_bad_beta_number(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + nx.katz_centrality(G, 0.1, beta="foo") + + +class TestKatzCentralityNumpy: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_K5(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], 
abs=1e-7) + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3(self): + """Katz centrality: P3""" + alpha = 0.1 + G = nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality_numpy(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_beta_as_scalar(self): + alpha = 0.1 + beta = 0.1 + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality_numpy(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_beta_as_dict(self): + alpha = 0.1 + beta = {0: 1.0, 1: 1.0, 2: 1.0} + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality_numpy(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_multiple_alpha(self): + alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6] + for alpha in alpha_list: + b_answer = { + 0.1: { + 0: 0.5598852584152165, + 1: 0.6107839182711449, + 2: 0.5598852584152162, + }, + 0.2: { + 0: 0.5454545454545454, + 1: 0.6363636363636365, + 2: 0.5454545454545454, + }, + 0.3: { + 0: 0.5333964609104419, + 1: 0.6564879518897746, + 2: 0.5333964609104419, + }, + 0.4: { + 0: 0.5232045649263551, + 1: 0.6726915834767423, + 2: 0.5232045649263551, + }, + 0.5: { + 0: 0.5144957746691622, + 1: 0.6859943117075809, + 2: 0.5144957746691622, + }, + 0.6: { + 0: 0.5069794004195823, + 1: 0.6970966755769258, + 2: 0.5069794004195823, + }, + } + G = nx.path_graph(3) + b = nx.katz_centrality_numpy(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.katz_centrality(nx.MultiGraph(), 0.1) + + def test_empty(self): + e = nx.katz_centrality(nx.Graph(), 0.1) + assert e == {} + + def test_bad_beta(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + beta = {0: 77} + nx.katz_centrality_numpy(G, 0.1, beta=beta) + + def test_bad_beta_number(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + nx.katz_centrality_numpy(G, 0.1, beta="foo") + + def test_K5_unweighted(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha, weight=None) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.eigenvector_centrality_numpy(G, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3_unweighted(self): + """Katz centrality: P3""" + alpha = 0.1 + G = nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality_numpy(G, alpha, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + +class TestKatzCentralityDirected: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + edges = [ + (1, 2), + (1, 3), + (2, 4), + (3, 2), + (3, 5), + (4, 2), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (5, 8), + (6, 8), + (7, 1), + (7, 5), + (7, 8), + (8, 6), + (8, 7), + ] + G.add_edges_from(edges, weight=2.0) + cls.G = G.reverse() + cls.G.alpha = 0.1 + cls.G.evc = [ + 0.3289589783189635, + 0.2832077296243516, + 0.3425906003685471, + 0.3970420865198392, + 
0.41074871061646284, + 0.272257430756461, + 0.4201989685435462, + 0.34229059218038554, + ] + + H = nx.DiGraph(edges) + cls.H = G.reverse() + cls.H.alpha = 0.1 + cls.H.evc = [ + 0.3289589783189635, + 0.2832077296243516, + 0.3425906003685471, + 0.3970420865198392, + 0.41074871061646284, + 0.272257430756461, + 0.4201989685435462, + 0.34229059218038554, + ] + + def test_katz_centrality_weighted(self): + G = self.G + alpha = self.G.alpha + p = nx.katz_centrality(G, alpha, weight="weight") + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_katz_centrality_unweighted(self): + H = self.H + alpha = self.H.alpha + p = nx.katz_centrality(H, alpha, weight="weight") + for a, b in zip(list(p.values()), self.H.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestKatzCentralityDirectedNumpy(TestKatzCentralityDirected): + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + super().setup_class() + + def test_katz_centrality_weighted(self): + G = self.G + alpha = self.G.alpha + p = nx.katz_centrality_numpy(G, alpha, weight="weight") + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_katz_centrality_unweighted(self): + H = self.H + alpha = self.H.alpha + p = nx.katz_centrality_numpy(H, alpha, weight="weight") + for a, b in zip(list(p.values()), self.H.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestKatzEigenvectorVKatz: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_eigenvector_v_katz_random(self): + G = nx.gnp_random_graph(10, 0.5, seed=1234) + l = max(np.linalg.eigvals(nx.adjacency_matrix(G).todense())) + e = nx.eigenvector_centrality_numpy(G) + k = nx.katz_centrality_numpy(G, 1.0 / l) + for n in G: + assert e[n] == pytest.approx(k[n], abs=1e-7) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..21aa28b0b7c155078ab9c1a25e14d9aafa65683d --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py @@ -0,0 +1,221 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") + + +def test_laplacian_centrality_null_graph(): + G = nx.Graph() + with pytest.raises(nx.NetworkXPointlessConcept): + d = nx.laplacian_centrality(G, normalized=False) + + +def test_laplacian_centrality_single_node(): + """See gh-6571""" + G = nx.empty_graph(1) + assert nx.laplacian_centrality(G, normalized=False) == {0: 0} + with pytest.raises(ZeroDivisionError): + nx.laplacian_centrality(G, normalized=True) + + +def test_laplacian_centrality_unconnected_nodes(): + """laplacian_centrality on a unconnected node graph should return 0 + + For graphs without edges, the Laplacian energy is 0 and is unchanged with + node removal, so:: + + LC(v) = LE(G) - LE(G - v) = 0 - 0 = 0 + """ + G = nx.empty_graph(3) + assert nx.laplacian_centrality(G, normalized=False) == {0: 0, 1: 0, 2: 0} + + +def test_laplacian_centrality_empty_graph(): + G = nx.empty_graph(3) + with pytest.raises(ZeroDivisionError): + d = nx.laplacian_centrality(G, normalized=True) + + +def test_laplacian_centrality_E(): + E = nx.Graph() + E.add_weighted_edges_from( + [(0, 1, 4), (4, 5, 1), (0, 2, 
2), (2, 1, 1), (1, 3, 2), (1, 4, 2)] + ) + d = nx.laplacian_centrality(E) + exact = { + 0: 0.700000, + 1: 0.900000, + 2: 0.280000, + 3: 0.220000, + 4: 0.260000, + 5: 0.040000, + } + + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 200 + dnn = nx.laplacian_centrality(E, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-7) + + # Check unweighted not-normalized version + duw_nn = nx.laplacian_centrality(E, normalized=False, weight=None) + print(duw_nn) + exact_uw_nn = { + 0: 18, + 1: 34, + 2: 18, + 3: 10, + 4: 16, + 5: 6, + } + for n, dc in duw_nn.items(): + assert exact_uw_nn[n] == pytest.approx(dc, abs=1e-7) + + # Check unweighted version + duw = nx.laplacian_centrality(E, weight=None) + full_energy = 42 + for n, dc in duw.items(): + assert exact_uw_nn[n] / full_energy == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_KC(): + KC = nx.karate_club_graph() + d = nx.laplacian_centrality(KC) + exact = { + 0: 0.2543593, + 1: 0.1724524, + 2: 0.2166053, + 3: 0.0964646, + 4: 0.0350344, + 5: 0.0571109, + 6: 0.0540713, + 7: 0.0788674, + 8: 0.1222204, + 9: 0.0217565, + 10: 0.0308751, + 11: 0.0215965, + 12: 0.0174372, + 13: 0.118861, + 14: 0.0366341, + 15: 0.0548712, + 16: 0.0172772, + 17: 0.0191969, + 18: 0.0225564, + 19: 0.0331147, + 20: 0.0279955, + 21: 0.0246361, + 22: 0.0382339, + 23: 0.1294193, + 24: 0.0227164, + 25: 0.0644697, + 26: 0.0281555, + 27: 0.075188, + 28: 0.0364742, + 29: 0.0707087, + 30: 0.0708687, + 31: 0.131019, + 32: 0.2370821, + 33: 0.3066709, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 12502 + dnn = nx.laplacian_centrality(KC, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3) + + +def test_laplacian_centrality_K(): + K = nx.krackhardt_kite_graph() + d = nx.laplacian_centrality(K) + exact = { + 0: 0.3010753, + 1: 0.3010753, + 2: 0.2258065, + 3: 0.483871, + 4: 0.2258065, + 5: 0.3870968, + 6: 0.3870968, + 7: 0.1935484, + 8: 0.0752688, + 9: 0.0322581, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 186 + dnn = nx.laplacian_centrality(K, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3) + + +def test_laplacian_centrality_P3(): + P3 = nx.path_graph(3) + d = nx.laplacian_centrality(P3) + exact = {0: 0.6, 1: 1.0, 2: 0.6} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_K5(): + K5 = nx.complete_graph(5) + d = nx.laplacian_centrality(K5) + exact = {0: 0.52, 1: 0.52, 2: 0.52, 3: 0.52, 4: 0.52} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_FF(): + FF = nx.florentine_families_graph() + d = nx.laplacian_centrality(FF) + exact = { + "Acciaiuoli": 0.0804598, + "Medici": 0.4022989, + "Castellani": 0.1724138, + "Peruzzi": 0.183908, + "Strozzi": 0.2528736, + "Barbadori": 0.137931, + "Ridolfi": 0.2183908, + "Tornabuoni": 0.2183908, + "Albizzi": 0.1954023, + "Salviati": 0.1149425, + "Pazzi": 0.0344828, + "Bischeri": 0.1954023, + "Guadagni": 0.2298851, + "Ginori": 0.045977, + "Lamberteschi": 0.0574713, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_DG(): + DG = nx.DiGraph([(0, 5), (1, 5), (2, 5), (3, 5), (4, 
5), (5, 6), (5, 7), (5, 8)]) + d = nx.laplacian_centrality(DG) + exact = { + 0: 0.2123352, + 5: 0.515391, + 1: 0.2123352, + 2: 0.2123352, + 3: 0.2123352, + 4: 0.2123352, + 6: 0.2952031, + 7: 0.2952031, + 8: 0.2952031, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 9.50704 + dnn = nx.laplacian_centrality(DG, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-4) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..bf096039cd76542cc4c963ab896ee8fc4b295224 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py @@ -0,0 +1,344 @@ +import pytest + +import networkx as nx + + +class TestLoadCentrality: + @classmethod + def setup_class(cls): + G = nx.Graph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + cls.G = G + cls.exact_weighted = {0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0} + cls.K = nx.krackhardt_kite_graph() + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + cls.P2 = nx.path_graph(2) + + cls.C4 = nx.cycle_graph(4) + cls.T = nx.balanced_tree(r=2, h=2) + cls.Gb = nx.Graph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + cls.F = nx.florentine_families_graph() + cls.LM = nx.les_miserables_graph() + cls.D = nx.cycle_graph(3, create_using=nx.DiGraph()) + cls.D.add_edges_from([(3, 0), (4, 3)]) + + def test_not_strongly_connected(self): + b = nx.load_centrality(self.D) + result = {0: 5.0 / 12, 1: 1.0 / 4, 2: 1.0 / 12, 3: 1.0 / 4, 4: 0.000} + for n in sorted(self.D): + assert result[n] == pytest.approx(b[n], abs=1e-3) + assert result[n] == pytest.approx(nx.load_centrality(self.D, n), abs=1e-3) + + def test_P2_normalized_load(self): + G = self.P2 + c = nx.load_centrality(G, normalized=True) + d = {0: 0.000, 1: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_load(self): + b = nx.load_centrality(self.G, weight="weight", normalized=False) + for n in sorted(self.G): + assert b[n] == self.exact_weighted[n] + + def test_k5_load(self): + G = self.K5 + c = nx.load_centrality(G) + d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_load(self): + G = self.P3 + c = nx.load_centrality(G) + d = {0: 0.000, 1: 1.000, 2: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + c = nx.load_centrality(G, v=1) + assert c == pytest.approx(1.0, abs=1e-7) + c = nx.load_centrality(G, v=1, normalized=True) + assert c == pytest.approx(1.0, abs=1e-7) + + def test_p2_load(self): + G = nx.path_graph(2) + c = nx.load_centrality(G) + d = {0: 0.000, 1: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_krackhardt_load(self): + G = self.K + c = nx.load_centrality(G) + d = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + for n in sorted(G): + assert c[n] == 
pytest.approx(d[n], abs=1e-3) + + def test_florentine_families_load(self): + G = self.F + c = nx.load_centrality(G) + d = { + "Acciaiuoli": 0.000, + "Albizzi": 0.211, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.251, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.117, + "Salviati": 0.143, + "Strozzi": 0.106, + "Tornabuoni": 0.090, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_les_miserables_load(self): + G = self.LM + c = nx.load_centrality(G) + d = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.567, + "Labarre": 0.000, + "Marguerite": 0.000, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.043, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.128, + "MmeThenardier": 0.029, + "Thenardier": 0.075, + "Cosette": 0.024, + "Javert": 0.054, + "Fauchelevent": 0.026, + "Bamatabois": 0.008, + "Perpetue": 0.000, + "Simplice": 0.009, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.007, + "Boulatruelle": 0.000, + "Eponine": 0.012, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.164, + "Gillenormand": 0.021, + "Magnon": 0.000, + "MlleGillenormand": 0.047, + "MmePontmercy": 0.000, + "MlleVaubois": 0.000, + "LtGillenormand": 0.000, + "Marius": 0.133, + "BaronessT": 0.000, + "Mabeuf": 0.028, + "Enjolras": 0.041, + "Combeferre": 0.001, + "Prouvaire": 0.000, + "Feuilly": 0.001, + "Courfeyrac": 0.006, + "Bahorel": 0.002, + "Bossuet": 0.032, + "Joly": 0.002, + "Grantaire": 0.000, + "MotherPlutarch": 0.000, + "Gueulemer": 0.005, + "Babet": 0.005, + "Claquesous": 0.005, + "Montparnasse": 0.004, + "Toussaint": 0.000, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.000, + "MmeHucheloup": 0.000, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_k5_load(self): + G = self.K5 + c = nx.load_centrality(G, normalized=False) + d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_p3_load(self): + G = self.P3 + c = nx.load_centrality(G, normalized=False) + d = {0: 0.000, 1: 2.000, 2: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_krackhardt_load(self): + G = self.K + c = nx.load_centrality(G, normalized=False) + d = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_florentine_families_load(self): + G = self.F + c = nx.load_centrality(G, normalized=False) + + d = { + "Acciaiuoli": 0.000, + "Albizzi": 38.333, + "Barbadori": 17.000, + "Bischeri": 19.000, + "Castellani": 10.000, + "Ginori": 0.000, + "Guadagni": 45.667, + "Lamberteschi": 0.000, + "Medici": 95.000, + "Pazzi": 0.000, + "Peruzzi": 4.000, + "Ridolfi": 21.333, + "Salviati": 26.000, + "Strozzi": 19.333, + "Tornabuoni": 16.333, + } 
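+        # Note (added for clarity): with n = 15 nodes here, these unnormalized
+        # values are, up to the 3-decimal rounding above, the normalized values
+        # from test_florentine_families_load scaled by (n - 1) * (n - 2) = 182,
+        # e.g. Medici: 0.522 * 182 = 95.0.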
+ for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_load_betweenness_difference(self): + # Difference Between Load and Betweenness + # --------------------------------------- The smallest graph + # that shows the difference between load and betweenness is + # G=ladder_graph(3) (Graph B below) + + # Graph A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong + # Wang: Comment on "Scientific collaboration + # networks. II. Shortest paths, weighted networks, and + # centrality". https://arxiv.org/pdf/physics/0511084 + + # Notice that unlike here, their calculation adds 1 to the + # betweenness of every node i for every path from i to every + # other node. This is exactly what it should be, based on + # Eqn. (1) in their paper: the eqn is B(v) = \sum_{s\neq t, + # s\neq v}{\frac{\sigma_{st}(v)}{\sigma_{st}}}, therefore, + # they allow v to be the target node. + + # We follow Brandes 2001, who follows Freeman 1977, in making + # the sum for betweenness of v exclude paths where v is either + # the source or target node. To agree with their numbers, we + # must additionally remove edge (4,8) from the graph; see the AC + # example following (there is a mistake in the figure in their + # paper - personal communication). + + # A = nx.Graph() + # A.add_edges_from([(0,1), (1,2), (1,3), (2,4), + # (3,5), (4,6), (4,7), (4,8), + # (5,8), (6,9), (7,9), (8,9)]) + B = nx.Graph() # ladder_graph(3) + B.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + c = nx.load_centrality(B, normalized=False) + d = {0: 1.750, 1: 1.750, 2: 6.500, 3: 6.500, 4: 1.750, 5: 1.750} + for n in sorted(B): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_c4_edge_load(self): + G = self.C4 + c = nx.edge_load_centrality(G) + d = {(0, 1): 6.000, (0, 3): 6.000, (1, 2): 6.000, (2, 3): 6.000} + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_edge_load(self): + G = self.P4 + c = nx.edge_load_centrality(G) + d = {(0, 1): 6.000, (1, 2): 8.000, (2, 3): 6.000} + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_k5_edge_load(self): + G = self.K5 + c = nx.edge_load_centrality(G) + d = { + (0, 1): 5.000, + (0, 2): 5.000, + (0, 3): 5.000, + (0, 4): 5.000, + (1, 2): 5.000, + (1, 3): 5.000, + (1, 4): 5.000, + (2, 3): 5.000, + (2, 4): 5.000, + (3, 4): 5.000, + } + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_tree_edge_load(self): + G = self.T + c = nx.edge_load_centrality(G) + d = { + (0, 1): 24.000, + (0, 2): 24.000, + (1, 3): 12.000, + (1, 4): 12.000, + (2, 5): 12.000, + (2, 6): 12.000, + } + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..0cb8f52965c975013d41be7c3de874cd86ee693a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py @@ -0,0 +1,87 @@ +import pytest + +import networkx as nx + + +def example1a_G(): + G = nx.Graph() + G.add_node(1, percolation=0.1) + G.add_node(2, percolation=0.2) + G.add_node(3, percolation=0.2) + G.add_node(4, percolation=0.2) + G.add_node(5, percolation=0.3) + G.add_node(6, percolation=0.2) + G.add_node(7, percolation=0.5) + G.add_node(8, percolation=0.5) + G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), 
(6, 7), (6, 8)]) + return G + + +def example1b_G(): + G = nx.Graph() + G.add_node(1, percolation=0.3) + G.add_node(2, percolation=0.5) + G.add_node(3, percolation=0.5) + G.add_node(4, percolation=0.2) + G.add_node(5, percolation=0.3) + G.add_node(6, percolation=0.2) + G.add_node(7, percolation=0.1) + G.add_node(8, percolation=0.1) + G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)]) + return G + + +def test_percolation_example1a(): + """percolation centrality: example 1a""" + G = example1a_G() + p = nx.percolation_centrality(G) + p_answer = {4: 0.625, 6: 0.667} + for n, k in p_answer.items(): + assert p[n] == pytest.approx(k, abs=1e-3) + + +def test_percolation_example1b(): + """percolation centrality: example 1a""" + G = example1b_G() + p = nx.percolation_centrality(G) + p_answer = {4: 0.825, 6: 0.4} + for n, k in p_answer.items(): + assert p[n] == pytest.approx(k, abs=1e-3) + + +def test_converge_to_betweenness(): + """percolation centrality: should converge to betweenness + centrality when all nodes are percolated the same""" + # taken from betweenness test test_florentine_families_graph + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + # If no initial state is provided, state for + # every node defaults to 1 + p_answer = nx.percolation_centrality(G) + assert p_answer == pytest.approx(b_answer, abs=1e-3) + + p_states = {k: 0.3 for k, v in b_answer.items()} + p_answer = nx.percolation_centrality(G, states=p_states) + assert p_answer == pytest.approx(b_answer, abs=1e-3) + + +def test_default_percolation(): + G = nx.erdos_renyi_graph(42, 0.42, seed=42) + assert nx.percolation_centrality(G) == pytest.approx(nx.betweenness_centrality(G)) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_reaching.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_reaching.py new file mode 100644 index 0000000000000000000000000000000000000000..02ad8322cb6cf2a550afec0980ead8537abd55d5 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_reaching.py @@ -0,0 +1,117 @@ +"""Unit tests for the :mod:`networkx.algorithms.centrality.reaching` module.""" +import pytest + +import networkx as nx + + +class TestGlobalReachingCentrality: + """Unit tests for the global reaching centrality function.""" + + def test_non_positive_weights(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.global_reaching_centrality(G, weight="weight") + + def test_negatively_weighted(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)]) + nx.global_reaching_centrality(G, weight="weight") + + def test_directed_star(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 2, 0.5), (1, 3, 0.5)]) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight="weight") == 0.5 + assert grc(G) == 1 + + def test_undirected_unweighted_star(self): + G = nx.star_graph(2) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False, weight=None) == 0.25 + + def test_undirected_weighted_star(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + grc = nx.global_reaching_centrality + assert grc(G, 
normalized=False, weight="weight") == 0.375 + + def test_cycle_directed_unweighted(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_undirected_unweighted(self): + G = nx.Graph() + G.add_edge(1, 2) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_directed_weighted(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 1, 1)]) + assert nx.global_reaching_centrality(G) == 0 + + def test_cycle_undirected_weighted(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False) == 0 + + def test_directed_weighted(self): + G = nx.DiGraph() + G.add_edge("A", "B", weight=5) + G.add_edge("B", "C", weight=1) + G.add_edge("B", "D", weight=0.25) + G.add_edge("D", "E", weight=1) + + denom = len(G) - 1 + A_local = sum([5, 3, 2.625, 2.0833333333333]) / denom + B_local = sum([1, 0.25, 0.625]) / denom + C_local = 0 + D_local = sum([1]) / denom + E_local = 0 + + local_reach_ctrs = [A_local, C_local, B_local, D_local, E_local] + max_local = max(local_reach_ctrs) + expected = sum(max_local - lrc for lrc in local_reach_ctrs) / denom + grc = nx.global_reaching_centrality + actual = grc(G, normalized=False, weight="weight") + assert expected == pytest.approx(actual, abs=1e-7) + + +class TestLocalReachingCentrality: + """Unit tests for the local reaching centrality function.""" + + def test_non_positive_weights(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + G.add_weighted_edges_from([(0, 1, 0)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_negatively_weighted(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_undirected_unweighted_star(self): + G = nx.star_graph(2) + grc = nx.local_reaching_centrality + assert grc(G, 1, weight=None, normalized=False) == 0.75 + + def test_undirected_weighted_star(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + centrality = nx.local_reaching_centrality( + G, 1, normalized=False, weight="weight" + ) + assert centrality == 1.5 + + def test_undirected_weighted_normalized(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + centrality = nx.local_reaching_centrality( + G, 1, normalized=True, weight="weight" + ) + assert centrality == 1.0 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..cc3047866079fd9fe4cf43a6793cf160a0c0cdce --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py @@ -0,0 +1,82 @@ +""" +Tests for second order centrality. 
+""" + +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +def test_empty(): + with pytest.raises(nx.NetworkXException): + G = nx.empty_graph() + nx.second_order_centrality(G) + + +def test_non_connected(): + with pytest.raises(nx.NetworkXException): + G = nx.Graph() + G.add_node(0) + G.add_node(1) + nx.second_order_centrality(G) + + +def test_non_negative_edge_weights(): + with pytest.raises(nx.NetworkXException): + G = nx.path_graph(2) + G.add_edge(0, 1, weight=-1) + nx.second_order_centrality(G) + + +def test_weight_attribute(): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 3.5)], weight="w") + expected = {0: 3.431, 1: 3.082, 2: 5.612} + b = nx.second_order_centrality(G, weight="w") + + for n in sorted(G): + assert b[n] == pytest.approx(expected[n], abs=1e-2) + + +def test_one_node_graph(): + """Second order centrality: single node""" + G = nx.Graph() + G.add_node(0) + G.add_edge(0, 0) + assert nx.second_order_centrality(G)[0] == 0 + + +def test_P3(): + """Second order centrality: line graph, as defined in paper""" + G = nx.path_graph(3) + b_answer = {0: 3.741, 1: 1.414, 2: 3.741} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + +def test_K3(): + """Second order centrality: complete graph, as defined in paper""" + G = nx.complete_graph(3) + b_answer = {0: 1.414, 1: 1.414, 2: 1.414} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + +def test_ring_graph(): + """Second order centrality: ring graph, as defined in paper""" + G = nx.cycle_graph(5) + b_answer = {0: 4.472, 1: 4.472, 2: 4.472, 3: 4.472, 4: 4.472} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py new file mode 100644 index 0000000000000000000000000000000000000000..710927515baa4786e4be15ddf25ad34e423563d2 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py @@ -0,0 +1,110 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.centrality.subgraph_alg import ( + communicability_betweenness_centrality, + estrada_index, + subgraph_centrality, + subgraph_centrality_exp, +) + + +class TestSubgraph: + def test_subgraph_centrality(self): + answer = {0: 1.5430806348152433, 1: 1.5430806348152433} + result = subgraph_centrality(nx.path_graph(2)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + answer1 = { + "1": 1.6445956054135658, + "Albert": 2.4368257358712189, + "Aric": 2.4368257358712193, + "Dan": 3.1306328496328168, + "Franck": 2.3876142275231915, + } + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + result1 = subgraph_centrality(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + result1 = subgraph_centrality_exp(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + + def test_subgraph_centrality_big_graph(self): + g199 = nx.complete_graph(199) + g200 = nx.complete_graph(200) + + comm199 = nx.subgraph_centrality(g199) + comm199_exp = 
nx.subgraph_centrality_exp(g199) + + comm200 = nx.subgraph_centrality(g200) + comm200_exp = nx.subgraph_centrality_exp(g200) + + def test_communicability_betweenness_centrality_small(self): + result = communicability_betweenness_centrality(nx.path_graph(2)) + assert result == {0: 0, 1: 0} + + result = communicability_betweenness_centrality(nx.path_graph(1)) + assert result == {0: 0} + + result = communicability_betweenness_centrality(nx.path_graph(0)) + assert result == {} + + answer = {0: 0.1411224421177313, 1: 1.0, 2: 0.1411224421177313} + result = communicability_betweenness_centrality(nx.path_graph(3)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + result = communicability_betweenness_centrality(nx.complete_graph(3)) + for k, v in result.items(): + assert 0.49786143366223296 == pytest.approx(v, abs=1e-7) + + def test_communicability_betweenness_centrality(self): + answer = { + 0: 0.07017447951484615, + 1: 0.71565598701107991, + 2: 0.71565598701107991, + 3: 0.07017447951484615, + } + result = communicability_betweenness_centrality(nx.path_graph(4)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + answer1 = { + "1": 0.060039074193949521, + "Albert": 0.315470761661372, + "Aric": 0.31547076166137211, + "Dan": 0.68297778678316201, + "Franck": 0.21977926617449497, + } + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + result1 = communicability_betweenness_centrality(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + + def test_estrada_index(self): + answer = 1041.2470334195475 + result = estrada_index(nx.karate_club_graph()) + assert answer == pytest.approx(result, abs=1e-7) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_trophic.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_trophic.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d6813160eed6da3cd1fd0b254b7352bd1bd4ad --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_trophic.py @@ -0,0 +1,302 @@ +"""Test trophic levels, trophic differences and trophic coherence +""" +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +def test_trophic_levels(): + """Trivial example""" + G = nx.DiGraph() + G.add_edge("a", "b") + G.add_edge("b", "c") + + d = nx.trophic_levels(G) + assert d == {"a": 1, "b": 2, "c": 3} + + +def test_trophic_levels_levine(): + """Example from Figure 5 in Stephen Levine (1980) J. theor. Biol. 
83, + 195-207 + """ + S = nx.DiGraph() + S.add_edge(1, 2, weight=1.0) + S.add_edge(1, 3, weight=0.2) + S.add_edge(1, 4, weight=0.8) + S.add_edge(2, 3, weight=0.2) + S.add_edge(2, 5, weight=0.3) + S.add_edge(4, 3, weight=0.6) + S.add_edge(4, 5, weight=0.7) + S.add_edge(5, 4, weight=0.2) + + # save copy for later, test intermediate implementation details first + S2 = S.copy() + + # drop nodes of in-degree zero + z = [nid for nid, d in S.in_degree if d == 0] + for nid in z: + S.remove_node(nid) + + # find adjacency matrix + q = nx.linalg.graphmatrix.adjacency_matrix(S).T + + # fmt: off + expected_q = np.array([ + [0, 0, 0., 0], + [0.2, 0, 0.6, 0], + [0, 0, 0, 0.2], + [0.3, 0, 0.7, 0] + ]) + # fmt: on + assert np.array_equal(q.todense(), expected_q) + + # must be square, size of number of nodes + assert len(q.shape) == 2 + assert q.shape[0] == q.shape[1] + assert q.shape[0] == len(S) + + nn = q.shape[0] + + i = np.eye(nn) + n = np.linalg.inv(i - q) + y = np.asarray(n) @ np.ones(nn) + + expected_y = np.array([1, 2.07906977, 1.46511628, 2.3255814]) + assert np.allclose(y, expected_y) + + expected_d = {1: 1, 2: 2, 3: 3.07906977, 4: 2.46511628, 5: 3.3255814} + + d = nx.trophic_levels(S2) + + for nid, level in d.items(): + expected_level = expected_d[nid] + assert expected_level == pytest.approx(level, abs=1e-7) + + +def test_trophic_levels_simple(): + matrix_a = np.array([[0, 0], [1, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + assert d[0] == pytest.approx(2, abs=1e-7) + assert d[1] == pytest.approx(1, abs=1e-7) + + +def test_trophic_levels_more_complex(): + # fmt: off + matrix = np.array([ + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + expected_result = [1, 2, 3, 4] + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + # fmt: off + matrix = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + + expected_result = [1, 2, 2.5, 3.25] + print("Calculated result: ", d) + print("Expected Result: ", expected_result) + + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + +def test_trophic_levels_even_more_complex(): + # fmt: off + # Another, bigger matrix + matrix = np.array([ + [0, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 1, 0] + ]) + # Generated this linear system using pen and paper: + K = np.array([ + [1, 0, -1, 0, 0], + [0, 0.5, 0, -0.5, 0], + [0, 0, 1, 0, 0], + [0, -0.5, 0, 1, -0.5], + [0, 0, 0, 0, 1], + ]) + # fmt: on + result_1 = np.ravel(np.linalg.inv(K) @ np.ones(5)) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + result_2 = nx.trophic_levels(G) + + for ind in range(5): + assert result_1[ind] == pytest.approx(result_2[ind], abs=1e-7) + + +def test_trophic_levels_singular_matrix(): + """Should raise an error with graphs with only non-basal nodes""" + matrix = np.identity(4) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." 
+ ) + assert msg in str(e.value) + + +def test_trophic_levels_singular_with_basal(): + """Should fail to compute if there are any parts of the graph which are not + reachable from any basal node (with in-degree zero). + """ + G = nx.DiGraph() + # a has in-degree zero + G.add_edge("a", "b") + + # b is one level above a, c and d + G.add_edge("c", "b") + G.add_edge("d", "b") + + # c and d form a loop, neither are reachable from a + G.add_edge("c", "d") + G.add_edge("d", "c") + + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." + ) + assert msg in str(e.value) + + # if self-loops are allowed, smaller example: + G = nx.DiGraph() + G.add_edge("a", "b") # a has in-degree zero + G.add_edge("c", "b") # b is one level above a and c + G.add_edge("c", "c") # c has a self-loop + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." + ) + assert msg in str(e.value) + + +def test_trophic_differences(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + diffs = nx.trophic_differences(G) + assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + diffs = nx.trophic_differences(G) + + assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7) + assert diffs[(0, 2)] == pytest.approx(1.5, abs=1e-7) + assert diffs[(1, 2)] == pytest.approx(0.5, abs=1e-7) + assert diffs[(1, 3)] == pytest.approx(1.25, abs=1e-7) + assert diffs[(2, 3)] == pytest.approx(0.75, abs=1e-7) + + +def test_trophic_incoherence_parameter_no_cannibalism(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + assert q == pytest.approx(0, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + # fmt: off + matrix_c = np.array([ + [0, 1, 1, 0], + [0, 1, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 1] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + # Ignore the -link + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + # no self-loops case + # fmt: off + matrix_d = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_d, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + # Ignore the -link + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + +def test_trophic_incoherence_parameter_cannibalism(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + assert q == pytest.approx(0, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + 
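+        # (row 1 above has a 1 on the diagonal: node 1 is a cannibal,
+        # i.e. it carries a self-loop once converted to a DiGraph)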
[1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 1, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + assert q == pytest.approx(2, abs=1e-7) + + # fmt: off + matrix_c = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + # Ignore the -link + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_voterank.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_voterank.py new file mode 100644 index 0000000000000000000000000000000000000000..12126818b4387a439493e8f66ba2e06e1a092416 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/tests/test_voterank.py @@ -0,0 +1,65 @@ +""" + Unit tests for VoteRank. +""" + + +import networkx as nx + + +class TestVoteRankCentrality: + # Example Graph present in reference paper + def test_voterank_centrality_1(self): + G = nx.Graph() + G.add_edges_from( + [ + (7, 8), + (7, 5), + (7, 9), + (5, 0), + (0, 1), + (0, 2), + (0, 3), + (0, 4), + (1, 6), + (2, 6), + (3, 6), + (4, 6), + ] + ) + assert [0, 7, 6] == nx.voterank(G) + + def test_voterank_emptygraph(self): + G = nx.Graph() + assert [] == nx.voterank(G) + + # Graph unit test + def test_voterank_centrality_2(self): + G = nx.florentine_families_graph() + d = nx.voterank(G, 4) + exact = ["Medici", "Strozzi", "Guadagni", "Castellani"] + assert exact == d + + # DiGraph unit test + def test_voterank_centrality_3(self): + G = nx.gnc_graph(10, seed=7) + d = nx.voterank(G, 4) + exact = [3, 6, 8] + assert exact == d + + # MultiGraph unit test + def test_voterank_centrality_4(self): + G = nx.MultiGraph() + G.add_edges_from( + [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)] + ) + exact = [2, 1, 5, 4] + assert exact == nx.voterank(G) + + # MultiDiGraph unit test + def test_voterank_centrality_5(self): + G = nx.MultiDiGraph() + G.add_edges_from( + [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)] + ) + exact = [2, 0, 5, 4] + assert exact == nx.voterank(G) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/trophic.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/trophic.py new file mode 100644 index 0000000000000000000000000000000000000000..cfc7ea4f20677696f7b6a68348c51a913f5de91e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/trophic.py @@ -0,0 +1,162 @@ +"""Trophic levels""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["trophic_levels", "trophic_differences", "trophic_incoherence_parameter"] + + +@not_implemented_for("undirected") +@nx._dispatch(edge_attrs="weight") +def trophic_levels(G, weight="weight"): + r"""Compute the trophic levels of nodes. + + The trophic level of a node $i$ is + + .. math:: + + s_i = 1 + \frac{1}{k^{in}_i} \sum_{j} a_{ij} s_j + + where $k^{in}_i$ is the in-degree of i + + .. math:: + + k^{in}_i = \sum_{j} a_{ij} + + and nodes with $k^{in}_i = 0$ have $s_i = 1$ by convention. + + These are calculated using the method outlined in Levine [1]_. + + Parameters + ---------- + G : DiGraph + A directed networkx graph + + Returns + ------- + nodes : dict + Dictionary of nodes with trophic level as the value. + + References + ---------- + .. [1] Stephen Levine (1980) J. 
theor. Biol. 83, 195-207 + """ + import numpy as np + + # find adjacency matrix + a = nx.adjacency_matrix(G, weight=weight).T.toarray() + + # drop rows/columns where in-degree is zero + rowsum = np.sum(a, axis=1) + p = a[rowsum != 0][:, rowsum != 0] + # normalise so sum of in-degree weights is 1 along each row + p = p / rowsum[rowsum != 0][:, np.newaxis] + + # calculate trophic levels + nn = p.shape[0] + i = np.eye(nn) + try: + n = np.linalg.inv(i - p) + except np.linalg.LinAlgError as err: + # LinAlgError is raised when there is a non-basal node + msg = ( + "Trophic levels are only defined for graphs where every " + + "node has a path from a basal node (basal nodes are nodes " + + "with no incoming edges)." + ) + raise nx.NetworkXError(msg) from err + y = n.sum(axis=1) + 1 + + levels = {} + + # all nodes with in-degree zero have trophic level == 1 + zero_node_ids = (node_id for node_id, degree in G.in_degree if degree == 0) + for node_id in zero_node_ids: + levels[node_id] = 1 + + # all other nodes have levels as calculated + nonzero_node_ids = (node_id for node_id, degree in G.in_degree if degree != 0) + for i, node_id in enumerate(nonzero_node_ids): + levels[node_id] = y[i] + + return levels + + +@not_implemented_for("undirected") +@nx._dispatch(edge_attrs="weight") +def trophic_differences(G, weight="weight"): + r"""Compute the trophic differences of the edges of a directed graph. + + The trophic difference $x_ij$ for each edge is defined in Johnson et al. + [1]_ as: + + .. math:: + x_ij = s_j - s_i + + Where $s_i$ is the trophic level of node $i$. + + Parameters + ---------- + G : DiGraph + A directed networkx graph + + Returns + ------- + diffs : dict + Dictionary of edges with trophic differences as the value. + + References + ---------- + .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A. + Munoz (2014) PNAS "Trophic coherence determines food-web stability" + """ + levels = trophic_levels(G, weight=weight) + diffs = {} + for u, v in G.edges: + diffs[(u, v)] = levels[v] - levels[u] + return diffs + + +@not_implemented_for("undirected") +@nx._dispatch(edge_attrs="weight") +def trophic_incoherence_parameter(G, weight="weight", cannibalism=False): + r"""Compute the trophic incoherence parameter of a graph. + + Trophic coherence is defined as the homogeneity of the distribution of + trophic distances: the more similar, the more coherent. This is measured by + the standard deviation of the trophic differences and referred to as the + trophic incoherence parameter $q$ by [1]. + + Parameters + ---------- + G : DiGraph + A directed networkx graph + + cannibalism: Boolean + If set to False, self edges are not considered in the calculation + + Returns + ------- + trophic_incoherence_parameter : float + The trophic coherence of a graph + + References + ---------- + .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A. 
+ Munoz (2014) PNAS "Trophic coherence determines food-web stability" + """ + import numpy as np + + if cannibalism: + diffs = trophic_differences(G, weight=weight) + else: + # If no cannibalism, remove self-edges + self_loops = list(nx.selfloop_edges(G)) + if self_loops: + # Make a copy so we do not change G's edges in memory + G_2 = G.copy() + G_2.remove_edges_from(self_loops) + else: + # Avoid copy otherwise + G_2 = G + diffs = trophic_differences(G_2, weight=weight) + return np.std(list(diffs.values())) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/centrality/voterank_alg.py b/phivenv/Lib/site-packages/networkx/algorithms/centrality/voterank_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..f9cf43c7813b91a53d63f2e15c0ef7a538d42f25 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/centrality/voterank_alg.py @@ -0,0 +1,94 @@ +"""Algorithm to select influential nodes in a graph using VoteRank.""" +import networkx as nx + +__all__ = ["voterank"] + + +@nx._dispatch +def voterank(G, number_of_nodes=None): + """Select a list of influential nodes in a graph using VoteRank algorithm + + VoteRank [1]_ computes a ranking of the nodes in a graph G based on a + voting scheme. With VoteRank, all nodes vote for each of its in-neighbours + and the node with the highest votes is elected iteratively. The voting + ability of out-neighbors of elected nodes is decreased in subsequent turns. + + Parameters + ---------- + G : graph + A NetworkX graph. + + number_of_nodes : integer, optional + Number of ranked nodes to extract (default all nodes). + + Returns + ------- + voterank : list + Ordered list of computed seeds. + Only nodes with positive number of votes are returned. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4)]) + >>> nx.voterank(G) + [0, 1] + + The algorithm can be used both for undirected and directed graphs. + However, the directed version is different in two ways: + (i) nodes only vote for their in-neighbors and + (ii) only the voting ability of elected node and its out-neighbors are updated: + + >>> G = nx.DiGraph([(0, 1), (2, 1), (2, 3), (3, 4)]) + >>> nx.voterank(G) + [2, 3] + + Notes + ----- + Each edge is treated independently in case of multigraphs. + + References + ---------- + .. [1] Zhang, J.-X. et al. (2016). + Identifying a set of influential spreaders in complex networks. + Sci. Rep. 6, 27823; doi: 10.1038/srep27823. + """ + influential_nodes = [] + vote_rank = {} + if len(G) == 0: + return influential_nodes + if number_of_nodes is None or number_of_nodes > len(G): + number_of_nodes = len(G) + if G.is_directed(): + # For directed graphs compute average out-degree + avgDegree = sum(deg for _, deg in G.out_degree()) / len(G) + else: + # For undirected graphs compute average degree + avgDegree = sum(deg for _, deg in G.degree()) / len(G) + # step 1 - initiate all nodes to (0,1) (score, voting ability) + for n in G.nodes(): + vote_rank[n] = [0, 1] + # Repeat steps 1b to 4 until num_seeds are elected. 
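+    # Each pass below elects one spreader: reset all scores, let nodes
+    # vote for their neighbours (in-neighbours in the directed case)
+    # with their current voting ability, elect the top scorer, then zero
+    # its ability and damp its out-neighbours' ability by 1/avgDegree.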
+ for _ in range(number_of_nodes): + # step 1b - reset rank + for n in G.nodes(): + vote_rank[n][0] = 0 + # step 2 - vote + for n, nbr in G.edges(): + # In directed graphs nodes only vote for their in-neighbors + vote_rank[n][0] += vote_rank[nbr][1] + if not G.is_directed(): + vote_rank[nbr][0] += vote_rank[n][1] + for n in influential_nodes: + vote_rank[n][0] = 0 + # step 3 - select top node + n = max(G.nodes, key=lambda x: vote_rank[x][0]) + if vote_rank[n][0] == 0: + return influential_nodes + influential_nodes.append(n) + # weaken the selected node + vote_rank[n] = [0, 0] + # step 4 - update voterank properties + for _, nbr in G.edges(n): + vote_rank[nbr][1] -= 1 / avgDegree + vote_rank[nbr][1] = max(vote_rank[nbr][1], 0) + return influential_nodes diff --git a/phivenv/Lib/site-packages/networkx/algorithms/chains.py b/phivenv/Lib/site-packages/networkx/algorithms/chains.py new file mode 100644 index 0000000000000000000000000000000000000000..289bc1c3dd4d0ce91f6c6393e91c812b85837f22 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/chains.py @@ -0,0 +1,172 @@ +"""Functions for finding chains in a graph.""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["chain_decomposition"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def chain_decomposition(G, root=None): + """Returns the chain decomposition of a graph. + + The *chain decomposition* of a graph with respect a depth-first + search tree is a set of cycles or paths derived from the set of + fundamental cycles of the tree in the following manner. Consider + each fundamental cycle with respect to the given tree, represented + as a list of edges beginning with the nontree edge oriented away + from the root of the tree. For each fundamental cycle, if it + overlaps with any previous fundamental cycle, just take the initial + non-overlapping segment, which is a path instead of a cycle. Each + cycle or path is called a *chain*. For more information, see [1]_. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the chain + decomposition for the connected component containing this node + will be returned. This node indicates the root of the depth-first + search tree. + + Yields + ------ + chain : list + A list of edges representing a chain. There is no guarantee on + the orientation of the edges in each chain (for example, if a + chain includes the edge joining nodes 1 and 2, the chain may + include either (1, 2) or (2, 1)). + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> list(nx.chain_decomposition(G)) + [[(4, 5), (5, 3), (3, 4)]] + + Notes + ----- + The worst-case running time of this implementation is linear in the + number of nodes and number of edges [1]_. + + References + ---------- + .. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex- + and 2-edge-connectivity." *Information Processing Letters*, + 113, 241–244. Elsevier. + + """ + + def _dfs_cycle_forest(G, root=None): + """Builds a directed graph composed of cycles from the given graph. + + `G` is an undirected simple graph. `root` is a node in the graph + from which the depth-first search is started. + + This function returns both the depth-first search cycle graph + (as a :class:`~networkx.DiGraph`) and the list of nodes in + depth-first preorder. 
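+        (The caller re-walks this preorder list when extracting chains,
+        so nodes are processed in discovery order.)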
The depth-first search cycle graph is a + directed graph whose edges are the edges of `G` oriented toward + the root if the edge is a tree edge and away from the root if + the edge is a non-tree edge. If `root` is not specified, this + performs a depth-first search on each connected component of `G` + and returns a directed forest instead. + + If `root` is not in the graph, this raises :exc:`KeyError`. + + """ + # Create a directed graph from the depth-first search tree with + # root node `root` in which tree edges are directed toward the + # root and nontree edges are directed away from the root. For + # each node with an incident nontree edge, this creates a + # directed cycle starting with the nontree edge and returning to + # that node. + # + # The `parent` node attribute stores the parent of each node in + # the DFS tree. The `nontree` edge attribute indicates whether + # the edge is a tree edge or a nontree edge. + # + # We also store the order of the nodes found in the depth-first + # search in the `nodes` list. + H = nx.DiGraph() + nodes = [] + for u, v, d in nx.dfs_labeled_edges(G, source=root): + if d == "forward": + # `dfs_labeled_edges()` yields (root, root, 'forward') + # if it is beginning the search on a new connected + # component. + if u == v: + H.add_node(v, parent=None) + nodes.append(v) + else: + H.add_node(v, parent=u) + H.add_edge(v, u, nontree=False) + nodes.append(v) + # `dfs_labeled_edges` considers nontree edges in both + # orientations, so we need to not add the edge if it its + # other orientation has been added. + elif d == "nontree" and v not in H[u]: + H.add_edge(v, u, nontree=True) + else: + # Do nothing on 'reverse' edges; we only care about + # forward and nontree edges. + pass + return H, nodes + + def _build_chain(G, u, v, visited): + """Generate the chain starting from the given nontree edge. + + `G` is a DFS cycle graph as constructed by + :func:`_dfs_cycle_graph`. The edge (`u`, `v`) is a nontree edge + that begins a chain. `visited` is a set representing the nodes + in `G` that have already been visited. + + This function yields the edges in an initial segment of the + fundamental cycle of `G` starting with the nontree edge (`u`, + `v`) that includes all the edges up until the first node that + appears in `visited`. The tree edges are given by the 'parent' + node attribute. The `visited` set is updated to add each node in + an edge yielded by this function. + + """ + while v not in visited: + yield u, v + visited.add(v) + u, v = v, G.nodes[v]["parent"] + yield u, v + + # Check if the root is in the graph G. If not, raise NodeNotFound + if root is not None and root not in G: + raise nx.NodeNotFound(f"Root node {root} is not in graph") + + # Create a directed version of H that has the DFS edges directed + # toward the root and the nontree edges directed away from the root + # (in each connected component). + H, nodes = _dfs_cycle_forest(G, root) + + # Visit the nodes again in DFS order. For each node, and for each + # nontree edge leaving that node, compute the fundamental cycle for + # that nontree edge starting with that edge. If the fundamental + # cycle overlaps with any visited nodes, just take the prefix of the + # cycle up to the point of visited nodes. + # + # We repeat this process for each connected component (implicitly, + # since `nodes` already has a list of the nodes grouped by connected + # component). + visited = set() + for u in nodes: + visited.add(u) + # For each nontree edge going out of node u... 
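+            # (tree edges were stored with nontree=False and nontree edges
+            # with nontree=True, so filtering on the data value keeps only
+            # the nontree edges leaving u)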
+ edges = ((u, v) for u, v, d in H.out_edges(u, data="nontree") if d) + for u, v in edges: + # Create the cycle or cycle prefix starting with the + # nontree edge. + chain = list(_build_chain(H, u, v, visited)) + yield chain diff --git a/phivenv/Lib/site-packages/networkx/algorithms/chordal.py b/phivenv/Lib/site-packages/networkx/algorithms/chordal.py new file mode 100644 index 0000000000000000000000000000000000000000..2aa5679831768aad26db2dc53b40f4834186ceae --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/chordal.py @@ -0,0 +1,440 @@ +""" +Algorithms for chordal graphs. + +A graph is chordal if every cycle of length at least 4 has a chord +(an edge joining two nodes not adjacent in the cycle). +https://en.wikipedia.org/wiki/Chordal_graph +""" +import sys + +import networkx as nx +from networkx.algorithms.components import connected_components +from networkx.utils import arbitrary_element, not_implemented_for + +__all__ = [ + "is_chordal", + "find_induced_nodes", + "chordal_graph_cliques", + "chordal_graph_treewidth", + "NetworkXTreewidthBoundExceeded", + "complete_to_chordal_graph", +] + + +class NetworkXTreewidthBoundExceeded(nx.NetworkXException): + """Exception raised when a treewidth bound has been provided and it has + been exceeded""" + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def is_chordal(G): + """Checks whether G is a chordal graph. + + A graph is chordal if every cycle of length at least 4 has a chord + (an edge joining two nodes not adjacent in the cycle). + + Parameters + ---------- + G : graph + A NetworkX graph. + + Returns + ------- + chordal : bool + True if G is a chordal graph and False otherwise. + + Raises + ------ + NetworkXNotImplemented + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... ] + >>> G = nx.Graph(e) + >>> nx.is_chordal(G) + True + + Notes + ----- + The routine tries to go through every node following maximum cardinality + search. It returns False when it finds that the separator for any node + is not a clique. Based on the algorithms in [1]_. + + Self loops are ignored. + + References + ---------- + .. [1] R. E. Tarjan and M. Yannakakis, Simple linear-time algorithms + to test chordality of graphs, test acyclicity of hypergraphs, and + selectively reduce acyclic hypergraphs, SIAM J. Comput., 13 (1984), + pp. 566–579. + """ + if len(G.nodes) <= 3: + return True + return len(_find_chordality_breaker(G)) == 0 + + +@nx._dispatch +def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize): + """Returns the set of induced nodes in the path from s to t. + + Parameters + ---------- + G : graph + A chordal NetworkX graph + s : node + Source node to look for induced nodes + t : node + Destination node to look for induced nodes + treewidth_bound: float + Maximum treewidth acceptable for the graph H. The search + for induced nodes will end as soon as the treewidth_bound is exceeded. + + Returns + ------- + induced_nodes : Set of nodes + The set of induced nodes in the path from s to t in G + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + If the input graph is an instance of one of these classes, a + :exc:`NetworkXError` is raised. + The algorithm can only be applied to chordal graphs. 
If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. + + Examples + -------- + >>> G = nx.Graph() + >>> G = nx.generators.classic.path_graph(10) + >>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2) + >>> sorted(induced_nodes) + [1, 2, 3, 4, 5, 6, 7, 8, 9] + + Notes + ----- + G must be a chordal graph and (s,t) an edge that is not in G. + + If a treewidth_bound is provided, the search for induced nodes will end + as soon as the treewidth_bound is exceeded. + + The algorithm is inspired by Algorithm 4 in [1]_. + A formal definition of induced node can also be found on that reference. + + Self Loops are ignored + + References + ---------- + .. [1] Learning Bounded Treewidth Bayesian Networks. + Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008. + http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf + """ + if not is_chordal(G): + raise nx.NetworkXError("Input graph is not chordal.") + + H = nx.Graph(G) + H.add_edge(s, t) + induced_nodes = set() + triplet = _find_chordality_breaker(H, s, treewidth_bound) + while triplet: + (u, v, w) = triplet + induced_nodes.update(triplet) + for n in triplet: + if n != s: + H.add_edge(s, n) + triplet = _find_chordality_breaker(H, s, treewidth_bound) + if induced_nodes: + # Add t and the second node in the induced path from s to t. + induced_nodes.add(t) + for u in G[s]: + if len(induced_nodes & set(G[u])) == 2: + induced_nodes.add(u) + break + return induced_nodes + + +@nx._dispatch +def chordal_graph_cliques(G): + """Returns all maximal cliques of a chordal graph. + + The algorithm breaks the graph in connected components and performs a + maximum cardinality search in each component to get the cliques. + + Parameters + ---------- + G : graph + A NetworkX graph + + Yields + ------ + frozenset of nodes + Maximal cliques, each of which is a frozenset of + nodes in `G`. The order of cliques is arbitrary. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + The algorithm can only be applied to chordal graphs. If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... (7, 8), + ... ] + >>> G = nx.Graph(e) + >>> G.add_node(9) + >>> cliques = [c for c in chordal_graph_cliques(G)] + >>> cliques[0] + frozenset({1, 2, 3}) + """ + for C in (G.subgraph(c).copy() for c in connected_components(G)): + if C.number_of_nodes() == 1: + if nx.number_of_selfloops(C) > 0: + raise nx.NetworkXError("Input graph is not chordal.") + yield frozenset(C.nodes()) + else: + unnumbered = set(C.nodes()) + v = arbitrary_element(C) + unnumbered.remove(v) + numbered = {v} + clique_wanna_be = {v} + while unnumbered: + v = _max_cardinality_node(C, unnumbered, numbered) + unnumbered.remove(v) + numbered.add(v) + new_clique_wanna_be = set(C.neighbors(v)) & numbered + sg = C.subgraph(clique_wanna_be) + if _is_complete_graph(sg): + new_clique_wanna_be.add(v) + if not new_clique_wanna_be >= clique_wanna_be: + yield frozenset(clique_wanna_be) + clique_wanna_be = new_clique_wanna_be + else: + raise nx.NetworkXError("Input graph is not chordal.") + yield frozenset(clique_wanna_be) + + +@nx._dispatch +def chordal_graph_treewidth(G): + """Returns the treewidth of the chordal graph G. 
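+
+    In a chordal graph the maximal cliques form an optimal tree
+    decomposition, so the treewidth is simply the size of a largest
+    maximal clique minus one, which is what this function computes.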
+ + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + treewidth : int + The size of the largest clique in the graph minus one. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + The algorithm can only be applied to chordal graphs. If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... (7, 8), + ... ] + >>> G = nx.Graph(e) + >>> G.add_node(9) + >>> nx.chordal_graph_treewidth(G) + 3 + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Tree_decomposition#Treewidth + """ + if not is_chordal(G): + raise nx.NetworkXError("Input graph is not chordal.") + + max_clique = -1 + for clique in nx.chordal_graph_cliques(G): + max_clique = max(max_clique, len(clique)) + return max_clique - 1 + + +def _is_complete_graph(G): + """Returns True if G is a complete graph.""" + if nx.number_of_selfloops(G) > 0: + raise nx.NetworkXError("Self loop found in _is_complete_graph()") + n = G.number_of_nodes() + if n < 2: + return True + e = G.number_of_edges() + max_edges = (n * (n - 1)) / 2 + return e == max_edges + + +def _find_missing_edge(G): + """Given a non-complete graph G, returns a missing edge.""" + nodes = set(G) + for u in G: + missing = nodes - set(list(G[u].keys()) + [u]) + if missing: + return (u, missing.pop()) + + +def _max_cardinality_node(G, choices, wanna_connect): + """Returns a the node in choices that has more connections in G + to nodes in wanna_connect. + """ + max_number = -1 + for x in choices: + number = len([y for y in G[x] if y in wanna_connect]) + if number > max_number: + max_number = number + max_cardinality_node = x + return max_cardinality_node + + +def _find_chordality_breaker(G, s=None, treewidth_bound=sys.maxsize): + """Given a graph G, starts a max cardinality search + (starting from s if s is given and from an arbitrary node otherwise) + trying to find a non-chordal cycle. + + If it does find one, it returns (u,v,w) where u,v,w are the three + nodes that together with s are involved in the cycle. + + It ignores any self loops. + """ + unnumbered = set(G) + if s is None: + s = arbitrary_element(G) + unnumbered.remove(s) + numbered = {s} + current_treewidth = -1 + while unnumbered: # and current_treewidth <= treewidth_bound: + v = _max_cardinality_node(G, unnumbered, numbered) + unnumbered.remove(v) + numbered.add(v) + clique_wanna_be = set(G[v]) & numbered + sg = G.subgraph(clique_wanna_be) + if _is_complete_graph(sg): + # The graph seems to be chordal by now. We update the treewidth + current_treewidth = max(current_treewidth, len(clique_wanna_be)) + if current_treewidth > treewidth_bound: + raise nx.NetworkXTreewidthBoundExceeded( + f"treewidth_bound exceeded: {current_treewidth}" + ) + else: + # sg is not a clique, + # look for an edge that is not included in sg + (u, w) = _find_missing_edge(sg) + return (u, v, w) + return () + + +@not_implemented_for("directed") +@nx._dispatch +def complete_to_chordal_graph(G): + """Return a copy of G completed to a chordal graph + + Adds edges to a copy of G to create a chordal graph. A graph G=(V,E) is + called chordal if for each cycle with length bigger than 3, there exist + two non-adjacent nodes connected by an edge (called a chord). 
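+
+    For instance, the 4-cycle is the smallest non-chordal graph and is
+    completed by a single chord (a small hand-checked sketch):
+
+    >>> H, _ = nx.complete_to_chordal_graph(nx.cycle_graph(4))
+    >>> H.number_of_edges()
+    5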
+ + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + H : NetworkX graph + The chordal enhancement of G + alpha : Dictionary + The elimination ordering of nodes of G + + Notes + ----- + There are different approaches to calculate the chordal + enhancement of a graph. The algorithm used here is called + MCS-M and gives at least minimal (local) triangulation of graph. Note + that this triangulation is not necessarily a global minimum. + + https://en.wikipedia.org/wiki/Chordal_graph + + References + ---------- + .. [1] Berry, Anne & Blair, Jean & Heggernes, Pinar & Peyton, Barry. (2004) + Maximum Cardinality Search for Computing Minimal Triangulations of + Graphs. Algorithmica. 39. 287-298. 10.1007/s00453-004-1084-3. + + Examples + -------- + >>> from networkx.algorithms.chordal import complete_to_chordal_graph + >>> G = nx.wheel_graph(10) + >>> H, alpha = complete_to_chordal_graph(G) + """ + H = G.copy() + alpha = {node: 0 for node in H} + if nx.is_chordal(H): + return H, alpha + chords = set() + weight = {node: 0 for node in H.nodes()} + unnumbered_nodes = list(H.nodes()) + for i in range(len(H.nodes()), 0, -1): + # get the node in unnumbered_nodes with the maximum weight + z = max(unnumbered_nodes, key=lambda node: weight[node]) + unnumbered_nodes.remove(z) + alpha[z] = i + update_nodes = [] + for y in unnumbered_nodes: + if G.has_edge(y, z): + update_nodes.append(y) + else: + # y_weight will be bigger than node weights between y and z + y_weight = weight[y] + lower_nodes = [ + node for node in unnumbered_nodes if weight[node] < y_weight + ] + if nx.has_path(H.subgraph(lower_nodes + [z, y]), y, z): + update_nodes.append(y) + chords.add((z, y)) + # during calculation of paths the weights should not be updated + for node in update_nodes: + weight[node] += 1 + H.add_edges_from(chords) + return H, alpha diff --git a/phivenv/Lib/site-packages/networkx/algorithms/clique.py b/phivenv/Lib/site-packages/networkx/algorithms/clique.py new file mode 100644 index 0000000000000000000000000000000000000000..7fd7e81665a1c4ce5ea060ef6397f35ab2ea848c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/clique.py @@ -0,0 +1,753 @@ +"""Functions for finding and manipulating cliques. + +Finding the largest clique in a graph is NP-complete problem, so most of +these algorithms have an exponential running time; for more information, +see the Wikipedia article on the clique problem [1]_. + +.. [1] clique problem:: https://en.wikipedia.org/wiki/Clique_problem + +""" +from collections import defaultdict, deque +from itertools import chain, combinations, islice + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "find_cliques", + "find_cliques_recursive", + "make_max_clique_graph", + "make_clique_bipartite", + "node_clique_number", + "number_of_cliques", + "enumerate_all_cliques", + "max_weight_clique", +] + + +@not_implemented_for("directed") +@nx._dispatch +def enumerate_all_cliques(G): + """Returns all cliques in an undirected graph. + + This function returns an iterator over cliques, each of which is a + list of nodes. The iteration is ordered by cardinality of the + cliques: first all cliques of size one, then all cliques of size + two, etc. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Returns + ------- + iterator + An iterator over cliques, each of which is a list of nodes in + `G`. The cliques are ordered according to size. 
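+
+    Examples
+    --------
+    A small sketch on the triangle graph (every clique, smallest first):
+
+    >>> G = nx.complete_graph(3)
+    >>> [sorted(c) for c in nx.enumerate_all_cliques(G)]
+    [[0], [1], [2], [0, 1], [0, 2], [1, 2], [0, 1, 2]]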
+ + Notes + ----- + To obtain a list of all cliques, use + `list(enumerate_all_cliques(G))`. However, be aware that in the + worst-case, the length of this list can be exponential in the number + of nodes in the graph (for example, when the graph is the complete + graph). This function avoids storing all cliques in memory by only + keeping current candidate node lists in memory during its search. + + The implementation is adapted from the algorithm by Zhang, et + al. (2005) [1]_ to output all cliques discovered. + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Yun Zhang, Abu-Khzam, F.N., Baldwin, N.E., Chesler, E.J., + Langston, M.A., Samatova, N.F., + "Genome-Scale Computational Approaches to Memory-Intensive + Applications in Systems Biology". + *Supercomputing*, 2005. Proceedings of the ACM/IEEE SC 2005 + Conference, pp. 12, 12--18 Nov. 2005. + . + + """ + index = {} + nbrs = {} + for u in G: + index[u] = len(index) + # Neighbors of u that appear after u in the iteration order of G. + nbrs[u] = {v for v in G[u] if v not in index} + + queue = deque(([u], sorted(nbrs[u], key=index.__getitem__)) for u in G) + # Loop invariants: + # 1. len(base) is nondecreasing. + # 2. (base + cnbrs) is sorted with respect to the iteration order of G. + # 3. cnbrs is a set of common neighbors of nodes in base. + while queue: + base, cnbrs = map(list, queue.popleft()) + yield base + for i, u in enumerate(cnbrs): + # Use generators to reduce memory consumption. + queue.append( + ( + chain(base, [u]), + filter(nbrs[u].__contains__, islice(cnbrs, i + 1, None)), + ) + ) + + +@not_implemented_for("directed") +@nx._dispatch +def find_cliques(G, nodes=None): + """Returns all maximal cliques in an undirected graph. + + For each node *n*, a *maximal clique for n* is a largest complete + subgraph containing *n*. The largest maximal clique is sometimes + called the *maximum clique*. + + This function returns an iterator over cliques, each of which is a + list of nodes. It is an iterative implementation, so should not + suffer from recursion depth issues. + + This function accepts a list of `nodes` and only the maximal cliques + containing all of these `nodes` are returned. It can considerably speed up + the running time if some specific cliques are desired. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + nodes : list, optional (default=None) + If provided, only yield *maximal cliques* containing all nodes in `nodes`. + If `nodes` isn't a clique itself, a ValueError is raised. + + Returns + ------- + iterator + An iterator over maximal cliques, each of which is a list of + nodes in `G`. If `nodes` is provided, only the maximal cliques + containing all the nodes in `nodes` are returned. The order of + cliques is arbitrary. + + Raises + ------ + ValueError + If `nodes` is not a clique. + + Examples + -------- + >>> from pprint import pprint # For nice dict formatting + >>> G = nx.karate_club_graph() + >>> sum(1 for c in nx.find_cliques(G)) # The number of maximal cliques in G + 36 + >>> max(nx.find_cliques(G), key=len) # The largest maximal clique in G + [0, 1, 2, 3, 13] + + The size of the largest maximal clique is known as the *clique number* of + the graph, which can be found directly with: + + >>> max(len(c) for c in nx.find_cliques(G)) + 5 + + One can also compute the number of maximal cliques in `G` that contain a given + node. 
The following produces a dictionary keyed by node whose + values are the number of maximal cliques in `G` that contain the node: + + >>> pprint({n: sum(1 for c in nx.find_cliques(G) if n in c) for n in G}) + {0: 13, + 1: 6, + 2: 7, + 3: 3, + 4: 2, + 5: 3, + 6: 3, + 7: 1, + 8: 3, + 9: 2, + 10: 2, + 11: 1, + 12: 1, + 13: 2, + 14: 1, + 15: 1, + 16: 1, + 17: 1, + 18: 1, + 19: 2, + 20: 1, + 21: 1, + 22: 1, + 23: 3, + 24: 2, + 25: 2, + 26: 1, + 27: 3, + 28: 2, + 29: 2, + 30: 2, + 31: 4, + 32: 9, + 33: 14} + + Or, similarly, the maximal cliques in `G` that contain a given node. + For example, the 4 maximal cliques that contain node 31: + + >>> [c for c in nx.find_cliques(G) if 31 in c] + [[0, 31], [33, 32, 31], [33, 28, 31], [24, 25, 31]] + + See Also + -------- + find_cliques_recursive + A recursive version of the same algorithm. + + Notes + ----- + To obtain a list of all maximal cliques, use + `list(find_cliques(G))`. However, be aware that in the worst-case, + the length of this list can be exponential in the number of nodes in + the graph. This function avoids storing all cliques in memory by + only keeping current candidate node lists in memory during its search. + + This implementation is based on the algorithm published by Bron and + Kerbosch (1973) [1]_, as adapted by Tomita, Tanaka and Takahashi + (2006) [2]_ and discussed in Cazals and Karande (2008) [3]_. It + essentially unrolls the recursion used in the references to avoid + issues of recursion stack depth (for a recursive implementation, see + :func:`find_cliques_recursive`). + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Bron, C. and Kerbosch, J. + "Algorithm 457: finding all cliques of an undirected graph". + *Communications of the ACM* 16, 9 (Sep. 1973), 575--577. + + + .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi, + "The worst-case time complexity for generating all maximal + cliques and computational experiments", + *Theoretical Computer Science*, Volume 363, Issue 1, + Computing and Combinatorics, + 10th Annual International Conference on + Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28--42 + + + .. [3] F. Cazals, C. Karande, + "A note on the problem of reporting maximal cliques", + *Theoretical Computer Science*, + Volume 407, Issues 1--3, 6 November 2008, Pages 564--568, + + + """ + if len(G) == 0: + return + + adj = {u: {v for v in G[u] if v != u} for u in G} + + # Initialize Q with the given nodes and subg, cand with their nbrs + Q = nodes[:] if nodes is not None else [] + cand = set(G) + for node in Q: + if node not in cand: + raise ValueError(f"The given `nodes` {nodes} do not form a clique") + cand &= adj[node] + + if not cand: + yield Q[:] + return + + subg = cand.copy() + stack = [] + Q.append(None) + + u = max(subg, key=lambda u: len(cand & adj[u])) + ext_u = cand - adj[u] + + try: + while True: + if ext_u: + q = ext_u.pop() + cand.remove(q) + Q[-1] = q + adj_q = adj[q] + subg_q = subg & adj_q + if not subg_q: + yield Q[:] + else: + cand_q = cand & adj_q + if cand_q: + stack.append((subg, cand, ext_u)) + Q.append(None) + subg = subg_q + cand = cand_q + u = max(subg, key=lambda u: len(cand & adj[u])) + ext_u = cand - adj[u] + else: + Q.pop() + subg, cand, ext_u = stack.pop() + except IndexError: + pass + + +# TODO Should this also be not implemented for directed graphs? +@nx._dispatch +def find_cliques_recursive(G, nodes=None): + """Returns all maximal cliques in a graph. 
+ + For each node *v*, a *maximal clique for v* is a largest complete + subgraph containing *v*. The largest maximal clique is sometimes + called the *maximum clique*. + + This function returns an iterator over cliques, each of which is a + list of nodes. It is a recursive implementation, so may suffer from + recursion depth issues, but is included for pedagogical reasons. + For a non-recursive implementation, see :func:`find_cliques`. + + This function accepts a list of `nodes` and only the maximal cliques + containing all of these `nodes` are returned. It can considerably speed up + the running time if some specific cliques are desired. + + Parameters + ---------- + G : NetworkX graph + + nodes : list, optional (default=None) + If provided, only yield *maximal cliques* containing all nodes in `nodes`. + If `nodes` isn't a clique itself, a ValueError is raised. + + Returns + ------- + iterator + An iterator over maximal cliques, each of which is a list of + nodes in `G`. If `nodes` is provided, only the maximal cliques + containing all the nodes in `nodes` are yielded. The order of + cliques is arbitrary. + + Raises + ------ + ValueError + If `nodes` is not a clique. + + See Also + -------- + find_cliques + An iterative version of the same algorithm. See docstring for examples. + + Notes + ----- + To obtain a list of all maximal cliques, use + `list(find_cliques_recursive(G))`. However, be aware that in the + worst-case, the length of this list can be exponential in the number + of nodes in the graph. This function avoids storing all cliques in memory + by only keeping current candidate node lists in memory during its search. + + This implementation is based on the algorithm published by Bron and + Kerbosch (1973) [1]_, as adapted by Tomita, Tanaka and Takahashi + (2006) [2]_ and discussed in Cazals and Karande (2008) [3]_. For a + non-recursive implementation, see :func:`find_cliques`. + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Bron, C. and Kerbosch, J. + "Algorithm 457: finding all cliques of an undirected graph". + *Communications of the ACM* 16, 9 (Sep. 1973), 575--577. + + + .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi, + "The worst-case time complexity for generating all maximal + cliques and computational experiments", + *Theoretical Computer Science*, Volume 363, Issue 1, + Computing and Combinatorics, + 10th Annual International Conference on + Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28--42 + + + .. [3] F. Cazals, C. 
Karande, + "A note on the problem of reporting maximal cliques", + *Theoretical Computer Science*, + Volume 407, Issues 1--3, 6 November 2008, Pages 564--568, + + + """ + if len(G) == 0: + return iter([]) + + adj = {u: {v for v in G[u] if v != u} for u in G} + + # Initialize Q with the given nodes and subg, cand with their nbrs + Q = nodes[:] if nodes is not None else [] + cand_init = set(G) + for node in Q: + if node not in cand_init: + raise ValueError(f"The given `nodes` {nodes} do not form a clique") + cand_init &= adj[node] + + if not cand_init: + return iter([Q]) + + subg_init = cand_init.copy() + + def expand(subg, cand): + u = max(subg, key=lambda u: len(cand & adj[u])) + for q in cand - adj[u]: + cand.remove(q) + Q.append(q) + adj_q = adj[q] + subg_q = subg & adj_q + if not subg_q: + yield Q[:] + else: + cand_q = cand & adj_q + if cand_q: + yield from expand(subg_q, cand_q) + Q.pop() + + return expand(subg_init, cand_init) + + +@nx._dispatch +def make_max_clique_graph(G, create_using=None): + """Returns the maximal clique graph of the given graph. + + The nodes of the maximal clique graph of `G` are the cliques of + `G` and an edge joins two cliques if the cliques are not disjoint. + + Parameters + ---------- + G : NetworkX graph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + A graph whose nodes are the cliques of `G` and whose edges + join two cliques if they are not disjoint. + + Notes + ----- + This function behaves like the following code:: + + import networkx as nx + G = nx.make_clique_bipartite(G) + cliques = [v for v in G.nodes() if G.nodes[v]['bipartite'] == 0] + G = nx.bipartite.projected_graph(G, cliques) + G = nx.relabel_nodes(G, {-v: v - 1 for v in G}) + + It should be faster, though, since it skips all the intermediate + steps. + + """ + if create_using is None: + B = G.__class__() + else: + B = nx.empty_graph(0, create_using) + cliques = list(enumerate(set(c) for c in find_cliques(G))) + # Add a numbered node for each clique. + B.add_nodes_from(i for i, c in cliques) + # Join cliques by an edge if they share a node. + clique_pairs = combinations(cliques, 2) + B.add_edges_from((i, j) for (i, c1), (j, c2) in clique_pairs if c1 & c2) + return B + + +@nx._dispatch +def make_clique_bipartite(G, fpos=None, create_using=None, name=None): + """Returns the bipartite clique graph corresponding to `G`. + + In the returned bipartite graph, the "bottom" nodes are the nodes of + `G` and the "top" nodes represent the maximal cliques of `G`. + There is an edge from node *v* to clique *C* in the returned graph + if and only if *v* is an element of *C*. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + fpos : bool + If True or not None, the returned graph will have an + additional attribute, `pos`, a dictionary mapping node to + position in the Euclidean plane. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + A bipartite graph whose "bottom" set is the nodes of the graph + `G`, whose "top" set is the cliques of `G`, and whose edges + join nodes of `G` to the cliques that contain them. 
+ + The nodes of the graph `G` have the node attribute + 'bipartite' set to 1 and the nodes representing cliques + have the node attribute 'bipartite' set to 0, as is the + convention for bipartite graphs in NetworkX. + + """ + B = nx.empty_graph(0, create_using) + B.clear() + # The "bottom" nodes in the bipartite graph are the nodes of the + # original graph, G. + B.add_nodes_from(G, bipartite=1) + for i, cl in enumerate(find_cliques(G)): + # The "top" nodes in the bipartite graph are the cliques. These + # nodes get negative numbers as labels. + name = -i - 1 + B.add_node(name, bipartite=0) + B.add_edges_from((v, name) for v in cl) + return B + + +@nx._dispatch +def node_clique_number(G, nodes=None, cliques=None, separate_nodes=False): + """Returns the size of the largest maximal clique containing each given node. + + Returns a single or list depending on input nodes. + An optional list of cliques can be input if already computed. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + cliques : list, optional (default=None) + A list of cliques, each of which is itself a list of nodes. + If not specified, the list of all cliques will be computed + using :func:`find_cliques`. + + Returns + ------- + int or dict + If `nodes` is a single node, returns the size of the + largest maximal clique in `G` containing that node. + Otherwise return a dict keyed by node to the size + of the largest maximal clique containing that node. + + See Also + -------- + find_cliques + find_cliques yields the maximal cliques of G. + It accepts a `nodes` argument which restricts consideration to + maximal cliques containing all the given `nodes`. + The search for the cliques is optimized for `nodes`. + """ + if cliques is None: + if nodes is not None: + # Use ego_graph to decrease size of graph + # check for single node + if nodes in G: + return max(len(c) for c in find_cliques(nx.ego_graph(G, nodes))) + # handle multiple nodes + return { + n: max(len(c) for c in find_cliques(nx.ego_graph(G, n))) for n in nodes + } + + # nodes is None--find all cliques + cliques = list(find_cliques(G)) + + # single node requested + if nodes in G: + return max(len(c) for c in cliques if nodes in c) + + # multiple nodes requested + # preprocess all nodes (faster than one at a time for even 2 nodes) + size_for_n = defaultdict(int) + for c in cliques: + size_of_c = len(c) + for n in c: + if size_for_n[n] < size_of_c: + size_for_n[n] = size_of_c + if nodes is None: + return size_for_n + return {n: size_for_n[n] for n in nodes} + + +def number_of_cliques(G, nodes=None, cliques=None): + """Returns the number of maximal cliques for each node. + + Returns a single or list depending on input nodes. + Optional list of cliques can be input if already computed. + """ + if cliques is None: + cliques = list(find_cliques(G)) + + if nodes is None: + nodes = list(G.nodes()) # none, get entire graph + + if not isinstance(nodes, list): # check for a list + v = nodes + # assume it is a single value + numcliq = len([1 for c in cliques if v in c]) + else: + numcliq = {} + for v in nodes: + numcliq[v] = len([1 for c in cliques if v in c]) + return numcliq + + +class MaxWeightClique: + """A class for the maximum weight clique algorithm. + + This class is a helper for the `max_weight_clique` function. The class + should not normally be used directly. 
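+    Use the public :func:`max_weight_clique` function instead.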
+ + Parameters + ---------- + G : NetworkX graph + The undirected graph for which a maximum weight clique is sought + weight : string or None, optional (default='weight') + The node attribute that holds the integer value used as a weight. + If None, then each node has weight 1. + + Attributes + ---------- + G : NetworkX graph + The undirected graph for which a maximum weight clique is sought + node_weights: dict + The weight of each node + incumbent_nodes : list + The nodes of the incumbent clique (the best clique found so far) + incumbent_weight: int + The weight of the incumbent clique + """ + + def __init__(self, G, weight): + self.G = G + self.incumbent_nodes = [] + self.incumbent_weight = 0 + + if weight is None: + self.node_weights = {v: 1 for v in G.nodes()} + else: + for v in G.nodes(): + if weight not in G.nodes[v]: + errmsg = f"Node {v!r} does not have the requested weight field." + raise KeyError(errmsg) + if not isinstance(G.nodes[v][weight], int): + errmsg = f"The {weight!r} field of node {v!r} is not an integer." + raise ValueError(errmsg) + self.node_weights = {v: G.nodes[v][weight] for v in G.nodes()} + + def update_incumbent_if_improved(self, C, C_weight): + """Update the incumbent if the node set C has greater weight. + + C is assumed to be a clique. + """ + if C_weight > self.incumbent_weight: + self.incumbent_nodes = C[:] + self.incumbent_weight = C_weight + + def greedily_find_independent_set(self, P): + """Greedily find an independent set of nodes from a set of + nodes P.""" + independent_set = [] + P = P[:] + while P: + v = P[0] + independent_set.append(v) + P = [w for w in P if v != w and not self.G.has_edge(v, w)] + return independent_set + + def find_branching_nodes(self, P, target): + """Find a set of nodes to branch on.""" + residual_wt = {v: self.node_weights[v] for v in P} + total_wt = 0 + P = P[:] + while P: + independent_set = self.greedily_find_independent_set(P) + min_wt_in_class = min(residual_wt[v] for v in independent_set) + total_wt += min_wt_in_class + if total_wt > target: + break + for v in independent_set: + residual_wt[v] -= min_wt_in_class + P = [v for v in P if residual_wt[v] != 0] + return P + + def expand(self, C, C_weight, P): + """Look for the best clique that contains all the nodes in C and zero or + more of the nodes in P, backtracking if it can be shown that no such + clique has greater weight than the incumbent. + """ + self.update_incumbent_if_improved(C, C_weight) + branching_nodes = self.find_branching_nodes(P, self.incumbent_weight - C_weight) + while branching_nodes: + v = branching_nodes.pop() + P.remove(v) + new_C = C + [v] + new_C_weight = C_weight + self.node_weights[v] + new_P = [w for w in P if self.G.has_edge(v, w)] + self.expand(new_C, new_C_weight, new_P) + + def find_max_weight_clique(self): + """Find a maximum weight clique.""" + # Sort nodes in reverse order of degree for speed + nodes = sorted(self.G.nodes(), key=lambda v: self.G.degree(v), reverse=True) + nodes = [v for v in nodes if self.node_weights[v] > 0] + self.expand([], 0, nodes) + + +@not_implemented_for("directed") +@nx._dispatch(node_attrs="weight") +def max_weight_clique(G, weight="weight"): + """Find a maximum weight clique in G. + + A *clique* in a graph is a set of nodes such that every two distinct nodes + are adjacent. The *weight* of a clique is the sum of the weights of its + nodes. A *maximum weight clique* of graph G is a clique C in G such that + no clique in G has weight greater than the weight of C. 
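+
+    A minimal usage sketch (the node weights are arbitrary values chosen
+    for illustration):
+
+    >>> G = nx.complete_graph(3)
+    >>> nx.set_node_attributes(G, {0: 5, 1: 10, 2: 15}, "weight")
+    >>> clique, weight = nx.max_weight_clique(G)
+    >>> sorted(clique), weight
+    ([0, 1, 2], 30)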
+ + Parameters + ---------- + G : NetworkX graph + Undirected graph + weight : string or None, optional (default='weight') + The node attribute that holds the integer value used as a weight. + If None, then each node has weight 1. + + Returns + ------- + clique : list + the nodes of a maximum weight clique + weight : int + the weight of a maximum weight clique + + Notes + ----- + The implementation is recursive, and therefore it may run into recursion + depth issues if G contains a clique whose number of nodes is close to the + recursion depth limit. + + At each search node, the algorithm greedily constructs a weighted + independent set cover of part of the graph in order to find a small set of + nodes on which to branch. The algorithm is very similar to the algorithm + of Tavares et al. [1]_, other than the fact that the NetworkX version does + not use bitsets. This style of algorithm for maximum weight clique (and + maximum weight independent set, which is the same problem but on the + complement graph) has a decades-long history. See Algorithm B of Warren + and Hicks [2]_ and the references in that paper. + + References + ---------- + .. [1] Tavares, W.A., Neto, M.B.C., Rodrigues, C.D., Michelon, P.: Um + algoritmo de branch and bound para o problema da clique máxima + ponderada. Proceedings of XLVII SBPO 1 (2015). + + .. [2] Warren, Jeffrey S, Hicks, Illya V.: Combinatorial Branch-and-Bound + for the Maximum Weight Independent Set Problem. Technical Report, + Texas A&M University (2016). + """ + + mwc = MaxWeightClique(G, weight) + mwc.find_max_weight_clique() + return mwc.incumbent_nodes, mwc.incumbent_weight diff --git a/phivenv/Lib/site-packages/networkx/algorithms/cluster.py b/phivenv/Lib/site-packages/networkx/algorithms/cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..0500852a7ce87439058cc627d5f6256b4ba09ed1 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/cluster.py @@ -0,0 +1,605 @@ +"""Algorithms to characterize the number of triangles in a graph.""" + +from collections import Counter +from itertools import chain, combinations + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "triangles", + "average_clustering", + "clustering", + "transitivity", + "square_clustering", + "generalized_degree", +] + + +@not_implemented_for("directed") +@nx._dispatch +def triangles(G, nodes=None): + """Compute the number of triangles. + + Finds the number of triangles that include a node as one vertex. + + Parameters + ---------- + G : graph + A networkx graph + + nodes : node, iterable of nodes, or None (default=None) + If a singleton node, return the number of triangles for that node. + If an iterable, compute the number of triangles for each of those nodes. + If `None` (the default) compute the number of triangles for all nodes in `G`. + + Returns + ------- + out : dict or int + If `nodes` is a container of nodes, returns number of triangles keyed by node (dict). + If `nodes` is a specific node, returns number of triangles for the node (int). + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.triangles(G, 0)) + 6 + >>> print(nx.triangles(G)) + {0: 6, 1: 6, 2: 6, 3: 6, 4: 6} + >>> print(list(nx.triangles(G, [0, 1]).values())) + [6, 6] + + Notes + ----- + Self loops are ignored. 
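One consequence of the per-node counting worth spelling out: every triangle is reported once at each of its three vertices, so the number of distinct triangles is the sum over all nodes divided by three. A short sketch:

    import networkx as nx

    G = nx.Graph([(0, 1), (1, 2), (0, 2), (2, 3)])  # one triangle: {0, 1, 2}
    counts = nx.triangles(G)
    assert counts == {0: 1, 1: 1, 2: 1, 3: 0}
    assert sum(counts.values()) // 3 == 1  # each triangle counted at 3 nodes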
+ + """ + if nodes is not None: + # If `nodes` represents a single node, return only its number of triangles + if nodes in G: + return next(_triangles_and_degree_iter(G, nodes))[2] // 2 + + # if `nodes` is a container of nodes, then return a + # dictionary mapping node to number of triangles. + return {v: t // 2 for v, d, t, _ in _triangles_and_degree_iter(G, nodes)} + + # if nodes is None, then compute triangles for the complete graph + + # dict used to avoid visiting the same nodes twice + # this allows calculating/counting each triangle only once + later_neighbors = {} + + # iterate over the nodes in a graph + for node, neighbors in G.adjacency(): + later_neighbors[node] = { + n for n in neighbors if n not in later_neighbors and n != node + } + + # instantiate Counter for each node to include isolated nodes + # add 1 to the count if a nodes neighbor's neighbor is also a neighbor + triangle_counts = Counter(dict.fromkeys(G, 0)) + for node1, neighbors in later_neighbors.items(): + for node2 in neighbors: + third_nodes = neighbors & later_neighbors[node2] + m = len(third_nodes) + triangle_counts[node1] += m + triangle_counts[node2] += m + triangle_counts.update(third_nodes) + + return dict(triangle_counts) + + +@not_implemented_for("multigraph") +def _triangles_and_degree_iter(G, nodes=None): + """Return an iterator of (node, degree, triangles, generalized degree). + + This double counts triangles so you may want to divide by 2. + See degree(), triangles() and generalized_degree() for definitions + and details. + + """ + if nodes is None: + nodes_nbrs = G.adj.items() + else: + nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes)) + + for v, v_nbrs in nodes_nbrs: + vs = set(v_nbrs) - {v} + gen_degree = Counter(len(vs & (set(G[w]) - {w})) for w in vs) + ntriangles = sum(k * val for k, val in gen_degree.items()) + yield (v, len(vs), ntriangles, gen_degree) + + +@not_implemented_for("multigraph") +def _weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"): + """Return an iterator of (node, degree, weighted_triangles). + + Used for weighted clustering. + Note: this returns the geometric average weight of edges in the triangle. + Also, each triangle is counted twice (each direction). + So you may want to divide by 2. + + """ + import numpy as np + + if weight is None or G.number_of_edges() == 0: + max_weight = 1 + else: + max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True)) + if nodes is None: + nodes_nbrs = G.adj.items() + else: + nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes)) + + def wt(u, v): + return G[u][v].get(weight, 1) / max_weight + + for i, nbrs in nodes_nbrs: + inbrs = set(nbrs) - {i} + weighted_triangles = 0 + seen = set() + for j in inbrs: + seen.add(j) + # This avoids counting twice -- we double at the end. + jnbrs = set(G[j]) - seen + # Only compute the edge weight once, before the inner inner + # loop. + wij = wt(i, j) + weighted_triangles += sum( + np.cbrt([(wij * wt(j, k) * wt(k, i)) for k in inbrs & jnbrs]) + ) + yield (i, len(inbrs), 2 * weighted_triangles) + + +@not_implemented_for("multigraph") +def _directed_triangles_and_degree_iter(G, nodes=None): + """Return an iterator of + (node, total_degree, reciprocal_degree, directed_triangles). + + Used for directed clustering. + Note that unlike `_triangles_and_degree_iter()`, this function counts + directed triangles so does not count triangles twice. 
+ + """ + nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes)) + + for i, preds, succs in nodes_nbrs: + ipreds = set(preds) - {i} + isuccs = set(succs) - {i} + + directed_triangles = 0 + for j in chain(ipreds, isuccs): + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + 1 + for k in chain( + (ipreds & jpreds), + (ipreds & jsuccs), + (isuccs & jpreds), + (isuccs & jsuccs), + ) + ) + dtotal = len(ipreds) + len(isuccs) + dbidirectional = len(ipreds & isuccs) + yield (i, dtotal, dbidirectional, directed_triangles) + + +@not_implemented_for("multigraph") +def _directed_weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"): + """Return an iterator of + (node, total_degree, reciprocal_degree, directed_weighted_triangles). + + Used for directed weighted clustering. + Note that unlike `_weighted_triangles_and_degree_iter()`, this function counts + directed triangles so does not count triangles twice. + + """ + import numpy as np + + if weight is None or G.number_of_edges() == 0: + max_weight = 1 + else: + max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True)) + + nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes)) + + def wt(u, v): + return G[u][v].get(weight, 1) / max_weight + + for i, preds, succs in nodes_nbrs: + ipreds = set(preds) - {i} + isuccs = set(succs) - {i} + + directed_triangles = 0 + for j in ipreds: + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]) + ) + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]) + ) + + for j in isuccs: + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]) + ) + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]) + ) + + dtotal = len(ipreds) + len(isuccs) + dbidirectional = len(ipreds & isuccs) + yield (i, dtotal, dbidirectional, directed_triangles) + + +@nx._dispatch(edge_attrs="weight") +def average_clustering(G, nodes=None, weight=None, count_zeros=True): + r"""Compute the average clustering coefficient for the graph G. + + The clustering coefficient for the graph is the average, + + .. math:: + + C = \frac{1}{n}\sum_{v \in G} c_v, + + where :math:`n` is the number of nodes in `G`. + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute average clustering for nodes in this container. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + count_zeros : bool + If False include only the nodes with nonzero clustering in the average. 
+
+    Returns
+    -------
+    avg : float
+        Average clustering
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> print(nx.average_clustering(G))
+    1.0
+
+    Notes
+    -----
+    This is a space saving routine; it might be faster
+    to use the clustering function to get a list and then take the average.
+
+    Self loops are ignored.
+
+    References
+    ----------
+    .. [1] Generalizations of the clustering coefficient to weighted
+       complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
+       K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
+       http://jponnela.com/web_documents/a9.pdf
+    .. [2] Marcus Kaiser, Mean clustering coefficients: the role of isolated
+       nodes and leafs on clustering measures for small-world networks.
+       https://arxiv.org/abs/0802.2512
+    """
+    c = clustering(G, nodes, weight=weight).values()
+    if not count_zeros:
+        c = [v for v in c if abs(v) > 0]
+    return sum(c) / len(c)
+
+
+@nx._dispatch(edge_attrs="weight")
+def clustering(G, nodes=None, weight=None):
+    r"""Compute the clustering coefficient for nodes.
+
+    For unweighted graphs, the clustering of a node :math:`u`
+    is the fraction of possible triangles through that node that exist,
+
+    .. math::
+
+      c_u = \frac{2 T(u)}{deg(u)(deg(u)-1)},
+
+    where :math:`T(u)` is the number of triangles through node :math:`u` and
+    :math:`deg(u)` is the degree of :math:`u`.
+
+    For weighted graphs, there are several ways to define clustering [1]_.
+    The one used here is defined
+    as the geometric average of the subgraph edge weights [2]_,
+
+    .. math::
+
+       c_u = \frac{1}{deg(u)(deg(u)-1)}
+             \sum_{vw} (\hat{w}_{uv} \hat{w}_{uw} \hat{w}_{vw})^{1/3}.
+
+    The edge weights :math:`\hat{w}_{uv}` are normalized by the maximum weight
+    in the network :math:`\hat{w}_{uv} = w_{uv}/\max(w)`.
+
+    The value of :math:`c_u` is assigned to 0 if :math:`deg(u) < 2`.
+
+    Additionally, this weighted definition has been generalized to support
+    negative edge weights [3]_.
+
+    For directed graphs, the clustering is similarly defined as the fraction
+    of all possible directed triangles or geometric average of the subgraph
+    edge weights for unweighted and weighted directed graphs, respectively [4]_.
+
+    .. math::
+
+       c_u = \frac{T(u)}{2(deg^{tot}(u)(deg^{tot}(u)-1) - 2deg^{\leftrightarrow}(u))},
+
+    where :math:`T(u)` is the number of directed triangles through node
+    :math:`u`, :math:`deg^{tot}(u)` is the sum of in degree and out degree of
+    :math:`u` and :math:`deg^{\leftrightarrow}(u)` is the reciprocal degree of
+    :math:`u`.
+
+
+    Parameters
+    ----------
+    G : graph
+
+    nodes : node, iterable of nodes, or None (default=None)
+        If a singleton node, return the clustering of that node.
+        If an iterable, compute the clustering for each of those nodes.
+        If `None` (the default) compute the clustering for all nodes in `G`.
+
+    weight : string or None, optional (default=None)
+        The edge attribute that holds the numerical value used as a weight.
+        If None, then each edge has weight 1.
+
+    Returns
+    -------
+    out : float, or dictionary
+        Clustering coefficient at specified nodes
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> print(nx.clustering(G, 0))
+    1.0
+    >>> print(nx.clustering(G))
+    {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}
+
+    Notes
+    -----
+    Self loops are ignored.
+
+    References
+    ----------
+    .. [1] Generalizations of the clustering coefficient to weighted
+       complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
+       K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
+ http://jponnela.com/web_documents/a9.pdf + .. [2] Intensity and coherence of motifs in weighted complex + networks by J. P. Onnela, J. Saramäki, J. Kertész, and K. Kaski, + Physical Review E, 71(6), 065103 (2005). + .. [3] Generalization of Clustering Coefficients to Signed Correlation Networks + by G. Costantini and M. Perugini, PloS one, 9(2), e88669 (2014). + .. [4] Clustering in complex directed networks by G. Fagiolo, + Physical Review E, 76(2), 026107 (2007). + """ + if G.is_directed(): + if weight is not None: + td_iter = _directed_weighted_triangles_and_degree_iter(G, nodes, weight) + clusterc = { + v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2) + for v, dt, db, t in td_iter + } + else: + td_iter = _directed_triangles_and_degree_iter(G, nodes) + clusterc = { + v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2) + for v, dt, db, t in td_iter + } + else: + # The formula 2*T/(d*(d-1)) from docs is t/(d*(d-1)) here b/c t==2*T + if weight is not None: + td_iter = _weighted_triangles_and_degree_iter(G, nodes, weight) + clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t in td_iter} + else: + td_iter = _triangles_and_degree_iter(G, nodes) + clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t, _ in td_iter} + if nodes in G: + # Return the value of the sole entry in the dictionary. + return clusterc[nodes] + return clusterc + + +@nx._dispatch +def transitivity(G): + r"""Compute graph transitivity, the fraction of all possible triangles + present in G. + + Possible triangles are identified by the number of "triads" + (two edges with a shared vertex). + + The transitivity is + + .. math:: + + T = 3\frac{\#triangles}{\#triads}. + + Parameters + ---------- + G : graph + + Returns + ------- + out : float + Transitivity + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.transitivity(G)) + 1.0 + """ + triangles_contri = [ + (t, d * (d - 1)) for v, d, t, _ in _triangles_and_degree_iter(G) + ] + # If the graph is empty + if len(triangles_contri) == 0: + return 0 + triangles, contri = map(sum, zip(*triangles_contri)) + return 0 if triangles == 0 else triangles / contri + + +@nx._dispatch +def square_clustering(G, nodes=None): + r"""Compute the squares clustering coefficient for nodes. + + For each node return the fraction of possible squares that exist at + the node [1]_ + + .. math:: + C_4(v) = \frac{ \sum_{u=1}^{k_v} + \sum_{w=u+1}^{k_v} q_v(u,w) }{ \sum_{u=1}^{k_v} + \sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]}, + + where :math:`q_v(u,w)` are the number of common neighbors of :math:`u` and + :math:`w` other than :math:`v` (ie squares), and :math:`a_v(u,w) = (k_u - + (1+q_v(u,w)+\theta_{uv})) + (k_w - (1+q_v(u,w)+\theta_{uw}))`, where + :math:`\theta_{uw} = 1` if :math:`u` and :math:`w` are connected and 0 + otherwise. [2]_ + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute clustering for nodes in this container. + + Returns + ------- + c4 : dictionary + A dictionary keyed by node with the square clustering coefficient value. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.square_clustering(G, 0)) + 1.0 + >>> print(nx.square_clustering(G)) + {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} + + Notes + ----- + While :math:`C_3(v)` (triangle clustering) gives the probability that + two neighbors of node v are connected with each other, :math:`C_4(v)` is + the probability that two neighbors of node v share a common + neighbor different from v. 
This algorithm can be applied to both + bipartite and unipartite networks. + + References + ---------- + .. [1] Pedro G. Lind, Marta C. González, and Hans J. Herrmann. 2005 + Cycles and clustering in bipartite networks. + Physical Review E (72) 056127. + .. [2] Zhang, Peng et al. Clustering Coefficient and Community Structure of + Bipartite Networks. Physica A: Statistical Mechanics and its Applications 387.27 (2008): 6869–6875. + https://arxiv.org/abs/0710.0117v1 + """ + if nodes is None: + node_iter = G + else: + node_iter = G.nbunch_iter(nodes) + clustering = {} + for v in node_iter: + clustering[v] = 0 + potential = 0 + for u, w in combinations(G[v], 2): + squares = len((set(G[u]) & set(G[w])) - {v}) + clustering[v] += squares + degm = squares + 1 + if w in G[u]: + degm += 1 + potential += (len(G[u]) - degm) + (len(G[w]) - degm) + squares + if potential > 0: + clustering[v] /= potential + if nodes in G: + # Return the value of the sole entry in the dictionary. + return clustering[nodes] + return clustering + + +@not_implemented_for("directed") +@nx._dispatch +def generalized_degree(G, nodes=None): + r"""Compute the generalized degree for nodes. + + For each node, the generalized degree shows how many edges of given + triangle multiplicity the node is connected to. The triangle multiplicity + of an edge is the number of triangles an edge participates in. The + generalized degree of node :math:`i` can be written as a vector + :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc, k_i^{(N-2)})` where + :math:`k_i^{(j)}` is the number of edges attached to node :math:`i` that + participate in :math:`j` triangles. + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute the generalized degree for nodes in this container. + + Returns + ------- + out : Counter, or dictionary of Counters + Generalized degree of specified nodes. The Counter is keyed by edge + triangle multiplicity. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.generalized_degree(G, 0)) + Counter({3: 4}) + >>> print(nx.generalized_degree(G)) + {0: Counter({3: 4}), 1: Counter({3: 4}), 2: Counter({3: 4}), 3: Counter({3: 4}), 4: Counter({3: 4})} + + To recover the number of triangles attached to a node: + + >>> k1 = nx.generalized_degree(G, 0) + >>> sum([k * v for k, v in k1.items()]) / 2 == nx.triangles(G, 0) + True + + Notes + ----- + In a network of N nodes, the highest triangle multiplicity an edge can have + is N-2. + + The return value does not include a `zero` entry if no edges of a + particular triangle multiplicity are present. + + The number of triangles node :math:`i` is attached to can be recovered from + the generalized degree :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc, + k_i^{(N-2)})` by :math:`(k_i^{(1)}+2k_i^{(2)}+\dotsc +(N-2)k_i^{(N-2)})/2`. + + References + ---------- + .. [1] Networks with arbitrary edge multiplicities by V. Zlatić, + D. Garlaschelli and G. Caldarelli, EPL (Europhysics Letters), + Volume 97, Number 2 (2012). 
+ https://iopscience.iop.org/article/10.1209/0295-5075/97/28005 + """ + if nodes in G: + return next(_triangles_and_degree_iter(G, nodes))[3] + return {v: gd for v, d, t, gd in _triangles_and_degree_iter(G, nodes)} diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/coloring/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39381d9f163a5400f362b91a89215bfc915a8022 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/coloring/__init__.py @@ -0,0 +1,4 @@ +from networkx.algorithms.coloring.greedy_coloring import * +from networkx.algorithms.coloring.equitable_coloring import equitable_color + +__all__ = ["greedy_color", "equitable_color"] diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eda633e580c5316217986b504d525648de421841 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f07eeb96eb696543488962c837c7de19c0969ca2 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9ac83d4cb25e1aff9ae44cba9888784cbc6c236 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/equitable_coloring.py b/phivenv/Lib/site-packages/networkx/algorithms/coloring/equitable_coloring.py new file mode 100644 index 0000000000000000000000000000000000000000..af1fb5a7e7c20392a82673406b923e89b6e525f2 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/coloring/equitable_coloring.py @@ -0,0 +1,505 @@ +""" +Equitable coloring of graphs with bounded degree. +""" + +from collections import defaultdict + +import networkx as nx + +__all__ = ["equitable_color"] + + +@nx._dispatch +def is_coloring(G, coloring): + """Determine if the coloring is a valid coloring for the graph G.""" + # Verify that the coloring is valid. + return all(coloring[s] != coloring[d] for s, d in G.edges) + + +@nx._dispatch +def is_equitable(G, coloring, num_colors=None): + """Determines if the coloring is valid and equitable for the graph G.""" + + if not is_coloring(G, coloring): + return False + + # Verify whether it is equitable. + color_set_size = defaultdict(int) + for color in coloring.values(): + color_set_size[color] += 1 + + if num_colors is not None: + for color in range(num_colors): + if color not in color_set_size: + # These colors do not have any vertices attached to them. 
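+                # Recording them with size 0 lets the size-spread check
+                # below compare empty color classes against occupied ones.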
+ color_set_size[color] = 0 + + # If there are more than 2 distinct values, the coloring cannot be equitable + all_set_sizes = set(color_set_size.values()) + if len(all_set_sizes) == 0 and num_colors is None: # Was an empty graph + return True + elif len(all_set_sizes) == 1: + return True + elif len(all_set_sizes) == 2: + a, b = list(all_set_sizes) + return abs(a - b) <= 1 + else: # len(all_set_sizes) > 2: + return False + + +def make_C_from_F(F): + C = defaultdict(list) + for node, color in F.items(): + C[color].append(node) + + return C + + +def make_N_from_L_C(L, C): + nodes = L.keys() + colors = C.keys() + return { + (node, color): sum(1 for v in L[node] if v in C[color]) + for node in nodes + for color in colors + } + + +def make_H_from_C_N(C, N): + return { + (c1, c2): sum(1 for node in C[c1] if N[(node, c2)] == 0) for c1 in C for c2 in C + } + + +def change_color(u, X, Y, N, H, F, C, L): + """Change the color of 'u' from X to Y and update N, H, F, C.""" + assert F[u] == X and X != Y + + # Change the class of 'u' from X to Y + F[u] = Y + + for k in C: + # 'u' witnesses an edge from k -> Y instead of from k -> X now. + if N[u, k] == 0: + H[(X, k)] -= 1 + H[(Y, k)] += 1 + + for v in L[u]: + # 'v' has lost a neighbor in X and gained one in Y + N[(v, X)] -= 1 + N[(v, Y)] += 1 + + if N[(v, X)] == 0: + # 'v' witnesses F[v] -> X + H[(F[v], X)] += 1 + + if N[(v, Y)] == 1: + # 'v' no longer witnesses F[v] -> Y + H[(F[v], Y)] -= 1 + + C[X].remove(u) + C[Y].append(u) + + +def move_witnesses(src_color, dst_color, N, H, F, C, T_cal, L): + """Move witness along a path from src_color to dst_color.""" + X = src_color + while X != dst_color: + Y = T_cal[X] + # Move _any_ witness from X to Y = T_cal[X] + w = next(x for x in C[X] if N[(x, Y)] == 0) + change_color(w, X, Y, N=N, H=H, F=F, C=C, L=L) + X = Y + + +@nx._dispatch +def pad_graph(G, num_colors): + """Add a disconnected complete clique K_p such that the number of nodes in + the graph becomes a multiple of `num_colors`. + + Assumes that the graph's nodes are labelled using integers. + + Returns the number of nodes with each color. + """ + + n_ = len(G) + r = num_colors - 1 + + # Ensure that the number of nodes in G is a multiple of (r + 1) + s = n_ // (r + 1) + if n_ != s * (r + 1): + p = (r + 1) - n_ % (r + 1) + s += 1 + + # Complete graph K_p between (imaginary) nodes [n_, ... , n_ + p] + K = nx.relabel_nodes(nx.complete_graph(p), {idx: idx + n_ for idx in range(p)}) + G.add_edges_from(K.edges) + + return s + + +def procedure_P(V_minus, V_plus, N, H, F, C, L, excluded_colors=None): + """Procedure P as described in the paper.""" + + if excluded_colors is None: + excluded_colors = set() + + A_cal = set() + T_cal = {} + R_cal = [] + + # BFS to determine A_cal, i.e. colors reachable from V- + reachable = [V_minus] + marked = set(reachable) + idx = 0 + + while idx < len(reachable): + pop = reachable[idx] + idx += 1 + + A_cal.add(pop) + R_cal.append(pop) + + # TODO: Checking whether a color has been visited can be made faster by + # using a look-up table instead of testing for membership in a set by a + # logarithmic factor. 
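+        # Gather the colors k that currently witness an edge into `pop`
+        # (H[(k, pop)] > 0) and have not yet been reached or excluded.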
+ next_layer = [] + for k in C: + if ( + H[(k, pop)] > 0 + and k not in A_cal + and k not in excluded_colors + and k not in marked + ): + next_layer.append(k) + + for dst in next_layer: + # Record that `dst` can reach `pop` + T_cal[dst] = pop + + marked.update(next_layer) + reachable.extend(next_layer) + + # Variables for the algorithm + b = len(C) - len(A_cal) + + if V_plus in A_cal: + # Easy case: V+ is in A_cal + # Move one node from V+ to V- using T_cal to find the parents. + move_witnesses(V_plus, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L) + else: + # If there is a solo edge, we can resolve the situation by + # moving witnesses from B to A, making G[A] equitable and then + # recursively balancing G[B - w] with a different V_minus and + # but the same V_plus. + + A_0 = set() + A_cal_0 = set() + num_terminal_sets_found = 0 + made_equitable = False + + for W_1 in R_cal[::-1]: + for v in C[W_1]: + X = None + + for U in C: + if N[(v, U)] == 0 and U in A_cal and U != W_1: + X = U + + # v does not witness an edge in H[A_cal] + if X is None: + continue + + for U in C: + # Note: Departing from the paper here. + if N[(v, U)] >= 1 and U not in A_cal: + X_prime = U + w = v + + try: + # Finding the solo neighbor of w in X_prime + y = next( + node + for node in L[w] + if F[node] == X_prime and N[(node, W_1)] == 1 + ) + except StopIteration: + pass + else: + W = W_1 + + # Move w from W to X, now X has one extra node. + change_color(w, W, X, N=N, H=H, F=F, C=C, L=L) + + # Move witness from X to V_minus, making the coloring + # equitable. + move_witnesses( + src_color=X, + dst_color=V_minus, + N=N, + H=H, + F=F, + C=C, + T_cal=T_cal, + L=L, + ) + + # Move y from X_prime to W, making W the correct size. + change_color(y, X_prime, W, N=N, H=H, F=F, C=C, L=L) + + # Then call the procedure on G[B - y] + procedure_P( + V_minus=X_prime, + V_plus=V_plus, + N=N, + H=H, + C=C, + F=F, + L=L, + excluded_colors=excluded_colors.union(A_cal), + ) + made_equitable = True + break + + if made_equitable: + break + else: + # No node in W_1 was found such that + # it had a solo-neighbor. + A_cal_0.add(W_1) + A_0.update(C[W_1]) + num_terminal_sets_found += 1 + + if num_terminal_sets_found == b: + # Otherwise, construct the maximal independent set and find + # a pair of z_1, z_2 as in Case II. + + # BFS to determine B_cal': the set of colors reachable from V+ + B_cal_prime = set() + T_cal_prime = {} + + reachable = [V_plus] + marked = set(reachable) + idx = 0 + while idx < len(reachable): + pop = reachable[idx] + idx += 1 + + B_cal_prime.add(pop) + + # No need to check for excluded_colors here because + # they only exclude colors from A_cal + next_layer = [ + k + for k in C + if H[(pop, k)] > 0 and k not in B_cal_prime and k not in marked + ] + + for dst in next_layer: + T_cal_prime[pop] = dst + + marked.update(next_layer) + reachable.extend(next_layer) + + # Construct the independent set of G[B'] + I_set = set() + I_covered = set() + W_covering = {} + + B_prime = [node for k in B_cal_prime for node in C[k]] + + # Add the nodes in V_plus to I first. 
+ for z in C[V_plus] + B_prime: + if z in I_covered or F[z] not in B_cal_prime: + continue + + I_set.add(z) + I_covered.add(z) + I_covered.update(list(L[z])) + + for w in L[z]: + if F[w] in A_cal_0 and N[(z, F[w])] == 1: + if w not in W_covering: + W_covering[w] = z + else: + # Found z1, z2 which have the same solo + # neighbor in some W + z_1 = W_covering[w] + # z_2 = z + + Z = F[z_1] + W = F[w] + + # shift nodes along W, V- + move_witnesses( + W, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L + ) + + # shift nodes along V+ to Z + move_witnesses( + V_plus, + Z, + N=N, + H=H, + F=F, + C=C, + T_cal=T_cal_prime, + L=L, + ) + + # change color of z_1 to W + change_color(z_1, Z, W, N=N, H=H, F=F, C=C, L=L) + + # change color of w to some color in B_cal + W_plus = next( + k for k in C if N[(w, k)] == 0 and k not in A_cal + ) + change_color(w, W, W_plus, N=N, H=H, F=F, C=C, L=L) + + # recurse with G[B \cup W*] + excluded_colors.update( + [k for k in C if k != W and k not in B_cal_prime] + ) + procedure_P( + V_minus=W, + V_plus=W_plus, + N=N, + H=H, + C=C, + F=F, + L=L, + excluded_colors=excluded_colors, + ) + + made_equitable = True + break + + if made_equitable: + break + else: + assert False, ( + "Must find a w which is the solo neighbor " + "of two vertices in B_cal_prime." + ) + + if made_equitable: + break + + +@nx._dispatch +def equitable_color(G, num_colors): + """Provides an equitable coloring for nodes of `G`. + + Attempts to color a graph using `num_colors` colors, where no neighbors of + a node can have same color as the node itself and the number of nodes with + each color differ by at most 1. `num_colors` must be greater than the + maximum degree of `G`. The algorithm is described in [1]_ and has + complexity O(num_colors * n**2). + + Parameters + ---------- + G : networkX graph + The nodes of this graph will be colored. + + num_colors : number of colors to use + This number must be at least one more than the maximum degree of nodes + in the graph. + + Returns + ------- + A dictionary with keys representing nodes and values representing + corresponding coloring. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> nx.coloring.equitable_color(G, num_colors=3) # doctest: +SKIP + {0: 2, 1: 1, 2: 2, 3: 0} + + Raises + ------ + NetworkXAlgorithmError + If `num_colors` is not at least the maximum degree of the graph `G` + + References + ---------- + .. [1] Kierstead, H. A., Kostochka, A. V., Mydlarz, M., & Szemerédi, E. + (2010). A fast algorithm for equitable coloring. Combinatorica, 30(2), + 217-224. + """ + + # Map nodes to integers for simplicity later. + nodes_to_int = {} + int_to_nodes = {} + + for idx, node in enumerate(G.nodes): + nodes_to_int[node] = idx + int_to_nodes[idx] = node + + G = nx.relabel_nodes(G, nodes_to_int, copy=True) + + # Basic graph statistics and sanity check. + if len(G.nodes) > 0: + r_ = max(G.degree(node) for node in G.nodes) + else: + r_ = 0 + + if r_ >= num_colors: + raise nx.NetworkXAlgorithmError( + f"Graph has maximum degree {r_}, needs " + f"{r_ + 1} (> {num_colors}) colors for guaranteed coloring." + ) + + # Ensure that the number of nodes in G is a multiple of (r + 1) + pad_graph(G, num_colors) + + # Starting the algorithm. + # L = {node: list(G.neighbors(node)) for node in G.nodes} + L_ = {node: [] for node in G.nodes} + + # Arbitrary equitable allocation of colors to nodes. + F = {node: idx % num_colors for idx, node in enumerate(G.nodes)} + + C = make_C_from_F(F) + + # The neighborhood is empty initially. 
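+    # Both N and H therefore start out trivial; they are rebuilt
+    # incrementally as the edges of G are replayed one at a time below.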
+ N = make_N_from_L_C(L_, C) + + # Currently all nodes witness all edges. + H = make_H_from_C_N(C, N) + + # Start of algorithm. + edges_seen = set() + + for u in sorted(G.nodes): + for v in sorted(G.neighbors(u)): + # Do not double count edges if (v, u) has already been seen. + if (v, u) in edges_seen: + continue + + edges_seen.add((u, v)) + + L_[u].append(v) + L_[v].append(u) + + N[(u, F[v])] += 1 + N[(v, F[u])] += 1 + + if F[u] != F[v]: + # Were 'u' and 'v' witnesses for F[u] -> F[v] or F[v] -> F[u]? + if N[(u, F[v])] == 1: + H[F[u], F[v]] -= 1 # u cannot witness an edge between F[u], F[v] + + if N[(v, F[u])] == 1: + H[F[v], F[u]] -= 1 # v cannot witness an edge between F[v], F[u] + + if N[(u, F[u])] != 0: + # Find the first color where 'u' does not have any neighbors. + Y = next(k for k in C if N[(u, k)] == 0) + X = F[u] + change_color(u, X, Y, N=N, H=H, F=F, C=C, L=L_) + + # Procedure P + procedure_P(V_minus=X, V_plus=Y, N=N, H=H, F=F, C=C, L=L_) + + return {int_to_nodes[x]: F[x] for x in int_to_nodes} diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/greedy_coloring.py b/phivenv/Lib/site-packages/networkx/algorithms/coloring/greedy_coloring.py new file mode 100644 index 0000000000000000000000000000000000000000..170b2275d2960c2c2b35a29ad162d0f519994df8 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/coloring/greedy_coloring.py @@ -0,0 +1,572 @@ +""" +Greedy graph coloring using various strategies. +""" +import itertools +from collections import defaultdict, deque + +import networkx as nx +from networkx.utils import arbitrary_element, py_random_state + +__all__ = [ + "greedy_color", + "strategy_connected_sequential", + "strategy_connected_sequential_bfs", + "strategy_connected_sequential_dfs", + "strategy_independent_set", + "strategy_largest_first", + "strategy_random_sequential", + "strategy_saturation_largest_first", + "strategy_smallest_last", +] + + +@nx._dispatch +def strategy_largest_first(G, colors): + """Returns a list of the nodes of ``G`` in decreasing order by + degree. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + return sorted(G, key=G.degree, reverse=True) + + +@py_random_state(2) +@nx._dispatch +def strategy_random_sequential(G, colors, seed=None): + """Returns a random permutation of the nodes of ``G`` as a list. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + """ + nodes = list(G) + seed.shuffle(nodes) + return nodes + + +@nx._dispatch +def strategy_smallest_last(G, colors): + """Returns a deque of the nodes of ``G``, "smallest" last. + + Specifically, the degrees of each node are tracked in a bucket queue. + From this, the node of minimum degree is repeatedly popped from the + graph, updating its neighbors' degrees. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + This implementation of the strategy runs in $O(n + m)$ time + (ignoring polylogarithmic factors), where $n$ is the number of nodes + and $m$ is the number of edges. + + This strategy is related to :func:`strategy_independent_set`: if we + interpret each node removed as an independent set of size one, then + this strategy chooses an independent set of size one instead of a + maximal independent set. + + """ + H = G.copy() + result = deque() + + # Build initial degree list (i.e. 
the bucket queue data structure)
+    degrees = defaultdict(set)  # set(), for fast random-access removals
+    lbound = float("inf")
+    for node, d in H.degree():
+        degrees[d].add(node)
+        lbound = min(lbound, d)  # Lower bound on min-degree.
+
+    def find_min_degree():
+        # Save time by starting the iterator at `lbound`, not 0.
+        # The value that we find will be our new `lbound`, which we set later.
+        return next(d for d in itertools.count(lbound) if d in degrees)
+
+    for _ in G:
+        # Pop a min-degree node and add it to the list.
+        min_degree = find_min_degree()
+        u = degrees[min_degree].pop()
+        if not degrees[min_degree]:  # Clean up the degree list.
+            del degrees[min_degree]
+        result.appendleft(u)
+
+        # Update degrees of removed node's neighbors.
+        for v in H[u]:
+            degree = H.degree(v)
+            degrees[degree].remove(v)
+            if not degrees[degree]:  # Clean up the degree list.
+                del degrees[degree]
+            degrees[degree - 1].add(v)
+
+        # Finally, remove the node.
+        H.remove_node(u)
+        lbound = min_degree - 1  # Subtract 1 in case of tied neighbors.
+
+    return result
+
+
+def _maximal_independent_set(G):
+    """Returns a maximal independent set of nodes in ``G`` by repeatedly
+    choosing an independent node of minimum degree (with respect to the
+    subgraph of unchosen nodes).
+
+    """
+    result = set()
+    remaining = set(G)
+    while remaining:
+        G = G.subgraph(remaining)
+        v = min(remaining, key=G.degree)
+        result.add(v)
+        remaining -= set(G[v]) | {v}
+    return result
+
+
+@nx._dispatch
+def strategy_independent_set(G, colors):
+    """Uses a greedy independent set removal strategy to determine the
+    colors.
+
+    Unlike most other strategy functions in this module, this one is a
+    generator: it yields the nodes of one maximal independent set at a
+    time, and the ``colors`` argument is ignored.
+
+    This algorithm repeatedly finds and removes a maximal independent
+    set, assigning each node in the set an unused color.
+
+    ``G`` is a NetworkX graph.
+
+    This strategy is related to :func:`strategy_smallest_last`: in that
+    strategy, an independent set of size one is chosen at each step
+    instead of a maximal independent set.
+
+    """
+    remaining_nodes = set(G)
+    while len(remaining_nodes) > 0:
+        nodes = _maximal_independent_set(G.subgraph(remaining_nodes))
+        remaining_nodes -= nodes
+        yield from nodes
+
+
+@nx._dispatch
+def strategy_connected_sequential_bfs(G, colors):
+    """Returns an iterable over nodes in ``G`` in the order given by a
+    breadth-first traversal.
+
+    The generated sequence has the property that for each node except
+    the first, at least one neighbor appeared earlier in the sequence.
+
+    ``G`` is a NetworkX graph. ``colors`` is ignored.
+
+    """
+    return strategy_connected_sequential(G, colors, "bfs")
+
+
+@nx._dispatch
+def strategy_connected_sequential_dfs(G, colors):
+    """Returns an iterable over nodes in ``G`` in the order given by a
+    depth-first traversal.
+
+    The generated sequence has the property that for each node except
+    the first, at least one neighbor appeared earlier in the sequence.
+
+    ``G`` is a NetworkX graph. ``colors`` is ignored.
+
+    """
+    return strategy_connected_sequential(G, colors, "dfs")
+
+
+@nx._dispatch
+def strategy_connected_sequential(G, colors, traversal="bfs"):
+    """Returns an iterable over nodes in ``G`` in the order given by a
+    breadth-first or depth-first traversal.
+
+    ``traversal`` must be one of the strings ``'dfs'`` or ``'bfs'``,
+    representing depth-first traversal or breadth-first traversal,
+    respectively.
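A quick sketch of how these traversal-based orderings plug into `greedy_color`; the Petersen graph here is purely illustrative:

    import networkx as nx

    G = nx.petersen_graph()
    for strategy in ("connected_sequential_bfs", "connected_sequential_dfs"):
        colors = nx.coloring.greedy_color(G, strategy=strategy)
        # Whatever the ordering, the greedy assignment is a proper coloring.
        assert all(colors[u] != colors[v] for u, v in G.edges)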
+ + The generated sequence has the property that for each node except + the first, at least one neighbor appeared earlier in the sequence. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + if traversal == "bfs": + traverse = nx.bfs_edges + elif traversal == "dfs": + traverse = nx.dfs_edges + else: + raise nx.NetworkXError( + "Please specify one of the strings 'bfs' or" + " 'dfs' for connected sequential ordering" + ) + for component in nx.connected_components(G): + source = arbitrary_element(component) + # Yield the source node, then all the nodes in the specified + # traversal order. + yield source + for _, end in traverse(G.subgraph(component), source): + yield end + + +@nx._dispatch +def strategy_saturation_largest_first(G, colors): + """Iterates over all the nodes of ``G`` in "saturation order" (also + known as "DSATUR"). + + ``G`` is a NetworkX graph. ``colors`` is a dictionary mapping nodes of + ``G`` to colors, for those nodes that have already been colored. + + """ + distinct_colors = {v: set() for v in G} + + # Add the node color assignments given in colors to the + # distinct colors set for each neighbor of that node + for node, color in colors.items(): + for neighbor in G[node]: + distinct_colors[neighbor].add(color) + + # Check that the color assignments in colors are valid + # i.e. no neighboring nodes have the same color + if len(colors) >= 2: + for node, color in colors.items(): + if color in distinct_colors[node]: + raise nx.NetworkXError("Neighboring nodes must have different colors") + + # If 0 nodes have been colored, simply choose the node of highest degree. + if not colors: + node = max(G, key=G.degree) + yield node + # Add the color 0 to the distinct colors set for each + # neighbor of that node. + for v in G[node]: + distinct_colors[v].add(0) + + while len(G) != len(colors): + # Update the distinct color sets for the neighbors. + for node, color in colors.items(): + for neighbor in G[node]: + distinct_colors[neighbor].add(color) + + # Compute the maximum saturation and the set of nodes that + # achieve that saturation. + saturation = {v: len(c) for v, c in distinct_colors.items() if v not in colors} + # Yield the node with the highest saturation, and break ties by + # degree. + node = max(saturation, key=lambda v: (saturation[v], G.degree(v))) + yield node + + +#: Dictionary mapping name of a strategy as a string to the strategy function. +STRATEGIES = { + "largest_first": strategy_largest_first, + "random_sequential": strategy_random_sequential, + "smallest_last": strategy_smallest_last, + "independent_set": strategy_independent_set, + "connected_sequential_bfs": strategy_connected_sequential_bfs, + "connected_sequential_dfs": strategy_connected_sequential_dfs, + "connected_sequential": strategy_connected_sequential, + "saturation_largest_first": strategy_saturation_largest_first, + "DSATUR": strategy_saturation_largest_first, +} + + +@nx._dispatch +def greedy_color(G, strategy="largest_first", interchange=False): + """Color a graph using various strategies of greedy graph coloring. + + Attempts to color a graph using as few colors as possible, where no + neighbours of a node can have same color as the node itself. The + given strategy determines the order in which nodes are colored. + + The strategies are described in [1]_, and smallest-last is based on + [2]_. 
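A sketch comparing a few built-in orderings on one small graph. Color counts can differ between strategies in general; on an odd cycle every run lands on exactly three, since greedy coloring never needs more than max degree + 1 colors and no proper coloring of an odd cycle uses fewer than three:

    import networkx as nx

    G = nx.cycle_graph(5)  # odd cycle: chromatic number 3, max degree 2
    for strategy in ("largest_first", "smallest_last", "DSATUR"):
        colors = nx.coloring.greedy_color(G, strategy=strategy)
        assert all(colors[u] != colors[v] for u, v in G.edges)
        assert max(colors.values()) + 1 == 3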
+ + Parameters + ---------- + G : NetworkX graph + + strategy : string or function(G, colors) + A function (or a string representing a function) that provides + the coloring strategy, by returning nodes in the ordering they + should be colored. ``G`` is the graph, and ``colors`` is a + dictionary of the currently assigned colors, keyed by nodes. The + function must return an iterable over all the nodes in ``G``. + + If the strategy function is an iterator generator (that is, a + function with ``yield`` statements), keep in mind that the + ``colors`` dictionary will be updated after each ``yield``, since + this function chooses colors greedily. + + If ``strategy`` is a string, it must be one of the following, + each of which represents one of the built-in strategy functions. + + * ``'largest_first'`` + * ``'random_sequential'`` + * ``'smallest_last'`` + * ``'independent_set'`` + * ``'connected_sequential_bfs'`` + * ``'connected_sequential_dfs'`` + * ``'connected_sequential'`` (alias for the previous strategy) + * ``'saturation_largest_first'`` + * ``'DSATUR'`` (alias for the previous strategy) + + interchange: bool + Will use the color interchange algorithm described by [3]_ if set + to ``True``. + + Note that ``saturation_largest_first`` and ``independent_set`` + do not work with interchange. Furthermore, if you use + interchange with your own strategy function, you cannot rely + on the values in the ``colors`` argument. + + Returns + ------- + A dictionary with keys representing nodes and values representing + corresponding coloring. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> d = nx.coloring.greedy_color(G, strategy="largest_first") + >>> d in [{0: 0, 1: 1, 2: 0, 3: 1}, {0: 1, 1: 0, 2: 1, 3: 0}] + True + + Raises + ------ + NetworkXPointlessConcept + If ``strategy`` is ``saturation_largest_first`` or + ``independent_set`` and ``interchange`` is ``True``. + + References + ---------- + .. [1] Adrian Kosowski, and Krzysztof Manuszewski, + Classical Coloring of Graphs, Graph Colorings, 2-19, 2004. + ISBN 0-8218-3458-4. + .. [2] David W. Matula, and Leland L. Beck, "Smallest-last + ordering and clustering and graph coloring algorithms." *J. ACM* 30, + 3 (July 1983), 417–427. + .. [3] Maciej M. Sysło, Narsingh Deo, Janusz S. Kowalik, + Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983. + ISBN 0-486-45353-7. + + """ + if len(G) == 0: + return {} + # Determine the strategy provided by the caller. + strategy = STRATEGIES.get(strategy, strategy) + if not callable(strategy): + raise nx.NetworkXError( + "strategy must be callable or a valid string. " f"{strategy} not valid." + ) + # Perform some validation on the arguments before executing any + # strategy functions. + if interchange: + if strategy is strategy_independent_set: + msg = "interchange cannot be used with independent_set" + raise nx.NetworkXPointlessConcept(msg) + if strategy is strategy_saturation_largest_first: + msg = "interchange cannot be used with" " saturation_largest_first" + raise nx.NetworkXPointlessConcept(msg) + colors = {} + nodes = strategy(G, colors) + if interchange: + return _greedy_coloring_with_interchange(G, nodes) + for u in nodes: + # Set to keep track of colors of neighbours + neighbour_colors = {colors[v] for v in G[u] if v in colors} + # Find the first unused color. + for color in itertools.count(): + if color not in neighbour_colors: + break + # Assign the new color to the current node. 
+        colors[u] = color
+    return colors
+
+
+# Tools for coloring with interchanges
+class _Node:
+    __slots__ = ["node_id", "color", "adj_list", "adj_color"]
+
+    def __init__(self, node_id, n):
+        self.node_id = node_id
+        self.color = -1
+        self.adj_list = None
+        self.adj_color = [None for _ in range(n)]
+
+    def __repr__(self):
+        return (
+            f"Node_id: {self.node_id}, Color: {self.color}, "
+            f"Adj_list: ({self.adj_list}), adj_color: ({self.adj_color})"
+        )
+
+    def assign_color(self, adj_entry, color):
+        adj_entry.col_prev = None
+        adj_entry.col_next = self.adj_color[color]
+        self.adj_color[color] = adj_entry
+        if adj_entry.col_next is not None:
+            adj_entry.col_next.col_prev = adj_entry
+
+    def clear_color(self, adj_entry, color):
+        if adj_entry.col_prev is None:
+            self.adj_color[color] = adj_entry.col_next
+        else:
+            adj_entry.col_prev.col_next = adj_entry.col_next
+        if adj_entry.col_next is not None:
+            adj_entry.col_next.col_prev = adj_entry.col_prev
+
+    def iter_neighbors(self):
+        adj_node = self.adj_list
+        while adj_node is not None:
+            yield adj_node
+            adj_node = adj_node.next
+
+    def iter_neighbors_color(self, color):
+        adj_color_node = self.adj_color[color]
+        while adj_color_node is not None:
+            yield adj_color_node.node_id
+            adj_color_node = adj_color_node.col_next
+
+
+class _AdjEntry:
+    __slots__ = ["node_id", "next", "mate", "col_next", "col_prev"]
+
+    def __init__(self, node_id):
+        self.node_id = node_id
+        self.next = None
+        self.mate = None
+        self.col_next = None
+        self.col_prev = None
+
+    def __repr__(self):
+        col_next = None if self.col_next is None else self.col_next.node_id
+        col_prev = None if self.col_prev is None else self.col_prev.node_id
+        return (
+            f"Node_id: {self.node_id}, Next: ({self.next}), "
+            f"Mate: ({self.mate.node_id}), "
+            f"col_next: ({col_next}), col_prev: ({col_prev})"
+        )
+
+
+def _greedy_coloring_with_interchange(G, nodes):
+    """Return a coloring for ``G`` using the interchange approach.
+
+    This procedure is an adaptation of the algorithm described by [1]_,
+    and is an implementation of coloring with interchange. Note that the
+    data structures used are rather complex because they are optimized to
+    minimize the time spent identifying subcomponents of the graph, which
+    are possible candidates for color interchange.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph to be colored
+
+    nodes : list
+        nodes ordered using the strategy of choice
+
+    Returns
+    -------
+    dict :
+        A dictionary keyed by node to a color value
+
+    References
+    ----------
+    .. [1] Maciej M. Syslo, Narsingh Deo, Janusz S. Kowalik,
+       Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983.
+       ISBN 0-486-45353-7.
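A usage sketch of the public switch that reaches this machinery; the random-graph parameters are arbitrary:

    import networkx as nx

    G = nx.gnm_random_graph(30, 60, seed=7)
    for interchange in (False, True):
        colors = nx.coloring.greedy_color(
            G, strategy="largest_first", interchange=interchange
        )
        assert all(colors[u] != colors[v] for u, v in G.edges)
        print("interchange =", interchange, "->", max(colors.values()) + 1, "colors")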
+ """ + n = len(G) + + graph = {node: _Node(node, n) for node in G} + + for node1, node2 in G.edges(): + adj_entry1 = _AdjEntry(node2) + adj_entry2 = _AdjEntry(node1) + adj_entry1.mate = adj_entry2 + adj_entry2.mate = adj_entry1 + node1_head = graph[node1].adj_list + adj_entry1.next = node1_head + graph[node1].adj_list = adj_entry1 + node2_head = graph[node2].adj_list + adj_entry2.next = node2_head + graph[node2].adj_list = adj_entry2 + + k = 0 + for node in nodes: + # Find the smallest possible, unused color + neighbors = graph[node].iter_neighbors() + col_used = {graph[adj_node.node_id].color for adj_node in neighbors} + col_used.discard(-1) + k1 = next(itertools.dropwhile(lambda x: x in col_used, itertools.count())) + + # k1 is now the lowest available color + if k1 > k: + connected = True + visited = set() + col1 = -1 + col2 = -1 + while connected and col1 < k: + col1 += 1 + neighbor_cols = graph[node].iter_neighbors_color(col1) + col1_adj = list(neighbor_cols) + + col2 = col1 + while connected and col2 < k: + col2 += 1 + visited = set(col1_adj) + frontier = list(col1_adj) + i = 0 + while i < len(frontier): + search_node = frontier[i] + i += 1 + col_opp = col2 if graph[search_node].color == col1 else col1 + neighbor_cols = graph[search_node].iter_neighbors_color(col_opp) + + for neighbor in neighbor_cols: + if neighbor not in visited: + visited.add(neighbor) + frontier.append(neighbor) + + # Search if node is not adj to any col2 vertex + connected = ( + len( + visited.intersection(graph[node].iter_neighbors_color(col2)) + ) + > 0 + ) + + # If connected is false then we can swap !!! + if not connected: + # Update all the nodes in the component + for search_node in visited: + graph[search_node].color = ( + col2 if graph[search_node].color == col1 else col1 + ) + col2_adj = graph[search_node].adj_color[col2] + graph[search_node].adj_color[col2] = graph[search_node].adj_color[ + col1 + ] + graph[search_node].adj_color[col1] = col2_adj + + # Update all the neighboring nodes + for search_node in visited: + col = graph[search_node].color + col_opp = col1 if col == col2 else col2 + for adj_node in graph[search_node].iter_neighbors(): + if graph[adj_node.node_id].color != col_opp: + # Direct reference to entry + adj_mate = adj_node.mate + graph[adj_node.node_id].clear_color(adj_mate, col_opp) + graph[adj_node.node_id].assign_color(adj_mate, col) + k1 = col1 + + # We can color this node color k1 + graph[node].color = k1 + k = max(k1, k) + + # Update the neighbors of this node + for adj_node in graph[node].iter_neighbors(): + adj_mate = adj_node.mate + graph[adj_node.node_id].assign_color(adj_mate, k1) + + return {node.node_id: node.color for node in graph.values()} diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/__init__.py b/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5501abf3124f13ddf72eaad7a505a9841abeddc Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-39.pyc 
b/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..228df863ff43ee0cb9d78555fa32a60de3bf7c65 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/test_coloring.py b/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/test_coloring.py new file mode 100644 index 0000000000000000000000000000000000000000..a2a4e39589ea981445f6e9e222087714ef88e141 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/coloring/tests/test_coloring.py @@ -0,0 +1,865 @@ +"""Greedy coloring test suite. + +""" + +import itertools + +import pytest + +import networkx as nx + +is_coloring = nx.algorithms.coloring.equitable_coloring.is_coloring +is_equitable = nx.algorithms.coloring.equitable_coloring.is_equitable + + +ALL_STRATEGIES = [ + "largest_first", + "random_sequential", + "smallest_last", + "independent_set", + "connected_sequential_bfs", + "connected_sequential_dfs", + "connected_sequential", + "saturation_largest_first", + "DSATUR", +] + +# List of strategies where interchange=True results in an error +INTERCHANGE_INVALID = ["independent_set", "saturation_largest_first", "DSATUR"] + + +class TestColoring: + def test_basic_cases(self): + def check_basic_case(graph_func, n_nodes, strategy, interchange): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + assert verify_length(coloring, n_nodes) + assert verify_coloring(graph, coloring) + + for graph_func, n_nodes in BASIC_TEST_CASES.items(): + for interchange in [True, False]: + for strategy in ALL_STRATEGIES: + check_basic_case(graph_func, n_nodes, strategy, False) + if strategy not in INTERCHANGE_INVALID: + check_basic_case(graph_func, n_nodes, strategy, True) + + def test_special_cases(self): + def check_special_case(strategy, graph_func, interchange, colors): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + if not hasattr(colors, "__len__"): + colors = [colors] + assert any(verify_length(coloring, n_colors) for n_colors in colors) + assert verify_coloring(graph, coloring) + + for strategy, arglist in SPECIAL_TEST_CASES.items(): + for args in arglist: + check_special_case(strategy, args[0], args[1], args[2]) + + def test_interchange_invalid(self): + graph = one_node_graph() + for strategy in INTERCHANGE_INVALID: + pytest.raises( + nx.NetworkXPointlessConcept, + nx.coloring.greedy_color, + graph, + strategy=strategy, + interchange=True, + ) + + def test_bad_inputs(self): + graph = one_node_graph() + pytest.raises( + nx.NetworkXError, + nx.coloring.greedy_color, + graph, + strategy="invalid strategy", + ) + + def test_strategy_as_function(self): + graph = lf_shc() + colors_1 = nx.coloring.greedy_color(graph, "largest_first") + colors_2 = nx.coloring.greedy_color(graph, nx.coloring.strategy_largest_first) + assert colors_1 == colors_2 + + def test_seed_argument(self): + graph = lf_shc() + rs = nx.coloring.strategy_random_sequential + c1 = nx.coloring.greedy_color(graph, lambda g, c: rs(g, c, seed=1)) + for u, v in graph.edges: + assert c1[u] != c1[v] + + def test_is_coloring(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_coloring(G, coloring) + + coloring[0] = 1 + assert not 
is_coloring(G, coloring) + assert not is_equitable(G, coloring) + + def test_is_equitable(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_equitable(G, coloring) + + G.add_edges_from([(2, 3), (2, 4), (2, 5)]) + coloring[3] = 1 + coloring[4] = 1 + coloring[5] = 1 + assert is_coloring(G, coloring) + assert not is_equitable(G, coloring) + + def test_num_colors(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (0, 3)]) + pytest.raises(nx.NetworkXAlgorithmError, nx.coloring.equitable_color, G, 2) + + def test_equitable_color(self): + G = nx.fast_gnp_random_graph(n=10, p=0.2, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def test_equitable_color_empty(self): + G = nx.empty_graph() + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def test_equitable_color_large(self): + G = nx.fast_gnp_random_graph(100, 0.1, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring, num_colors=max_degree(G) + 1) + + def test_case_V_plus_not_in_A_cal(self): + # Hand crafted case to avoid the easy case. + L = { + 0: [2, 5], + 1: [3, 4], + 2: [0, 8], + 3: [1, 7], + 4: [1, 6], + 5: [0, 6], + 6: [4, 5], + 7: [3], + 8: [2], + } + + F = { + # Color 0 + 0: 0, + 1: 0, + # Color 1 + 2: 1, + 3: 1, + 4: 1, + 5: 1, + # Color 2 + 6: 2, + 7: 2, + 8: 2, + } + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_cast_no_solo(self): + L = { + 0: [8, 9], + 1: [10, 11], + 2: [8], + 3: [9], + 4: [10, 11], + 5: [8], + 6: [9], + 7: [10, 11], + 8: [0, 2, 5], + 9: [0, 3, 6], + 10: [1, 4, 7], + 11: [1, 4, 7], + } + + F = {0: 0, 1: 0, 2: 2, 3: 2, 4: 2, 5: 3, 6: 3, 7: 3, 8: 1, 9: 1, 10: 1, 11: 1} + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_hard_prob(self): + # Tests for two levels of recursion. + num_colors, s = 5, 5 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 10), + (0, 11), + (0, 12), + (0, 23), + (10, 4), + (10, 9), + (10, 20), + (11, 4), + (11, 8), + (11, 16), + (12, 9), + (12, 22), + (12, 23), + (23, 7), + (1, 17), + (1, 18), + (1, 19), + (1, 24), + (17, 5), + (17, 13), + (17, 22), + (18, 5), + (19, 5), + (19, 6), + (19, 8), + (24, 7), + (24, 16), + (2, 4), + (2, 13), + (2, 14), + (2, 15), + (4, 6), + (13, 5), + (13, 21), + (14, 6), + (14, 15), + (15, 6), + (15, 21), + (3, 16), + (3, 20), + (3, 21), + (3, 22), + (16, 8), + (20, 8), + (21, 9), + (22, 7), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + def test_hardest_prob(self): + # Tests for two levels of recursion. 
+ num_colors, s = 10, 4 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 19), + (0, 24), + (0, 29), + (0, 30), + (0, 35), + (19, 3), + (19, 7), + (19, 9), + (19, 15), + (19, 21), + (19, 24), + (19, 30), + (19, 38), + (24, 5), + (24, 11), + (24, 13), + (24, 20), + (24, 30), + (24, 37), + (24, 38), + (29, 6), + (29, 10), + (29, 13), + (29, 15), + (29, 16), + (29, 17), + (29, 20), + (29, 26), + (30, 6), + (30, 10), + (30, 15), + (30, 22), + (30, 23), + (30, 39), + (35, 6), + (35, 9), + (35, 14), + (35, 18), + (35, 22), + (35, 23), + (35, 25), + (35, 27), + (1, 20), + (1, 26), + (1, 31), + (1, 34), + (1, 38), + (20, 4), + (20, 8), + (20, 14), + (20, 18), + (20, 28), + (20, 33), + (26, 7), + (26, 10), + (26, 14), + (26, 18), + (26, 21), + (26, 32), + (26, 39), + (31, 5), + (31, 8), + (31, 13), + (31, 16), + (31, 17), + (31, 21), + (31, 25), + (31, 27), + (34, 7), + (34, 8), + (34, 13), + (34, 18), + (34, 22), + (34, 23), + (34, 25), + (34, 27), + (38, 4), + (38, 9), + (38, 12), + (38, 14), + (38, 21), + (38, 27), + (2, 3), + (2, 18), + (2, 21), + (2, 28), + (2, 32), + (2, 33), + (2, 36), + (2, 37), + (2, 39), + (3, 5), + (3, 9), + (3, 13), + (3, 22), + (3, 23), + (3, 25), + (3, 27), + (18, 6), + (18, 11), + (18, 15), + (18, 39), + (21, 4), + (21, 10), + (21, 14), + (21, 36), + (28, 6), + (28, 10), + (28, 14), + (28, 16), + (28, 17), + (28, 25), + (28, 27), + (32, 5), + (32, 10), + (32, 12), + (32, 16), + (32, 17), + (32, 22), + (32, 23), + (33, 7), + (33, 10), + (33, 12), + (33, 16), + (33, 17), + (33, 25), + (33, 27), + (36, 5), + (36, 8), + (36, 15), + (36, 16), + (36, 17), + (36, 25), + (36, 27), + (37, 5), + (37, 11), + (37, 15), + (37, 16), + (37, 17), + (37, 22), + (37, 23), + (39, 7), + (39, 8), + (39, 15), + (39, 22), + (39, 23), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 # V- = 0, V+ = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + def test_strategy_saturation_largest_first(self): + def color_remaining_nodes( + G, + colored_nodes, + full_color_assignment=None, + nodes_to_add_between_calls=1, + ): + color_assignments = [] + aux_colored_nodes = colored_nodes.copy() + + node_iterator = nx.algorithms.coloring.greedy_coloring.strategy_saturation_largest_first( + G, aux_colored_nodes + ) + + for u in node_iterator: + # Set to keep track of colors of neighbours + neighbour_colors = { + aux_colored_nodes[v] for v in G[u] if v in aux_colored_nodes + } + # Find the first unused color. + for color in itertools.count(): + if color not in neighbour_colors: + break + aux_colored_nodes[u] = color + color_assignments.append((u, color)) + + # Color nodes between iterations + for i in range(nodes_to_add_between_calls - 1): + if not len(color_assignments) + len(colored_nodes) >= len( + full_color_assignment + ): + full_color_assignment_node, color = full_color_assignment[ + len(color_assignments) + len(colored_nodes) + ] + + # Assign the new color to the current node. 
+ aux_colored_nodes[full_color_assignment_node] = color + color_assignments.append((full_color_assignment_node, color)) + + return color_assignments, aux_colored_nodes + + for G, _, _ in SPECIAL_TEST_CASES["saturation_largest_first"]: + G = G() + + # Check that function still works when nodes are colored between iterations + for nodes_to_add_between_calls in range(1, 5): + # Get a full color assignment, (including the order in which nodes were colored) + colored_nodes = {} + full_color_assignment, full_colored_nodes = color_remaining_nodes( + G, colored_nodes + ) + + # For each node in the color assignment, add it to colored_nodes and re-run the function + for ind, (node, color) in enumerate(full_color_assignment): + colored_nodes[node] = color + + ( + partial_color_assignment, + partial_colored_nodes, + ) = color_remaining_nodes( + G, + colored_nodes, + full_color_assignment=full_color_assignment, + nodes_to_add_between_calls=nodes_to_add_between_calls, + ) + + # Check that the color assignment and order of remaining nodes are the same + assert full_color_assignment[ind + 1 :] == partial_color_assignment + assert full_colored_nodes == partial_colored_nodes + + +# ############################ Utility functions ############################ +def verify_coloring(graph, coloring): + for node in graph.nodes(): + if node not in coloring: + return False + + color = coloring[node] + for neighbor in graph.neighbors(node): + if coloring[neighbor] == color: + return False + + return True + + +def verify_length(coloring, expected): + coloring = dict_to_sets(coloring) + return len(coloring) == expected + + +def dict_to_sets(colors): + if len(colors) == 0: + return [] + + k = max(colors.values()) + 1 + sets = [set() for _ in range(k)] + + for node, color in colors.items(): + sets[color].add(node) + + return sets + + +# ############################ Graph Generation ############################ + + +def empty_graph(): + return nx.Graph() + + +def one_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1]) + return graph + + +def two_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1, 2]) + graph.add_edges_from([(1, 2)]) + return graph + + +def three_node_clique(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.add_edges_from([(1, 2), (1, 3), (2, 3)]) + return graph + + +def disconnected(): + graph = nx.Graph() + graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)]) + return graph + + +def rs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def slf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def slf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 4), + (2, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 8), + (7, 8), + ] + ) + return graph + + +def lf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(6, 1), (1, 4), (4, 3), (3, 2), (2, 5)]) + return graph + + +def lf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 7), + (1, 6), + (1, 3), + (1, 4), + (7, 2), + (2, 6), + (2, 3), + (2, 5), + (5, 3), + (5, 4), + (4, 3), + ] + ) + return graph + + +def sl_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + 
graph.add_edges_from( + [(1, 2), (1, 3), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def sl_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 4), + (2, 8), + (8, 4), + (8, 6), + (8, 7), + (7, 5), + (7, 6), + (3, 4), + (4, 6), + (6, 5), + (5, 3), + ] + ) + return graph + + +def gis_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def gis_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(1, 5), (2, 5), (3, 6), (4, 6), (5, 6)]) + return graph + + +def cs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5]) + graph.add_edges_from([(1, 2), (1, 5), (2, 3), (2, 4), (2, 5), (3, 4), (4, 5)]) + return graph + + +def rsi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (3, 4), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + (1, 2), + (1, 5), + (1, 6), + (1, 7), + (2, 3), + (2, 8), + (2, 9), + (3, 4), + (3, 8), + (3, 9), + (4, 5), + (4, 6), + (4, 7), + (5, 6), + ] + ) + return graph + + +def sli_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 6), + (3, 4), + (4, 5), + (4, 6), + (5, 7), + (6, 7), + ] + ) + return graph + + +def sli_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 7), + (2, 8), + (2, 9), + (3, 6), + (3, 7), + (3, 9), + (4, 5), + (4, 6), + (4, 8), + (4, 9), + (5, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 9), + (7, 8), + (8, 9), + ] + ) + return graph + + +# -------------------------------------------------------------------------- +# Basic tests for all strategies +# For each basic graph function, specify the number of expected colors. +BASIC_TEST_CASES = { + empty_graph: 0, + one_node_graph: 1, + two_node_graph: 2, + disconnected: 2, + three_node_clique: 3, +} + + +# -------------------------------------------------------------------------- +# Special test cases. 
Each strategy has a list of tuples of the form +# (graph function, interchange, valid # of colors) +SPECIAL_TEST_CASES = { + "random_sequential": [ + (rs_shc, False, (2, 3)), + (rs_shc, True, 2), + (rsi_shc, True, (3, 4)), + ], + "saturation_largest_first": [(slf_shc, False, (3, 4)), (slf_hc, False, 4)], + "largest_first": [ + (lf_shc, False, (2, 3)), + (lf_hc, False, 4), + (lf_shc, True, 2), + (lf_hc, True, 3), + (lfi_shc, True, (3, 4)), + (lfi_hc, True, 4), + ], + "smallest_last": [ + (sl_shc, False, (3, 4)), + (sl_hc, False, 5), + (sl_shc, True, 3), + (sl_hc, True, 4), + (sli_shc, True, (3, 4)), + (sli_hc, True, 5), + ], + "independent_set": [(gis_shc, False, (2, 3)), (gis_hc, False, 3)], + "connected_sequential": [(cs_shc, False, (3, 4)), (cs_shc, True, 3)], + "connected_sequential_dfs": [(cs_shc, False, (3, 4))], +} + + +# -------------------------------------------------------------------------- +# Helper functions to test +# (graph function, interchange, valid # of colors) + + +def check_state(L, N, H, F, C): + s = len(C[0]) + num_colors = len(C.keys()) + + assert all(u in L[v] for u in L for v in L[u]) + assert all(F[u] != F[v] for u in L for v in L[u]) + assert all(len(L[u]) < num_colors for u in L) + assert all(len(C[x]) == s for x in C) + assert all(H[(c1, c2)] >= 0 for c1 in C for c2 in C) + assert all(N[(u, F[u])] == 0 for u in F) + + +def max_degree(G): + """Get the maximum degree of any node in G.""" + return max(G.degree(node) for node in G.nodes) if len(G.nodes) > 0 else 0 + + +def make_params_from_graph(G, F): + """Returns {N, L, H, C} from the given graph.""" + num_nodes = len(G) + L = {u: [] for u in range(num_nodes)} + for u, v in G.edges: + L[u].append(v) + L[v].append(u) + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + return {"N": N, "F": F, "C": C, "H": H, "L": L} diff --git a/phivenv/Lib/site-packages/networkx/algorithms/communicability_alg.py b/phivenv/Lib/site-packages/networkx/algorithms/communicability_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..c9144a7b84fd42108e050f47315fee97430a66e6 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/communicability_alg.py @@ -0,0 +1,162 @@ +""" +Communicability. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["communicability", "communicability_exp"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def communicability(G): + r"""Returns communicability between all pairs of nodes in G. + + The communicability between pairs of nodes in G is the sum of + walks of different lengths starting at node u and ending at node v. + + Parameters + ---------- + G: graph + + Returns + ------- + comm: dictionary of dictionaries + Dictionary of dictionaries keyed by nodes with communicability + as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + communicability_exp: + Communicability between all pairs of nodes in G using spectral + decomposition. + communicability_betweenness_centrality: + Communicability betweenness centrality for each node in G. + + Notes + ----- + This algorithm uses a spectral decomposition of the adjacency matrix. + Let G=(V,E) be a simple undirected graph. 
Using the connection between + the powers of the adjacency matrix and the number of walks in the graph, + the communicability between nodes `u` and `v` based on the graph spectrum + is [1]_ + + .. math:: + C(u,v)=\sum_{j=1}^{n}\phi_{j}(u)\phi_{j}(v)e^{\lambda_{j}}, + + where `\phi_{j}(u)` is the `u\rm{th}` element of the `j\rm{th}` orthonormal + eigenvector of the adjacency matrix associated with the eigenvalue + `\lambda_{j}`. + + References + ---------- + .. [1] Ernesto Estrada, Naomichi Hatano, + "Communicability in complex networks", + Phys. Rev. E 77, 036111 (2008). + https://arxiv.org/abs/0707.0756 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> c = nx.communicability(G) + """ + import numpy as np + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + w, vec = np.linalg.eigh(A) + expw = np.exp(w) + mapping = dict(zip(nodelist, range(len(nodelist)))) + c = {} + # computing communicabilities + for u in G: + c[u] = {} + for v in G: + s = 0 + p = mapping[u] + q = mapping[v] + for j in range(len(nodelist)): + s += vec[:, j][p] * vec[:, j][q] * expw[j] + c[u][v] = float(s) + return c + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def communicability_exp(G): + r"""Returns communicability between all pairs of nodes in G. + + Communicability between pair of node (u,v) of node in G is the sum of + walks of different lengths starting at node u and ending at node v. + + Parameters + ---------- + G: graph + + Returns + ------- + comm: dictionary of dictionaries + Dictionary of dictionaries keyed by nodes with communicability + as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + communicability: + Communicability between pairs of nodes in G. + communicability_betweenness_centrality: + Communicability betweenness centrality for each node in G. + + Notes + ----- + This algorithm uses matrix exponentiation of the adjacency matrix. + + Let G=(V,E) be a simple undirected graph. Using the connection between + the powers of the adjacency matrix and the number of walks in the graph, + the communicability between nodes u and v is [1]_, + + .. math:: + C(u,v) = (e^A)_{uv}, + + where `A` is the adjacency matrix of G. + + References + ---------- + .. [1] Ernesto Estrada, Naomichi Hatano, + "Communicability in complex networks", + Phys. Rev. E 77, 036111 (2008). 
+ https://arxiv.org/abs/0707.0756 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> c = nx.communicability_exp(G) + """ + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + # communicability matrix + expA = sp.linalg.expm(A) + mapping = dict(zip(nodelist, range(len(nodelist)))) + c = {} + for u in G: + c[u] = {} + for v in G: + c[u][v] = float(expA[mapping[u], mapping[v]]) + return c diff --git a/phivenv/Lib/site-packages/networkx/algorithms/community/asyn_fluid.py b/phivenv/Lib/site-packages/networkx/algorithms/community/asyn_fluid.py new file mode 100644 index 0000000000000000000000000000000000000000..1a0029ae7ff6c52331f16d6f20b97743ddd12509 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/community/asyn_fluid.py @@ -0,0 +1,150 @@ +"""Asynchronous Fluid Communities algorithm for community detection.""" + +from collections import Counter + +import networkx as nx +from networkx.algorithms.components import is_connected +from networkx.exception import NetworkXError +from networkx.utils import groups, not_implemented_for, py_random_state + +__all__ = ["asyn_fluidc"] + + +@not_implemented_for("directed", "multigraph") +@py_random_state(3) +@nx._dispatch +def asyn_fluidc(G, k, max_iter=100, seed=None): + """Returns communities in `G` as detected by Fluid Communities algorithm. + + The asynchronous fluid communities algorithm is described in + [1]_. The algorithm is based on the simple idea of fluids interacting + in an environment, expanding and pushing each other. Its initialization is + random, so found communities may vary on different executions. + + The algorithm proceeds as follows. First each of the initial k communities + is initialized in a random vertex in the graph. Then the algorithm iterates + over all vertices in a random order, updating the community of each vertex + based on its own community and the communities of its neighbours. This + process is performed several times until convergence. + At all times, each community has a total density of 1, which is equally + distributed among the vertices it contains. If a vertex changes of + community, vertex densities of affected communities are adjusted + immediately. When a complete iteration over all vertices is done, such that + no vertex changes the community it belongs to, the algorithm has converged + and returns. + + This is the original version of the algorithm described in [1]_. + Unfortunately, it does not support weighted graphs yet. + + Parameters + ---------- + G : NetworkX graph + Graph must be simple and undirected. + + k : integer + The number of communities to be found. + + max_iter : integer + The number of maximum iterations allowed. By default 100. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + k variable is not an optional argument. + + References + ---------- + .. [1] Parés F., Garcia-Gasulla D. et al. "Fluid Communities: A + Competitive and Highly Scalable Community Detection Algorithm". + [https://arxiv.org/pdf/1703.09307.pdf]. 
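+
+    Examples
+    --------
+    A minimal usage sketch; the split itself is seed-dependent, but a
+    connected graph is expected to yield exactly `k` communities:
+
+    >>> G = nx.karate_club_graph()
+    >>> communities = list(nx.community.asyn_fluidc(G, k=2, seed=7))
+    >>> len(communities)
+    2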
+ """ + # Initial checks + if not isinstance(k, int): + raise NetworkXError("k must be an integer.") + if not k > 0: + raise NetworkXError("k must be greater than 0.") + if not is_connected(G): + raise NetworkXError("Fluid Communities require connected Graphs.") + if len(G) < k: + raise NetworkXError("k cannot be bigger than the number of nodes.") + # Initialization + max_density = 1.0 + vertices = list(G) + seed.shuffle(vertices) + communities = {n: i for i, n in enumerate(vertices[:k])} + density = {} + com_to_numvertices = {} + for vertex in communities: + com_to_numvertices[communities[vertex]] = 1 + density[communities[vertex]] = max_density + # Set up control variables and start iterating + iter_count = 0 + cont = True + while cont: + cont = False + iter_count += 1 + # Loop over all vertices in graph in a random order + vertices = list(G) + seed.shuffle(vertices) + for vertex in vertices: + # Updating rule + com_counter = Counter() + # Take into account self vertex community + try: + com_counter.update({communities[vertex]: density[communities[vertex]]}) + except KeyError: + pass + # Gather neighbour vertex communities + for v in G[vertex]: + try: + com_counter.update({communities[v]: density[communities[v]]}) + except KeyError: + continue + # Check which is the community with highest density + new_com = -1 + if len(com_counter.keys()) > 0: + max_freq = max(com_counter.values()) + best_communities = [ + com + for com, freq in com_counter.items() + if (max_freq - freq) < 0.0001 + ] + # If actual vertex com in best communities, it is preserved + try: + if communities[vertex] in best_communities: + new_com = communities[vertex] + except KeyError: + pass + # If vertex community changes... + if new_com == -1: + # Set flag of non-convergence + cont = True + # Randomly chose a new community from candidates + new_com = seed.choice(best_communities) + # Update previous community status + try: + com_to_numvertices[communities[vertex]] -= 1 + density[communities[vertex]] = ( + max_density / com_to_numvertices[communities[vertex]] + ) + except KeyError: + pass + # Update new community status + communities[vertex] = new_com + com_to_numvertices[communities[vertex]] += 1 + density[communities[vertex]] = ( + max_density / com_to_numvertices[communities[vertex]] + ) + # If maximum iterations reached --> output actual results + if iter_count > max_iter: + break + # Return results by grouping communities as list of vertices + return iter(groups(communities).values()) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/community/centrality.py b/phivenv/Lib/site-packages/networkx/algorithms/community/centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..efdc98460e4db7ccbbb988c77b125525e3efd18a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/community/centrality.py @@ -0,0 +1,171 @@ +"""Functions for computing communities based on centrality notions.""" + +import networkx as nx + +__all__ = ["girvan_newman"] + + +@nx._dispatch(preserve_edge_attrs="most_valuable_edge") +def girvan_newman(G, most_valuable_edge=None): + """Finds communities in a graph using the Girvan–Newman method. + + Parameters + ---------- + G : NetworkX graph + + most_valuable_edge : function + Function that takes a graph as input and outputs an edge. The + edge returned by this function will be recomputed and removed at + each iteration of the algorithm. + + If not specified, the edge with the highest + :func:`networkx.edge_betweenness_centrality` will be used. 
+ + Returns + ------- + iterator + Iterator over tuples of sets of nodes in `G`. Each set of node + is a community, each tuple is a sequence of communities at a + particular level of the algorithm. + + Examples + -------- + To get the first pair of communities:: + + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + + To get only the first *k* tuples of communities, use + :func:`itertools.islice`:: + + >>> import itertools + >>> G = nx.path_graph(8) + >>> k = 2 + >>> comp = nx.community.girvan_newman(G) + >>> for communities in itertools.islice(comp, k): + ... print(tuple(sorted(c) for c in communities)) + ... + ([0, 1, 2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5, 6, 7]) + + To stop getting tuples of communities once the number of communities + is greater than *k*, use :func:`itertools.takewhile`:: + + >>> import itertools + >>> G = nx.path_graph(8) + >>> k = 4 + >>> comp = nx.community.girvan_newman(G) + >>> limited = itertools.takewhile(lambda c: len(c) <= k, comp) + >>> for communities in limited: + ... print(tuple(sorted(c) for c in communities)) + ... + ([0, 1, 2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5, 6, 7]) + ([0, 1], [2, 3], [4, 5], [6, 7]) + + To just choose an edge to remove based on the weight:: + + >>> from operator import itemgetter + >>> G = nx.path_graph(10) + >>> edges = G.edges() + >>> nx.set_edge_attributes(G, {(u, v): v for u, v in edges}, "weight") + >>> def heaviest(G): + ... u, v, w = max(G.edges(data="weight"), key=itemgetter(2)) + ... return (u, v) + ... + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=heaviest) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4, 5, 6, 7, 8], [9]) + + To utilize edge weights when choosing an edge with, for example, the + highest betweenness centrality:: + + >>> from networkx import edge_betweenness_centrality as betweenness + >>> def most_central_edge(G): + ... centrality = betweenness(G, weight="weight") + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=most_central_edge) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + + To specify a different ranking algorithm for edges, use the + `most_valuable_edge` keyword argument:: + + >>> from networkx import edge_betweenness_centrality + >>> from random import random + >>> def most_central_edge(G): + ... centrality = edge_betweenness_centrality(G) + ... max_cent = max(centrality.values()) + ... # Scale the centrality values so they are between 0 and 1, + ... # and add some random noise. + ... centrality = {e: c / max_cent for e, c in centrality.items()} + ... # Add some random noise. + ... centrality = {e: c + random() for e, c in centrality.items()} + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=most_central_edge) + + Notes + ----- + The Girvan–Newman algorithm detects communities by progressively + removing edges from the original graph. The algorithm removes the + "most valuable" edge, traditionally the edge with the highest + betweenness centrality, at each step. As the graph breaks down into + pieces, the tightly knit community structure is exposed and the + result can be depicted as a dendrogram. + + """ + # If the graph is already empty, simply return its connected + # components. 
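+    # ("Empty" here means edgeless; isolated nodes are still yielded as
+    # singleton components.)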
+ if G.number_of_edges() == 0: + yield tuple(nx.connected_components(G)) + return + # If no function is provided for computing the most valuable edge, + # use the edge betweenness centrality. + if most_valuable_edge is None: + + def most_valuable_edge(G): + """Returns the edge with the highest betweenness centrality + in the graph `G`. + + """ + # We have guaranteed that the graph is non-empty, so this + # dictionary will never be empty. + betweenness = nx.edge_betweenness_centrality(G) + return max(betweenness, key=betweenness.get) + + # The copy of G here must include the edge weight data. + g = G.copy().to_undirected() + # Self-loops must be removed because their removal has no effect on + # the connected components of the graph. + g.remove_edges_from(nx.selfloop_edges(g)) + while g.number_of_edges() > 0: + yield _without_most_central_edges(g, most_valuable_edge) + + +def _without_most_central_edges(G, most_valuable_edge): + """Returns the connected components of the graph that results from + repeatedly removing the most "valuable" edge in the graph. + + `G` must be a non-empty graph. This function modifies the graph `G` + in-place; that is, it removes edges on the graph `G`. + + `most_valuable_edge` is a function that takes the graph `G` as input + (or a subgraph with one or more edges of `G` removed) and returns an + edge. That edge will be removed and this process will be repeated + until the number of connected components in the graph increases. + + """ + original_num_components = nx.number_connected_components(G) + num_new_components = original_num_components + while num_new_components <= original_num_components: + edge = most_valuable_edge(G) + G.remove_edge(*edge) + new_components = tuple(nx.connected_components(G)) + num_new_components = len(new_components) + return new_components diff --git a/phivenv/Lib/site-packages/networkx/algorithms/community/community_utils.py b/phivenv/Lib/site-packages/networkx/algorithms/community/community_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5e4727eec42c5e4c7b25060a18d9894a9b8c4514 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/community/community_utils.py @@ -0,0 +1,29 @@ +"""Helper functions for community-finding algorithms.""" +import networkx as nx + +__all__ = ["is_partition"] + + +@nx._dispatch +def is_partition(G, communities): + """Returns *True* if `communities` is a partition of the nodes of `G`. + + A partition of a universe set is a family of pairwise disjoint sets + whose union is the entire universe set. + + Parameters + ---------- + G : NetworkX graph. + + communities : list or iterable of sets of nodes + If not a list, the iterable is converted internally to a list. + If it is an iterator it is exhausted. 
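+
+    Examples
+    --------
+    A quick sketch of the disjoint-cover check on a four-node path:
+
+    >>> G = nx.path_graph(4)
+    >>> nx.community.is_partition(G, [{0, 1}, {2, 3}])
+    True
+    >>> nx.community.is_partition(G, [{0, 1}, {1, 2, 3}])
+    False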
+ + """ + # Alternate implementation: + # return all(sum(1 if v in c else 0 for c in communities) == 1 for v in G) + if not isinstance(communities, list): + communities = list(communities) + nodes = {n for c in communities for n in c if n in G} + + return len(G) == len(nodes) == sum(len(c) for c in communities) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/community/kclique.py b/phivenv/Lib/site-packages/networkx/algorithms/community/kclique.py new file mode 100644 index 0000000000000000000000000000000000000000..60433669cee8c9fa38bde0956ccd0c3d319fc715 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/community/kclique.py @@ -0,0 +1,79 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["k_clique_communities"] + + +@nx._dispatch +def k_clique_communities(G, k, cliques=None): + """Find k-clique communities in graph using the percolation method. + + A k-clique community is the union of all cliques of size k that + can be reached through adjacent (sharing k-1 nodes) k-cliques. + + Parameters + ---------- + G : NetworkX graph + + k : int + Size of smallest clique + + cliques: list or generator + Precomputed cliques (use networkx.find_cliques(G)) + + Returns + ------- + Yields sets of nodes, one for each k-clique community. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> K5 = nx.convert_node_labels_to_integers(G, first_label=2) + >>> G.add_edges_from(K5.edges()) + >>> c = list(nx.community.k_clique_communities(G, 4)) + >>> sorted(list(c[0])) + [0, 1, 2, 3, 4, 5, 6] + >>> list(nx.community.k_clique_communities(G, 6)) + [] + + References + ---------- + .. [1] Gergely Palla, Imre Derényi, Illés Farkas1, and Tamás Vicsek, + Uncovering the overlapping community structure of complex networks + in nature and society Nature 435, 814-818, 2005, + doi:10.1038/nature03607 + """ + if k < 2: + raise nx.NetworkXError(f"k={k}, k must be greater than 1.") + if cliques is None: + cliques = nx.find_cliques(G) + cliques = [frozenset(c) for c in cliques if len(c) >= k] + + # First index which nodes are in which cliques + membership_dict = defaultdict(list) + for clique in cliques: + for node in clique: + membership_dict[node].append(clique) + + # For each clique, see which adjacent cliques percolate + perc_graph = nx.Graph() + perc_graph.add_nodes_from(cliques) + for clique in cliques: + for adj_clique in _get_adjacent_cliques(clique, membership_dict): + if len(clique.intersection(adj_clique)) >= (k - 1): + perc_graph.add_edge(clique, adj_clique) + + # Connected components of clique graph with perc edges + # are the percolated cliques + for component in nx.connected_components(perc_graph): + yield (frozenset.union(*component)) + + +def _get_adjacent_cliques(clique, membership_dict): + adjacent_cliques = set() + for n in clique: + for adj_clique in membership_dict[n]: + if clique != adj_clique: + adjacent_cliques.add(adj_clique) + return adjacent_cliques diff --git a/phivenv/Lib/site-packages/networkx/algorithms/community/kernighan_lin.py b/phivenv/Lib/site-packages/networkx/algorithms/community/kernighan_lin.py new file mode 100644 index 0000000000000000000000000000000000000000..a18c7779b5be52ed25a526b0e1a02d10d3222aa6 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/community/kernighan_lin.py @@ -0,0 +1,139 @@ +"""Functions for computing the Kernighan–Lin bipartition algorithm.""" + +from itertools import count + +import networkx as nx +from networkx.algorithms.community.community_utils import is_partition +from networkx.utils 
import BinaryHeap, not_implemented_for, py_random_state + +__all__ = ["kernighan_lin_bisection"] + + +def _kernighan_lin_sweep(edges, side): + """ + This is a modified form of Kernighan-Lin, which moves single nodes at a + time, alternating between sides to keep the bisection balanced. We keep + two min-heaps of swap costs to make optimal-next-move selection fast. + """ + costs0, costs1 = costs = BinaryHeap(), BinaryHeap() + for u, side_u, edges_u in zip(count(), side, edges): + cost_u = sum(w if side[v] else -w for v, w in edges_u) + costs[side_u].insert(u, cost_u if side_u else -cost_u) + + def _update_costs(costs_x, x): + for y, w in edges[x]: + costs_y = costs[side[y]] + cost_y = costs_y.get(y) + if cost_y is not None: + cost_y += 2 * (-w if costs_x is costs_y else w) + costs_y.insert(y, cost_y, True) + + i = 0 + totcost = 0 + while costs0 and costs1: + u, cost_u = costs0.pop() + _update_costs(costs0, u) + v, cost_v = costs1.pop() + _update_costs(costs1, v) + totcost += cost_u + cost_v + i += 1 + yield totcost, i, (u, v) + + +@not_implemented_for("directed") +@py_random_state(4) +@nx._dispatch(edge_attrs="weight") +def kernighan_lin_bisection(G, partition=None, max_iter=10, weight="weight", seed=None): + """Partition a graph into two blocks using the Kernighan–Lin + algorithm. + + This algorithm partitions a network into two sets by iteratively + swapping pairs of nodes to reduce the edge cut between the two sets. The + pairs are chosen according to a modified form of Kernighan-Lin [1]_, which + moves node individually, alternating between sides to keep the bisection + balanced. + + Parameters + ---------- + G : NetworkX graph + Graph must be undirected. + + partition : tuple + Pair of iterables containing an initial partition. If not + specified, a random balanced partition is used. + + max_iter : int + Maximum number of times to attempt swaps to find an + improvement before giving up. + + weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Only used if partition is None + + Returns + ------- + partition : tuple + A pair of sets of nodes representing the bipartition. + + Raises + ------ + NetworkXError + If partition is not a valid partition of the nodes of the graph. + + References + ---------- + .. [1] Kernighan, B. W.; Lin, Shen (1970). + "An efficient heuristic procedure for partitioning graphs." + *Bell Systems Technical Journal* 49: 291--307. + Oxford University Press 2011. 
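+
+    Examples
+    --------
+    A small usage sketch; which nodes land on which side depends on the
+    seed, but the two sides of the bisection stay balanced:
+
+    >>> G = nx.barbell_graph(3, 0)
+    >>> A, B = nx.community.kernighan_lin_bisection(G, seed=1)
+    >>> sorted(map(len, (A, B)))
+    [3, 3]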
+ + """ + n = len(G) + labels = list(G) + seed.shuffle(labels) + index = {v: i for i, v in enumerate(labels)} + + if partition is None: + side = [0] * (n // 2) + [1] * ((n + 1) // 2) + else: + try: + A, B = partition + except (TypeError, ValueError) as err: + raise nx.NetworkXError("partition must be two sets") from err + if not is_partition(G, (A, B)): + raise nx.NetworkXError("partition invalid") + side = [0] * n + for a in A: + side[index[a]] = 1 + + if G.is_multigraph(): + edges = [ + [ + (index[u], sum(e.get(weight, 1) for e in d.values())) + for u, d in G[v].items() + ] + for v in labels + ] + else: + edges = [ + [(index[u], e.get(weight, 1)) for u, e in G[v].items()] for v in labels + ] + + for i in range(max_iter): + costs = list(_kernighan_lin_sweep(edges, side)) + min_cost, min_i, _ = min(costs) + if min_cost >= 0: + break + + for _, _, (u, v) in costs[:min_i]: + side[u] = 1 + side[v] = 0 + + A = {u for u, s in zip(labels, side) if s == 0} + B = {u for u, s in zip(labels, side) if s == 1} + return A, B diff --git a/phivenv/Lib/site-packages/networkx/algorithms/community/label_propagation.py b/phivenv/Lib/site-packages/networkx/algorithms/community/label_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..c10938d627c23368b040c0e00d19177503029511 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/community/label_propagation.py @@ -0,0 +1,337 @@ +""" +Label propagation community detection algorithms. +""" +from collections import Counter, defaultdict, deque + +import networkx as nx +from networkx.utils import groups, not_implemented_for, py_random_state + +__all__ = [ + "label_propagation_communities", + "asyn_lpa_communities", + "fast_label_propagation_communities", +] + + +@py_random_state("seed") +@nx._dispatch(edge_attrs="weight") +def fast_label_propagation_communities(G, *, weight=None, seed=None): + """Returns communities in `G` as detected by fast label propagation. + + The fast label propagation algorithm is described in [1]_. The algorithm is + probabilistic and the found communities may vary in different executions. + + The algorithm operates as follows. First, the community label of each node is + set to a unique label. The algorithm then repeatedly updates the labels of + the nodes to the most frequent label in their neighborhood. In case of ties, + a random label is chosen from the most frequent labels. + + The algorithm maintains a queue of nodes that still need to be processed. + Initially, all nodes are added to the queue in a random order. Then the nodes + are removed from the queue one by one and processed. If a node updates its label, + all its neighbors that have a different label are added to the queue (if not + already in the queue). The algorithm stops when the queue is empty. + + Parameters + ---------- + G : Graph, DiGraph, MultiGraph, or MultiDiGraph + Any NetworkX graph. + + weight : string, or None (default) + The edge attribute representing a non-negative weight of an edge. If None, + each edge is assumed to have weight one. The weight of an edge is used in + determining the frequency with which a label appears among the neighbors of + a node (edge with weight `w` is equivalent to `w` unweighted edges). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + Edge directions are ignored for directed graphs. 
+ Edge weights must be non-negative numbers. + + References + ---------- + .. [1] Vincent A. Traag & Lovro Šubelj. "Large network community detection by + fast label propagation." Scientific Reports 13 (2023): 2701. + https://doi.org/10.1038/s41598-023-29610-z + """ + + # Queue of nodes to be processed. + nodes_queue = deque(G) + seed.shuffle(nodes_queue) + + # Set of nodes in the queue. + nodes_set = set(G) + + # Assign unique label to each node. + comms = {node: i for i, node in enumerate(G)} + + while nodes_queue: + # Remove next node from the queue to process. + node = nodes_queue.popleft() + nodes_set.remove(node) + + # Isolated nodes retain their initial label. + if G.degree(node) > 0: + # Compute frequency of labels in node's neighborhood. + label_freqs = _fast_label_count(G, comms, node, weight) + max_freq = max(label_freqs.values()) + + # Always sample new label from most frequent labels. + comm = seed.choice( + [comm for comm in label_freqs if label_freqs[comm] == max_freq] + ) + + if comms[node] != comm: + comms[node] = comm + + # Add neighbors that have different label to the queue. + for nbr in nx.all_neighbors(G, node): + if comms[nbr] != comm and nbr not in nodes_set: + nodes_queue.append(nbr) + nodes_set.add(nbr) + + yield from groups(comms).values() + + +def _fast_label_count(G, comms, node, weight=None): + """Computes the frequency of labels in the neighborhood of a node. + + Returns a dictionary keyed by label to the frequency of that label. + """ + + if weight is None: + # Unweighted (un)directed simple graph. + if not G.is_multigraph(): + label_freqs = Counter(map(comms.get, nx.all_neighbors(G, node))) + + # Unweighted (un)directed multigraph. + else: + label_freqs = defaultdict(int) + for nbr in G[node]: + label_freqs[comms[nbr]] += len(G[node][nbr]) + + if G.is_directed(): + for nbr in G.pred[node]: + label_freqs[comms[nbr]] += len(G.pred[node][nbr]) + + else: + # Weighted undirected simple/multigraph. + label_freqs = defaultdict(float) + for _, nbr, w in G.edges(node, data=weight, default=1): + label_freqs[comms[nbr]] += w + + # Weighted directed simple/multigraph. + if G.is_directed(): + for nbr, _, w in G.in_edges(node, data=weight, default=1): + label_freqs[comms[nbr]] += w + + return label_freqs + + +@py_random_state(2) +@nx._dispatch(edge_attrs="weight") +def asyn_lpa_communities(G, weight=None, seed=None): + """Returns communities in `G` as detected by asynchronous label + propagation. + + The asynchronous label propagation algorithm is described in + [1]_. The algorithm is probabilistic and the found communities may + vary on different executions. + + The algorithm proceeds as follows. After initializing each node with + a unique label, the algorithm repeatedly sets the label of a node to + be the label that appears most frequently among that nodes + neighbors. The algorithm halts when each node has the label that + appears most frequently among its neighbors. The algorithm is + asynchronous because each node is updated without waiting for + updates on the remaining nodes. + + This generalized version of the algorithm in [1]_ accepts edge + weights. + + Parameters + ---------- + G : Graph + + weight : string + The edge attribute representing the weight of an edge. + If None, each edge is assumed to have weight one. In this + algorithm, the weight of an edge is used in determining the + frequency with which a label appears among the neighbors of a + node: a higher weight means the label appears more often. 
+ + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + Edge weight attributes must be numerical. + + References + ---------- + .. [1] Raghavan, Usha Nandini, Réka Albert, and Soundar Kumara. "Near + linear time algorithm to detect community structures in large-scale + networks." Physical Review E 76.3 (2007): 036106. + """ + + labels = {n: i for i, n in enumerate(G)} + cont = True + + while cont: + cont = False + nodes = list(G) + seed.shuffle(nodes) + + for node in nodes: + if not G[node]: + continue + + # Get label frequencies among adjacent nodes. + # Depending on the order they are processed in, + # some nodes will be in iteration t and others in t-1, + # making the algorithm asynchronous. + if weight is None: + # initialising a Counter from an iterator of labels is + # faster for getting unweighted label frequencies + label_freq = Counter(map(labels.get, G[node])) + else: + # updating a defaultdict is substantially faster + # for getting weighted label frequencies + label_freq = defaultdict(float) + for _, v, wt in G.edges(node, data=weight, default=1): + label_freq[labels[v]] += wt + + # Get the labels that appear with maximum frequency. + max_freq = max(label_freq.values()) + best_labels = [ + label for label, freq in label_freq.items() if freq == max_freq + ] + + # If the node does not have one of the maximum frequency labels, + # randomly choose one of them and update the node's label. + # Continue the iteration as long as at least one node + # doesn't have a maximum frequency label. + if labels[node] not in best_labels: + labels[node] = seed.choice(best_labels) + cont = True + + yield from groups(labels).values() + + +@not_implemented_for("directed") +@nx._dispatch +def label_propagation_communities(G): + """Generates community sets determined by label propagation + + Finds communities in `G` using a semi-synchronous label propagation + method [1]_. This method combines the advantages of both the synchronous + and asynchronous models. Not implemented for directed graphs. + + Parameters + ---------- + G : graph + An undirected NetworkX graph. + + Returns + ------- + communities : iterable + A dict_values object that contains a set of nodes for each community. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed + + References + ---------- + .. [1] Cordasco, G., & Gargano, L. (2010, December). Community detection + via semi-synchronous label propagation algorithms. In Business + Applications of Social Network Analysis (BASNA), 2010 IEEE International + Workshop on (pp. 1-8). IEEE. + """ + coloring = _color_network(G) + # Create a unique label for each node in the graph + labeling = {v: k for k, v in enumerate(G)} + while not _labeling_complete(labeling, G): + # Update the labels of every node with the same color. + for color, nodes in coloring.items(): + for n in nodes: + _update_label(n, labeling, G) + + clusters = defaultdict(set) + for node, label in labeling.items(): + clusters[label].add(node) + return clusters.values() + + +def _color_network(G): + """Colors the network so that neighboring nodes all have distinct colors. + + Returns a dict keyed by color to a set of nodes with that color. 
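+
+    A minimal sketch: on a three-node path the greedy coloring uses two
+    colors, giving color classes of sizes 1 and 2.
+
+    >>> G = nx.path_graph(3)
+    >>> sorted(len(nodes) for nodes in _color_network(G).values())
+    [1, 2]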
+ """ + coloring = {} # color => set(node) + colors = nx.coloring.greedy_color(G) + for node, color in colors.items(): + if color in coloring: + coloring[color].add(node) + else: + coloring[color] = {node} + return coloring + + +def _labeling_complete(labeling, G): + """Determines whether or not LPA is done. + + Label propagation is complete when all nodes have a label that is + in the set of highest frequency labels amongst its neighbors. + + Nodes with no neighbors are considered complete. + """ + return all( + labeling[v] in _most_frequent_labels(v, labeling, G) for v in G if len(G[v]) > 0 + ) + + +def _most_frequent_labels(node, labeling, G): + """Returns a set of all labels with maximum frequency in `labeling`. + + Input `labeling` should be a dict keyed by node to labels. + """ + if not G[node]: + # Nodes with no neighbors are themselves a community and are labeled + # accordingly, hence the immediate if statement. + return {labeling[node]} + + # Compute the frequencies of all neighbours of node + freqs = Counter(labeling[q] for q in G[node]) + max_freq = max(freqs.values()) + return {label for label, freq in freqs.items() if freq == max_freq} + + +def _update_label(node, labeling, G): + """Updates the label of a node using the Prec-Max tie breaking algorithm + + The algorithm is explained in: 'Community Detection via Semi-Synchronous + Label Propagation Algorithms' Cordasco and Gargano, 2011 + """ + high_labels = _most_frequent_labels(node, labeling, G) + if len(high_labels) == 1: + labeling[node] = high_labels.pop() + elif len(high_labels) > 1: + # Prec-Max + if labeling[node] not in high_labels: + labeling[node] = max(high_labels) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/community/louvain.py b/phivenv/Lib/site-packages/networkx/algorithms/community/louvain.py new file mode 100644 index 0000000000000000000000000000000000000000..772f4d79d69aea7b092565c5d24bc3b66c796d56 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/community/louvain.py @@ -0,0 +1,373 @@ +"""Function for detecting communities based on Louvain Community Detection +Algorithm""" + +from collections import defaultdict, deque + +import networkx as nx +from networkx.algorithms.community import modularity +from networkx.utils import py_random_state + +__all__ = ["louvain_communities", "louvain_partitions"] + + +@py_random_state("seed") +@nx._dispatch(edge_attrs="weight") +def louvain_communities( + G, weight="weight", resolution=1, threshold=0.0000001, seed=None +): + r"""Find the best partition of a graph using the Louvain Community Detection + Algorithm. + + Louvain Community Detection Algorithm is a simple method to extract the community + structure of a network. This is a heuristic method based on modularity optimization. [1]_ + + The algorithm works in 2 steps. On the first step it assigns every node to be + in its own community and then for each node it tries to find the maximum positive + modularity gain by moving each node to all of its neighbor communities. If no positive + gain is achieved the node remains in its original community. + + The modularity gain obtained by moving an isolated node $i$ into a community $C$ can + easily be calculated by the following formula (combining [1]_ [2]_ and some algebra): + + .. 
math:: + \Delta Q = \frac{k_{i,in}}{2m} - \gamma\frac{ \Sigma_{tot} \cdot k_i}{2m^2} + + where $m$ is the size of the graph, $k_{i,in}$ is the sum of the weights of the links + from $i$ to nodes in $C$, $k_i$ is the sum of the weights of the links incident to node $i$, + $\Sigma_{tot}$ is the sum of the weights of the links incident to nodes in $C$ and $\gamma$ + is the resolution parameter. + + For the directed case the modularity gain can be computed using this formula according to [3]_ + + .. math:: + \Delta Q = \frac{k_{i,in}}{m} + - \gamma\frac{k_i^{out} \cdot\Sigma_{tot}^{in} + k_i^{in} \cdot \Sigma_{tot}^{out}}{m^2} + + where $k_i^{out}$, $k_i^{in}$ are the outer and inner weighted degrees of node $i$ and + $\Sigma_{tot}^{in}$, $\Sigma_{tot}^{out}$ are the sum of in-going and out-going links incident + to nodes in $C$. + + The first phase continues until no individual move can improve the modularity. + + The second phase consists in building a new network whose nodes are now the communities + found in the first phase. To do so, the weights of the links between the new nodes are given by + the sum of the weight of the links between nodes in the corresponding two communities. Once this + phase is complete it is possible to reapply the first phase creating bigger communities with + increased modularity. + + The above two phases are executed until no modularity gain is achieved (or is less than + the `threshold`). + + Be careful with self-loops in the input graph. These are treated as + previously reduced communities -- as if the process had been started + in the middle of the algorithm. Large self-loop edge weights thus + represent strong communities and in practice may be hard to add + other nodes to. If your input graph edge weights for self-loops + do not represent already reduced communities you may want to remove + the self-loops before inputting that graph. + + Parameters + ---------- + G : NetworkX graph + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + resolution : float, optional (default=1) + If resolution is less than 1, the algorithm favors larger communities. + Greater than 1 favors smaller communities + threshold : float, optional (default=0.0000001) + Modularity gain threshold for each level. If the gain of modularity + between 2 levels of the algorithm is less than the given threshold + then the algorithm stops and returns the resulting communities. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. + + Examples + -------- + >>> import networkx as nx + >>> G = nx.petersen_graph() + >>> nx.community.louvain_communities(G, seed=123) + [{0, 4, 5, 7, 9}, {1, 2, 3, 6, 8}] + + Notes + ----- + The order in which the nodes are considered can affect the final output. In the algorithm + the ordering happens using a random shuffle. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. Mech 10008, 1-12(2008). https://doi.org/10.1088/1742-5468/2008/10/P10008 + .. [2] Traag, V.A., Waltman, L. & van Eck, N.J. From Louvain to Leiden: guaranteeing + well-connected communities. Sci Rep 9, 5233 (2019). https://doi.org/10.1038/s41598-019-41695-z + .. [3] Nicolas Dugué, Anthony Perez. 
Directed Louvain : maximizing modularity in directed networks. + [Research Report] Université d’Orléans. 2015. hal-01231784. https://hal.archives-ouvertes.fr/hal-01231784 + + See Also + -------- + louvain_partitions + """ + + d = louvain_partitions(G, weight, resolution, threshold, seed) + q = deque(d, maxlen=1) + return q.pop() + + +@py_random_state("seed") +@nx._dispatch(edge_attrs="weight") +def louvain_partitions( + G, weight="weight", resolution=1, threshold=0.0000001, seed=None +): + """Yields partitions for each level of the Louvain Community Detection Algorithm + + Louvain Community Detection Algorithm is a simple method to extract the community + structure of a network. This is a heuristic method based on modularity optimization. [1]_ + + The partitions at each level (step of the algorithm) form a dendrogram of communities. + A dendrogram is a diagram representing a tree and each level represents + a partition of the G graph. The top level contains the smallest communities + and as you traverse to the bottom of the tree the communities get bigger + and the overall modularity increases making the partition better. + + Each level is generated by executing the two phases of the Louvain Community + Detection Algorithm. + + Be careful with self-loops in the input graph. These are treated as + previously reduced communities -- as if the process had been started + in the middle of the algorithm. Large self-loop edge weights thus + represent strong communities and in practice may be hard to add + other nodes to. If your input graph edge weights for self-loops + do not represent already reduced communities you may want to remove + the self-loops before inputting that graph. + + Parameters + ---------- + G : NetworkX graph + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + resolution : float, optional (default=1) + If resolution is less than 1, the algorithm favors larger communities. + Greater than 1 favors smaller communities + threshold : float, optional (default=0.0000001) + Modularity gain threshold for each level. If the gain of modularity + between 2 levels of the algorithm is less than the given threshold + then the algorithm stops and returns the resulting communities. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Yields + ------ + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. 
Mech 10008, 1-12(2008) + + See Also + -------- + louvain_communities + """ + + partition = [{u} for u in G.nodes()] + if nx.is_empty(G): + yield partition + return + mod = modularity(G, partition, resolution=resolution, weight=weight) + is_directed = G.is_directed() + if G.is_multigraph(): + graph = _convert_multigraph(G, weight, is_directed) + else: + graph = G.__class__() + graph.add_nodes_from(G) + graph.add_weighted_edges_from(G.edges(data=weight, default=1)) + + m = graph.size(weight="weight") + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + improvement = True + while improvement: + # gh-5901 protect the sets in the yielded list from further manipulation here + yield [s.copy() for s in partition] + new_mod = modularity( + graph, inner_partition, resolution=resolution, weight="weight" + ) + if new_mod - mod <= threshold: + return + mod = new_mod + graph = _gen_graph(graph, inner_partition) + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + + +def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None): + """Calculate one level of the Louvain partitions tree + + Parameters + ---------- + G : NetworkX Graph/DiGraph + The graph from which to detect communities + m : number + The size of the graph `G`. + partition : list of sets of nodes + A valid partition of the graph `G` + resolution : positive number + The resolution parameter for computing the modularity of a partition + is_directed : bool + True if `G` is a directed graph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + """ + node2com = {u: i for i, u in enumerate(G.nodes())} + inner_partition = [{u} for u in G.nodes()] + if is_directed: + in_degrees = dict(G.in_degree(weight="weight")) + out_degrees = dict(G.out_degree(weight="weight")) + Stot_in = list(in_degrees.values()) + Stot_out = list(out_degrees.values()) + # Calculate weights for both in and out neighbours without considering self-loops + nbrs = {} + for u in G: + nbrs[u] = defaultdict(float) + for _, n, wt in G.out_edges(u, data="weight"): + if u != n: + nbrs[u][n] += wt + for n, _, wt in G.in_edges(u, data="weight"): + if u != n: + nbrs[u][n] += wt + else: + degrees = dict(G.degree(weight="weight")) + Stot = list(degrees.values()) + nbrs = {u: {v: data["weight"] for v, data in G[u].items() if v != u} for u in G} + rand_nodes = list(G.nodes) + seed.shuffle(rand_nodes) + nb_moves = 1 + improvement = False + while nb_moves > 0: + nb_moves = 0 + for u in rand_nodes: + best_mod = 0 + best_com = node2com[u] + weights2com = _neighbor_weights(nbrs[u], node2com) + if is_directed: + in_degree = in_degrees[u] + out_degree = out_degrees[u] + Stot_in[best_com] -= in_degree + Stot_out[best_com] -= out_degree + remove_cost = ( + -weights2com[best_com] / m + + resolution + * (out_degree * Stot_in[best_com] + in_degree * Stot_out[best_com]) + / m**2 + ) + else: + degree = degrees[u] + Stot[best_com] -= degree + remove_cost = -weights2com[best_com] / m + resolution * ( + Stot[best_com] * degree + ) / (2 * m**2) + for nbr_com, wt in weights2com.items(): + if is_directed: + gain = ( + remove_cost + + wt / m + - resolution + * ( + out_degree * Stot_in[nbr_com] + + in_degree * Stot_out[nbr_com] + ) + / m**2 + ) + else: + gain = ( + remove_cost + + wt / m + - resolution * (Stot[nbr_com] * degree) / (2 * m**2) + ) + if gain > best_mod: + best_mod = gain + best_com = nbr_com + 
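+            # u was tentatively removed from its community above; add its
+            # degree back to the totals of whichever community won the
+            # gain comparison (possibly its original one).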
if is_directed: + Stot_in[best_com] += in_degree + Stot_out[best_com] += out_degree + else: + Stot[best_com] += degree + if best_com != node2com[u]: + com = G.nodes[u].get("nodes", {u}) + partition[node2com[u]].difference_update(com) + inner_partition[node2com[u]].remove(u) + partition[best_com].update(com) + inner_partition[best_com].add(u) + improvement = True + nb_moves += 1 + node2com[u] = best_com + partition = list(filter(len, partition)) + inner_partition = list(filter(len, inner_partition)) + return partition, inner_partition, improvement + + +def _neighbor_weights(nbrs, node2com): + """Calculate weights between node and its neighbor communities. + + Parameters + ---------- + nbrs : dictionary + Dictionary with nodes' neighbours as keys and their edge weight as value. + node2com : dictionary + Dictionary with all graph's nodes as keys and their community index as value. + + """ + weights = defaultdict(float) + for nbr, wt in nbrs.items(): + weights[node2com[nbr]] += wt + return weights + + +def _gen_graph(G, partition): + """Generate a new graph based on the partitions of a given graph""" + H = G.__class__() + node2com = {} + for i, part in enumerate(partition): + nodes = set() + for node in part: + node2com[node] = i + nodes.update(G.nodes[node].get("nodes", {node})) + H.add_node(i, nodes=nodes) + + for node1, node2, wt in G.edges(data=True): + wt = wt["weight"] + com1 = node2com[node1] + com2 = node2com[node2] + temp = H.get_edge_data(com1, com2, {"weight": 0})["weight"] + H.add_edge(com1, com2, weight=wt + temp) + return H + + +def _convert_multigraph(G, weight, is_directed): + """Convert a Multigraph to normal Graph""" + if is_directed: + H = nx.DiGraph() + else: + H = nx.Graph() + H.add_nodes_from(G) + for u, v, wt in G.edges(data=weight, default=1): + if H.has_edge(u, v): + H[u][v]["weight"] += wt + else: + H.add_edge(u, v, weight=wt) + return H diff --git a/phivenv/Lib/site-packages/networkx/algorithms/community/lukes.py b/phivenv/Lib/site-packages/networkx/algorithms/community/lukes.py new file mode 100644 index 0000000000000000000000000000000000000000..600a4db63d6d572dcebbe9d99738d3fecb6c5896 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/community/lukes.py @@ -0,0 +1,226 @@ +"""Lukes Algorithm for exact optimal weighted tree partitioning.""" + +from copy import deepcopy +from functools import lru_cache +from random import choice + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["lukes_partitioning"] + +D_EDGE_W = "weight" +D_EDGE_VALUE = 1.0 +D_NODE_W = "weight" +D_NODE_VALUE = 1 +PKEY = "partitions" +CLUSTER_EVAL_CACHE_SIZE = 2048 + + +def _split_n_from(n, min_size_of_first_part): + # splits j in two parts of which the first is at least + # the second argument + assert n >= min_size_of_first_part + for p1 in range(min_size_of_first_part, n + 1): + yield p1, n - p1 + + +@nx._dispatch(node_attrs="node_weight", edge_attrs="edge_weight") +def lukes_partitioning(G, max_size, node_weight=None, edge_weight=None): + """Optimal partitioning of a weighted tree using the Lukes algorithm. + + This algorithm partitions a connected, acyclic graph featuring integer + node weights and float edge weights. The resulting clusters are such + that the total weight of the nodes in each cluster does not exceed + max_size and that the weight of the edges that are cut by the partition + is minimum. The algorithm is based on [1]_. 
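+
+    As a small illustration (a sketch, not a doctest): on a four-node path
+    with unit node and edge weights and ``max_size=2``, the optimal
+    partition is the two pairs of adjacent nodes, which cuts exactly one
+    edge.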
+ + Parameters + ---------- + G : NetworkX graph + + max_size : int + Maximum weight a partition can have in terms of sum of + node_weight for all nodes in the partition + + edge_weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + node_weight : key + Node data key to use as weight. If None, the weights are all + set to one. The data must be int. + + Returns + ------- + partition : list + A list of sets of nodes representing the clusters of the + partition. + + Raises + ------ + NotATree + If G is not a tree. + TypeError + If any of the values of node_weight is not int. + + References + ---------- + .. [1] Lukes, J. A. (1974). + "Efficient Algorithm for the Partitioning of Trees." + IBM Journal of Research and Development, 18(3), 217–224. + + """ + # First sanity check and tree preparation + if not nx.is_tree(G): + raise nx.NotATree("lukes_partitioning works only on trees") + else: + if nx.is_directed(G): + root = [n for n, d in G.in_degree() if d == 0] + assert len(root) == 1 + root = root[0] + t_G = deepcopy(G) + else: + root = choice(list(G.nodes)) + # this has the desirable side effect of not inheriting attributes + t_G = nx.dfs_tree(G, root) + + # Since we do not want to screw up the original graph, + # if we have a blank attribute, we make a deepcopy + if edge_weight is None or node_weight is None: + safe_G = deepcopy(G) + if edge_weight is None: + nx.set_edge_attributes(safe_G, D_EDGE_VALUE, D_EDGE_W) + edge_weight = D_EDGE_W + if node_weight is None: + nx.set_node_attributes(safe_G, D_NODE_VALUE, D_NODE_W) + node_weight = D_NODE_W + else: + safe_G = G + + # Second sanity check + # The values of node_weight MUST BE int. + # I cannot see any room for duck typing without incurring serious + # danger of subtle bugs. + all_n_attr = nx.get_node_attributes(safe_G, node_weight).values() + for x in all_n_attr: + if not isinstance(x, int): + raise TypeError( + "lukes_partitioning needs integer " + f"values for node_weight ({node_weight})" + ) + + # SUBROUTINES ----------------------- + # these functions are defined here for two reasons: + # - brevity: we can leverage global "safe_G" + # - caching: signatures are hashable + + @not_implemented_for("undirected") + # this is intended to be called only on t_G + def _leaves(gr): + for x in gr.nodes: + if not nx.descendants(gr, x): + yield x + + @not_implemented_for("undirected") + def _a_parent_of_leaves_only(gr): + tleaves = set(_leaves(gr)) + for n in set(gr.nodes) - tleaves: + if all(x in tleaves for x in nx.descendants(gr, n)): + return n + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _value_of_cluster(cluster): + valid_edges = [e for e in safe_G.edges if e[0] in cluster and e[1] in cluster] + return sum(safe_G.edges[e][edge_weight] for e in valid_edges) + + def _value_of_partition(partition): + return sum(_value_of_cluster(frozenset(c)) for c in partition) + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _weight_of_cluster(cluster): + return sum(safe_G.nodes[n][node_weight] for n in cluster) + + def _pivot(partition, node): + ccx = [c for c in partition if node in c] + assert len(ccx) == 1 + return ccx[0] + + def _concatenate_or_merge(partition_1, partition_2, x, i, ref_weight): + ccx = _pivot(partition_1, x) + cci = _pivot(partition_2, i) + merged_xi = ccx.union(cci) + + # We first check if we can do the merge. 
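+        # (A merge is feasible only when the combined node weight of the two
+        # pivot clusters stays within ref_weight.  Merging moves the edges
+        # between ccx and cci out of the cut, so it can only increase the
+        # retained intra-cluster weight measured by _value_of_partition.)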
+ # If so, we do the actual calculations, otherwise we concatenate + if _weight_of_cluster(frozenset(merged_xi)) <= ref_weight: + cp1 = list(filter(lambda x: x != ccx, partition_1)) + cp2 = list(filter(lambda x: x != cci, partition_2)) + + option_2 = [merged_xi] + cp1 + cp2 + return option_2, _value_of_partition(option_2) + else: + option_1 = partition_1 + partition_2 + return option_1, _value_of_partition(option_1) + + # INITIALIZATION ----------------------- + leaves = set(_leaves(t_G)) + for lv in leaves: + t_G.nodes[lv][PKEY] = {} + slot = safe_G.nodes[lv][node_weight] + t_G.nodes[lv][PKEY][slot] = [{lv}] + t_G.nodes[lv][PKEY][0] = [{lv}] + + for inner in [x for x in t_G.nodes if x not in leaves]: + t_G.nodes[inner][PKEY] = {} + slot = safe_G.nodes[inner][node_weight] + t_G.nodes[inner][PKEY][slot] = [{inner}] + + # CORE ALGORITHM ----------------------- + while True: + x_node = _a_parent_of_leaves_only(t_G) + weight_of_x = safe_G.nodes[x_node][node_weight] + best_value = 0 + best_partition = None + bp_buffer = {} + x_descendants = nx.descendants(t_G, x_node) + for i_node in x_descendants: + for j in range(weight_of_x, max_size + 1): + for a, b in _split_n_from(j, weight_of_x): + if ( + a not in t_G.nodes[x_node][PKEY] + or b not in t_G.nodes[i_node][PKEY] + ): + # it's not possible to form this particular weight sum + continue + + part1 = t_G.nodes[x_node][PKEY][a] + part2 = t_G.nodes[i_node][PKEY][b] + part, value = _concatenate_or_merge(part1, part2, x_node, i_node, j) + + if j not in bp_buffer or bp_buffer[j][1] < value: + # we annotate in the buffer the best partition for j + bp_buffer[j] = part, value + + # we also keep track of the overall best partition + if best_value <= value: + best_value = value + best_partition = part + + # as illustrated in Lukes, once we finished a child, we can + # discharge the partitions we found into the graph + # (the key phrase is make all x == x') + # so that they are used by the subsequent children + for w, (best_part_for_vl, vl) in bp_buffer.items(): + t_G.nodes[x_node][PKEY][w] = best_part_for_vl + bp_buffer.clear() + + # the absolute best partition for this node + # across all weights has to be stored at 0 + t_G.nodes[x_node][PKEY][0] = best_partition + t_G.remove_nodes_from(x_descendants) + + if x_node == root: + # the 0-labeled partition of root + # is the optimal one for the whole tree + return t_G.nodes[root][PKEY][0] diff --git a/phivenv/Lib/site-packages/networkx/algorithms/community/modularity_max.py b/phivenv/Lib/site-packages/networkx/algorithms/community/modularity_max.py new file mode 100644 index 0000000000000000000000000000000000000000..aba3267c33e8dcba06e1fa6ed95d5ec0b6b45dfe --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/community/modularity_max.py @@ -0,0 +1,448 @@ +"""Functions for detecting communities based on modularity.""" + +from collections import defaultdict + +import networkx as nx +from networkx.algorithms.community.quality import modularity +from networkx.utils import not_implemented_for +from networkx.utils.mapped_queue import MappedQueue + +__all__ = [ + "greedy_modularity_communities", + "naive_greedy_modularity_communities", +] + + +def _greedy_modularity_communities_generator(G, weight=None, resolution=1): + r"""Yield community partitions of G and the modularity change at each step. 
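+
+    This private generator is consumed by `greedy_modularity_communities`
+    below, which drives it until one of its stopping criteria is met.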
+ + This function performs Clauset-Newman-Moore greedy modularity maximization [2]_ + At each step of the process it yields the change in modularity that will occur in + the next step followed by yielding the new community partition after that step. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until one community contains all nodes (the partition has one set). + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + Yields + ------ + Alternating yield statements produce the following two objects: + + communities: dict_values + A dict_values of frozensets of nodes, one for each community. + This represents a partition of the nodes of the graph into communities. + The first yield is the partition with each node in its own community. + + dq: float + The change in modularity when merging the next two communities + that leads to the largest modularity. + + See Also + -------- + modularity + + References + ---------- + .. [1] Newman, M. E. J. "Networks: An Introduction", page 224 + Oxford University Press 2011. + .. [2] Clauset, A., Newman, M. E., & Moore, C. + "Finding community structure in very large networks." + Physical Review E 70(6), 2004. + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community + Detection" Phys. Rev. E74, 2006. + .. [4] Newman, M. E. J."Analysis of weighted networks" + Physical Review E 70(5 Pt 2):056131, 2004. + """ + directed = G.is_directed() + N = G.number_of_nodes() + + # Count edges (or the sum of edge-weights for weighted graphs) + m = G.size(weight) + q0 = 1 / m + + # Calculate degrees (notation from the papers) + # a : the fraction of (weighted) out-degree for each node + # b : the fraction of (weighted) in-degree for each node + if directed: + a = {node: deg_out * q0 for node, deg_out in G.out_degree(weight=weight)} + b = {node: deg_in * q0 for node, deg_in in G.in_degree(weight=weight)} + else: + a = b = {node: deg * q0 * 0.5 for node, deg in G.degree(weight=weight)} + + # this preliminary step collects the edge weights for each node pair + # It handles multigraph and digraph and works fine for graph. 
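+    # For two single-node communities i and j, merging them changes the
+    # modularity by
+    #     dq_ij = w_ij / m - resolution * (a_i * b_j + b_i * a_j)
+    # where w_ij is the total edge weight between i and j.  The loop below
+    # accumulates w_ij; the scaling pass after it applies this formula.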
+ dq_dict = defaultdict(lambda: defaultdict(float)) + for u, v, wt in G.edges(data=weight, default=1): + if u == v: + continue + dq_dict[u][v] += wt + dq_dict[v][u] += wt + + # now scale and subtract the expected edge-weights term + for u, nbrdict in dq_dict.items(): + for v, wt in nbrdict.items(): + dq_dict[u][v] = q0 * wt - resolution * (a[u] * b[v] + b[u] * a[v]) + + # Use -dq to get a max_heap instead of a min_heap + # dq_heap holds a heap for each node's neighbors + dq_heap = {u: MappedQueue({(u, v): -dq for v, dq in dq_dict[u].items()}) for u in G} + # H -> all_dq_heap holds a heap with the best items for each node + H = MappedQueue([dq_heap[n].heap[0] for n in G if len(dq_heap[n]) > 0]) + + # Initialize single-node communities + communities = {n: frozenset([n]) for n in G} + yield communities.values() + + # Merge the two communities that lead to the largest modularity + while len(H) > 1: + # Find best merge + # Remove from heap of row maxes + # Ties will be broken by choosing the pair with lowest min community id + try: + negdq, u, v = H.pop() + except IndexError: + break + dq = -negdq + yield dq + # Remove best merge from row u heap + dq_heap[u].pop() + # Push new row max onto H + if len(dq_heap[u]) > 0: + H.push(dq_heap[u].heap[0]) + # If this element was also at the root of row v, we need to remove the + # duplicate entry from H + if dq_heap[v].heap[0] == (v, u): + H.remove((v, u)) + # Remove best merge from row v heap + dq_heap[v].remove((v, u)) + # Push new row max onto H + if len(dq_heap[v]) > 0: + H.push(dq_heap[v].heap[0]) + else: + # Duplicate wasn't in H, just remove from row v heap + dq_heap[v].remove((v, u)) + + # Perform merge + communities[v] = frozenset(communities[u] | communities[v]) + del communities[u] + + # Get neighbor communities connected to the merged communities + u_nbrs = set(dq_dict[u]) + v_nbrs = set(dq_dict[v]) + all_nbrs = (u_nbrs | v_nbrs) - {u, v} + both_nbrs = u_nbrs & v_nbrs + # Update dq for merge of u into v + for w in all_nbrs: + # Calculate new dq value + if w in both_nbrs: + dq_vw = dq_dict[v][w] + dq_dict[u][w] + elif w in v_nbrs: + dq_vw = dq_dict[v][w] - resolution * (a[u] * b[w] + a[w] * b[u]) + else: # w in u_nbrs + dq_vw = dq_dict[u][w] - resolution * (a[v] * b[w] + a[w] * b[v]) + # Update rows v and w + for row, col in [(v, w), (w, v)]: + dq_heap_row = dq_heap[row] + # Update dict for v,w only (u is removed below) + dq_dict[row][col] = dq_vw + # Save old max of per-row heap + if len(dq_heap_row) > 0: + d_oldmax = dq_heap_row.heap[0] + else: + d_oldmax = None + # Add/update heaps + d = (row, col) + d_negdq = -dq_vw + # Save old value for finding heap index + if w in v_nbrs: + # Update existing element in per-row heap + dq_heap_row.update(d, d, priority=d_negdq) + else: + # We're creating a new nonzero element, add to heap + dq_heap_row.push(d, priority=d_negdq) + # Update heap of row maxes if necessary + if d_oldmax is None: + # No entries previously in this row, push new max + H.push(d, priority=d_negdq) + else: + # We've updated an entry in this row, has the max changed? 
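+                    # (H keeps exactly one entry per row, namely that row's
+                    # current best merge.  If the row's top entry or its
+                    # priority changed, the stale entry in H is swapped for
+                    # the new row maximum below.)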
+ row_max = dq_heap_row.heap[0] + if d_oldmax != row_max or d_oldmax.priority != row_max.priority: + H.update(d_oldmax, row_max) + + # Remove row/col u from dq_dict matrix + for w in dq_dict[u]: + # Remove from dict + dq_old = dq_dict[w][u] + del dq_dict[w][u] + # Remove from heaps if we haven't already + if w != v: + # Remove both row and column + for row, col in [(w, u), (u, w)]: + dq_heap_row = dq_heap[row] + # Check if replaced dq is row max + d_old = (row, col) + if dq_heap_row.heap[0] == d_old: + # Update per-row heap and heap of row maxes + dq_heap_row.remove(d_old) + H.remove(d_old) + # Update row max + if len(dq_heap_row) > 0: + H.push(dq_heap_row.heap[0]) + else: + # Only update per-row heap + dq_heap_row.remove(d_old) + + del dq_dict[u] + # Mark row u as deleted, but keep placeholder + dq_heap[u] = MappedQueue() + # Merge u into v and update a + a[v] += a[u] + a[u] = 0 + if directed: + b[v] += b[u] + b[u] = 0 + + yield communities.values() + + +@nx._dispatch(edge_attrs="weight") +def greedy_modularity_communities( + G, + weight=None, + resolution=1, + cutoff=1, + best_n=None, +): + r"""Find communities in G using greedy modularity maximization. + + This function uses Clauset-Newman-Moore greedy modularity maximization [2]_ + to find the community partition with the largest modularity. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until no further increase in modularity is possible (a maximum). + Two keyword arguments adjust the stopping condition. `cutoff` is a lower + limit on the number of communities so you can stop the process before + reaching a maximum (used to save computation time). `best_n` is an upper + limit on the number of communities so you can make the process continue + until at most n communities remain even if the maximum modularity occurs + for more. To obtain exactly n communities, set both `cutoff` and `best_n` to n. + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float, optional (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + cutoff : int, optional (default=1) + A minimum number of communities below which the merging process stops. + The process stops at this number of communities even if modularity + is not maximized. The goal is to let the user stop the process early. + The process stops before the cutoff if it finds a maximum of modularity. + + best_n : int or None, optional (default=None) + A maximum number of communities above which the merging process will + not stop. This forces community merging to continue after modularity + starts to decrease until `best_n` communities remain. + If ``None``, don't force it to continue beyond a maximum. + + Raises + ------ + ValueError : If the `cutoff` or `best_n` value is not in the range + ``[1, G.number_of_nodes()]``, or if `best_n` < `cutoff`. + + Returns + ------- + communities: list + A list of frozensets of nodes, one for each community. 
+ Sorted by length with largest communities first. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> c = nx.community.greedy_modularity_communities(G) + >>> sorted(c[0]) + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + + See Also + -------- + modularity + + References + ---------- + .. [1] Newman, M. E. J. "Networks: An Introduction", page 224 + Oxford University Press 2011. + .. [2] Clauset, A., Newman, M. E., & Moore, C. + "Finding community structure in very large networks." + Physical Review E 70(6), 2004. + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community + Detection" Phys. Rev. E74, 2006. + .. [4] Newman, M. E. J."Analysis of weighted networks" + Physical Review E 70(5 Pt 2):056131, 2004. + """ + if (cutoff < 1) or (cutoff > G.number_of_nodes()): + raise ValueError(f"cutoff must be between 1 and {len(G)}. Got {cutoff}.") + if best_n is not None: + if (best_n < 1) or (best_n > G.number_of_nodes()): + raise ValueError(f"best_n must be between 1 and {len(G)}. Got {best_n}.") + if best_n < cutoff: + raise ValueError(f"Must have best_n >= cutoff. Got {best_n} < {cutoff}") + if best_n == 1: + return [set(G)] + else: + best_n = G.number_of_nodes() + + # retrieve generator object to construct output + community_gen = _greedy_modularity_communities_generator( + G, weight=weight, resolution=resolution + ) + + # construct the first best community + communities = next(community_gen) + + # continue merging communities until one of the breaking criteria is satisfied + while len(communities) > cutoff: + try: + dq = next(community_gen) + # StopIteration occurs when communities are the connected components + except StopIteration: + communities = sorted(communities, key=len, reverse=True) + # if best_n requires more merging, merge big sets for highest modularity + while len(communities) > best_n: + comm1, comm2, *rest = communities + communities = [comm1 ^ comm2] + communities.extend(rest) + return communities + + # keep going unless max_mod is reached or best_n says to merge more + if dq < 0 and len(communities) <= best_n: + break + communities = next(community_gen) + + return sorted(communities, key=len, reverse=True) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def naive_greedy_modularity_communities(G, resolution=1, weight=None): + r"""Find communities in G using greedy modularity maximization. + + This implementation is O(n^4), much slower than alternatives, but it is + provided as an easy-to-understand reference implementation. + + Greedy modularity maximization begins with each node in its own community + and joins the pair of communities that most increases modularity until no + such pair exists. + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + Graph must be simple and undirected. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + list + A list of sets of nodes, one for each community. 
+ Sorted by length with largest communities first. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> c = nx.community.naive_greedy_modularity_communities(G) + >>> sorted(c[0]) + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + + See Also + -------- + greedy_modularity_communities + modularity + """ + # First create one community for each node + communities = [frozenset([u]) for u in G.nodes()] + # Track merges + merges = [] + # Greedily merge communities until no improvement is possible + old_modularity = None + new_modularity = modularity(G, communities, resolution=resolution, weight=weight) + while old_modularity is None or new_modularity > old_modularity: + # Save modularity for comparison + old_modularity = new_modularity + # Find best pair to merge + trial_communities = list(communities) + to_merge = None + for i, u in enumerate(communities): + for j, v in enumerate(communities): + # Skip i==j and empty communities + if j <= i or len(u) == 0 or len(v) == 0: + continue + # Merge communities u and v + trial_communities[j] = u | v + trial_communities[i] = frozenset([]) + trial_modularity = modularity( + G, trial_communities, resolution=resolution, weight=weight + ) + if trial_modularity >= new_modularity: + # Check if strictly better or tie + if trial_modularity > new_modularity: + # Found new best, save modularity and group indexes + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]): + # Break ties by choosing pair with lowest min id + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + # Un-merge + trial_communities[i] = u + trial_communities[j] = v + if to_merge is not None: + # If the best merge improves modularity, use it + merges.append(to_merge) + i, j, dq = to_merge + u, v = communities[i], communities[j] + communities[j] = u | v + communities[i] = frozenset([]) + # Remove empty communities and sort + return sorted((c for c in communities if len(c) > 0), key=len, reverse=True) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/core.py b/phivenv/Lib/site-packages/networkx/algorithms/core.py new file mode 100644 index 0000000000000000000000000000000000000000..09a1275c794b96529439499e872dbcf5dbd04d44 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/core.py @@ -0,0 +1,545 @@ +""" +Find the k-cores of a graph. + +The k-core is found by recursively pruning nodes with degrees less than k. + +See the following references for details: + +An O(m) Algorithm for Cores Decomposition of Networks +Vladimir Batagelj and Matjaz Zaversnik, 2003. +https://arxiv.org/abs/cs.DS/0310049 + +Generalized Cores +Vladimir Batagelj and Matjaz Zaversnik, 2002. +https://arxiv.org/pdf/cs/0202039 + +For directed graphs a more general notion is that of D-cores which +looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core +is the k-core. + +D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy +Christos Giatsidis, Dimitrios M. Thilikos, Michalis Vazirgiannis, ICDM 2011. +http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf + +Multi-scale structure and topological anomaly detection via a new network \ +statistic: The onion decomposition +L. Hébert-Dufresne, J. A. Grochow, and A. 
Allard +Scientific Reports 6, 31708 (2016) +http://doi.org/10.1038/srep31708 + +""" +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for + +__all__ = [ + "core_number", + "k_core", + "k_shell", + "k_crust", + "k_corona", + "k_truss", + "onion_layers", +] + + +@not_implemented_for("multigraph") +@nx._dispatch +def core_number(G): + """Returns the core number for each vertex. + + A k-core is a maximal subgraph that contains nodes of degree k or more. + + The core number of a node is the largest value k of a k-core containing + that node. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + + Returns + ------- + core_number : dictionary + A dictionary keyed by node to the core number. + + Raises + ------ + NetworkXError + The k-core is not implemented for graphs with self loops + or parallel edges. + + Notes + ----- + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + References + ---------- + .. [1] An O(m) Algorithm for Cores Decomposition of Networks + Vladimir Batagelj and Matjaz Zaversnik, 2003. + https://arxiv.org/abs/cs.DS/0310049 + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph has self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." + ) + raise NetworkXError(msg) + degrees = dict(G.degree()) + # Sort nodes by degree. + nodes = sorted(degrees, key=degrees.get) + bin_boundaries = [0] + curr_degree = 0 + for i, v in enumerate(nodes): + if degrees[v] > curr_degree: + bin_boundaries.extend([i] * (degrees[v] - curr_degree)) + curr_degree = degrees[v] + node_pos = {v: pos for pos, v in enumerate(nodes)} + # The initial guess for the core number of a node is its degree. + core = degrees + nbrs = {v: list(nx.all_neighbors(G, v)) for v in G} + for v in nodes: + for u in nbrs[v]: + if core[u] > core[v]: + nbrs[u].remove(v) + pos = node_pos[u] + bin_start = bin_boundaries[core[u]] + node_pos[u] = bin_start + node_pos[nodes[bin_start]] = pos + nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start] + bin_boundaries[core[u]] += 1 + core[u] -= 1 + return core + + +def _core_subgraph(G, k_filter, k=None, core=None): + """Returns the subgraph induced by nodes passing filter `k_filter`. + + Parameters + ---------- + G : NetworkX graph + The graph or directed graph to process + k_filter : filter function + This function filters the nodes chosen. It takes three inputs: + A node of G, the filter's cutoff, and the core dict of the graph. + The function should return a Boolean value. + k : int, optional + The order of the core. If not specified use the max core number. + This value is used as the cutoff for the filter. + core : dict, optional + Precomputed core numbers keyed by node for the graph `G`. + If not specified, the core numbers will be computed from `G`. + + """ + if core is None: + core = core_number(G) + if k is None: + k = max(core.values()) + nodes = (v for v in core if k_filter(v, k, core)) + return G.subgraph(nodes).copy() + + +@nx._dispatch(preserve_all_attrs=True) +def k_core(G, k=None, core_number=None): + """Returns the k-core of G. + + A k-core is a maximal subgraph that contains nodes of degree k or more. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + k : int, optional + The order of the core. If not specified return the main core. 
+ core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-core subgraph + + Raises + ------ + NetworkXError + The k-core is not defined for graphs with self loops or parallel edges. + + Notes + ----- + The main core is the core with the largest degree. + + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + + References + ---------- + .. [1] An O(m) Algorithm for Cores Decomposition of Networks + Vladimir Batagelj and Matjaz Zaversnik, 2003. + https://arxiv.org/abs/cs.DS/0310049 + """ + + def k_filter(v, k, c): + return c[v] >= k + + return _core_subgraph(G, k_filter, k, core_number) + + +@nx._dispatch(preserve_all_attrs=True) +def k_shell(G, k=None, core_number=None): + """Returns the k-shell of G. + + The k-shell is the subgraph induced by nodes with core number k. + That is, nodes in the k-core that are not in the (k+1)-core. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph. + k : int, optional + The order of the shell. If not specified return the outer shell. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + + Returns + ------- + G : NetworkX graph + The k-shell subgraph + + Raises + ------ + NetworkXError + The k-shell is not implemented for graphs with self loops + or parallel edges. + + Notes + ----- + This is similar to k_corona but in that case only neighbors in the + k-core are considered. + + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + k_corona + + + References + ---------- + .. [1] A model of Internet topology using k-shell decomposition + Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt, + and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 + http://www.pnas.org/content/104/27/11150.full + """ + + def k_filter(v, k, c): + return c[v] == k + + return _core_subgraph(G, k_filter, k, core_number) + + +@nx._dispatch(preserve_all_attrs=True) +def k_crust(G, k=None, core_number=None): + """Returns the k-crust of G. + + The k-crust is the graph G with the edges of the k-core removed + and isolated nodes found after the removal of edges are also removed. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph. + k : int, optional + The order of the shell. If not specified return the main crust. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-crust subgraph + + Raises + ------ + NetworkXError + The k-crust is not implemented for graphs with self loops + or parallel edges. + + Notes + ----- + This definition of k-crust is different than the definition in [1]_. + The k-crust in [1]_ is equivalent to the k+1 crust of this algorithm. + + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + + References + ---------- + .. 
[1] A model of Internet topology using k-shell decomposition + Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt, + and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 + http://www.pnas.org/content/104/27/11150.full + """ + # Default for k is one less than in _core_subgraph, so just inline. + # Filter is c[v] <= k + if core_number is None: + core_number = nx.core_number(G) + if k is None: + k = max(core_number.values()) - 1 + nodes = (v for v in core_number if core_number[v] <= k) + return G.subgraph(nodes).copy() + + +@nx._dispatch(preserve_all_attrs=True) +def k_corona(G, k, core_number=None): + """Returns the k-corona of G. + + The k-corona is the subgraph of nodes in the k-core which have + exactly k neighbours in the k-core. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + k : int + The order of the corona. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-corona subgraph + + Raises + ------ + NetworkXError + The k-corona is not defined for graphs with self loops or + parallel edges. + + Notes + ----- + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + + References + ---------- + .. [1] k -core (bootstrap) percolation on complex networks: + Critical phenomena and nonlocal effects, + A. V. Goltsev, S. N. Dorogovtsev, and J. F. F. Mendes, + Phys. Rev. E 73, 056101 (2006) + http://link.aps.org/doi/10.1103/PhysRevE.73.056101 + """ + + def func(v, k, c): + return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k) + + return _core_subgraph(G, func, k, core_number) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(preserve_all_attrs=True) +def k_truss(G, k): + """Returns the k-truss of `G`. + + The k-truss is the maximal induced subgraph of `G` which contains at least + three vertices where every edge is incident to at least `k-2` triangles. + + Parameters + ---------- + G : NetworkX graph + An undirected graph + k : int + The order of the truss + + Returns + ------- + H : NetworkX graph + The k-truss subgraph + + Raises + ------ + NetworkXError + + The k-truss is not defined for graphs with self loops, directed graphs + and multigraphs. + + Notes + ----- + A k-clique is a (k-2)-truss and a k-truss is a (k+1)-core. + + Not implemented for digraphs or graphs with parallel edges or self loops. + + Graph, node, and edge attributes are copied to the subgraph. + + K-trusses were originally defined in [2] which states that the k-truss + is the maximal induced subgraph where each edge belongs to at least + `k-2` triangles. A more recent paper, [1], uses a slightly different + definition requiring that each edge belong to at least `k` triangles. + This implementation uses the original definition of `k-2` triangles. + + References + ---------- + .. [1] Bounds and Algorithms for k-truss. Paul Burkhardt, Vance Faber, + David G. Harris, 2018. https://arxiv.org/abs/1806.05523v2 + .. [2] Trusses: Cohesive Subgraphs for Social Network Analysis. Jonathan + Cohen, 2005. + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph has self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." 
+ ) + raise NetworkXError(msg) + + H = G.copy() + + n_dropped = 1 + while n_dropped > 0: + n_dropped = 0 + to_drop = [] + seen = set() + for u in H: + nbrs_u = set(H[u]) + seen.add(u) + new_nbrs = [v for v in nbrs_u if v not in seen] + for v in new_nbrs: + if len(nbrs_u & set(H[v])) < (k - 2): + to_drop.append((u, v)) + H.remove_edges_from(to_drop) + n_dropped = len(to_drop) + H.remove_nodes_from(list(nx.isolates(H))) + + return H + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch +def onion_layers(G): + """Returns the layer of each vertex in an onion decomposition of the graph. + + The onion decomposition refines the k-core decomposition by providing + information on the internal organization of each k-shell. It is usually + used alongside the `core numbers`. + + Parameters + ---------- + G : NetworkX graph + A simple graph without self loops or parallel edges + + Returns + ------- + od_layers : dictionary + A dictionary keyed by vertex to the onion layer. The layers are + contiguous integers starting at 1. + + Raises + ------ + NetworkXError + The onion decomposition is not implemented for graphs with self loops + or parallel edges or for directed graphs. + + Notes + ----- + Not implemented for graphs with parallel edges or self loops. + + Not implemented for directed graphs. + + See Also + -------- + core_number + + References + ---------- + .. [1] Multi-scale structure and topological anomaly detection via a new + network statistic: The onion decomposition + L. Hébert-Dufresne, J. A. Grochow, and A. Allard + Scientific Reports 6, 31708 (2016) + http://doi.org/10.1038/srep31708 + .. [2] Percolation and the effective structure of complex networks + A. Allard and L. Hébert-Dufresne + Physical Review X 9, 011023 (2019) + http://doi.org/10.1103/PhysRevX.9.011023 + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph contains self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." + ) + raise NetworkXError(msg) + # Dictionaries to register the k-core/onion decompositions. + od_layers = {} + # Adjacency list + neighbors = {v: list(nx.all_neighbors(G, v)) for v in G} + # Effective degree of nodes. + degrees = dict(G.degree()) + # Performs the onion decomposition. + current_core = 1 + current_layer = 1 + # Sets vertices of degree 0 to layer 1, if any. + isolated_nodes = list(nx.isolates(G)) + if len(isolated_nodes) > 0: + for v in isolated_nodes: + od_layers[v] = current_layer + degrees.pop(v) + current_layer = 2 + # Finds the layer for the remaining nodes. + while len(degrees) > 0: + # Sets the order for looking at nodes. + nodes = sorted(degrees, key=degrees.get) + # Sets properly the current core. + min_degree = degrees[nodes[0]] + if min_degree > current_core: + current_core = min_degree + # Identifies vertices in the current layer. + this_layer = [] + for n in nodes: + if degrees[n] > current_core: + break + this_layer.append(n) + # Identifies the core/layer of the vertices in the current layer. + for v in this_layer: + od_layers[v] = current_layer + for n in neighbors[v]: + neighbors[n].remove(v) + degrees[n] = degrees[n] - 1 + degrees.pop(v) + # Updates the layer count. + current_layer = current_layer + 1 + # Returns the dictionaries containing the onion layer of each vertices. 
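+    # (Each pass of the while-loop above peels one layer: the vertices whose
+    # remaining degree does not exceed the current core.  Layers are numbered
+    # by contiguous integers starting at 1, and they refine the k-shells, so
+    # a single layer never spans two different core numbers.)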
+    return od_layers
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/covering.py b/phivenv/Lib/site-packages/networkx/algorithms/covering.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4b44985aa42661f843eb7dfa1f055f53a39fc83
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/covering.py
@@ -0,0 +1,142 @@
+"""Functions related to graph covers."""
+
+from functools import partial
+from itertools import chain
+
+import networkx as nx
+from networkx.utils import arbitrary_element, not_implemented_for
+
+__all__ = ["min_edge_cover", "is_edge_cover"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def min_edge_cover(G, matching_algorithm=None):
+    """Returns the min cardinality edge cover of the graph as a set of edges.
+
+    A smallest edge cover can be found in polynomial time by finding
+    a maximum matching and extending it greedily so that all nodes
+    are covered.  This function follows that process.  A maximum matching
+    algorithm can be specified for the first step of the algorithm.
+    The resulting set may contain one 2-tuple `(u, v)` for each edge
+    (the usual case) or both 2-tuples `(u, v)` and `(v, u)` for each
+    edge.  The latter is only done when a bipartite matching algorithm
+    is specified as `matching_algorithm`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    matching_algorithm : function
+        A function that returns a maximum cardinality matching for `G`.
+        The function must take one input, the graph `G`, and return
+        either a set of edges (with only one direction for the pair of nodes)
+        or a dictionary mapping each node to its mate.  If not specified,
+        :func:`~networkx.algorithms.matching.max_weight_matching` is used.
+        Common bipartite matching functions include
+        :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
+        or
+        :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`.
+
+    Returns
+    -------
+    min_cover : set
+        A set of the edges in a minimum edge cover in the form of tuples.
+        It contains only one of the equivalent 2-tuples `(u, v)` and `(v, u)`
+        for each edge.  If a bipartite method is used to compute the matching,
+        the returned set contains both the 2-tuples `(u, v)` and `(v, u)`
+        for each edge of a minimum edge cover.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> sorted(nx.min_edge_cover(G))
+    [(2, 1), (3, 0)]
+
+    Notes
+    -----
+    An edge cover of a graph is a set of edges such that every node of
+    the graph is incident to at least one edge of the set.
+    The minimum edge cover is an edge covering of smallest cardinality.
+
+    Due to its implementation, the worst-case running time of this algorithm
+    is bounded by the worst-case running time of the function
+    ``matching_algorithm``.
+
+    Minimum edge cover for `G` can also be found using the `min_edge_covering`
+    function in :mod:`networkx.algorithms.bipartite.covering` which is
+    simply this function with a default matching algorithm of
+    :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`.
+    """
+    if len(G) == 0:
+        return set()
+    if nx.number_of_isolates(G) > 0:
+        # ``min_cover`` does not exist as there is an isolated node
+        raise nx.NetworkXException(
+            "Graph has a node with no edge incident on it, "
+            "so no edge cover exists."
+        )
+    if matching_algorithm is None:
+        matching_algorithm = partial(nx.max_weight_matching, maxcardinality=True)
+    maximum_matching = matching_algorithm(G)
+    # ``min_cover`` is a superset of ``maximum_matching``
+    try:
+        # bipartite matching algs return dict so convert if needed
+        min_cover = set(maximum_matching.items())
+        bipartite_cover = True
+    except AttributeError:
+        min_cover = maximum_matching
+        bipartite_cover = False
+    # iterate for uncovered nodes
+    uncovered_nodes = set(G) - {v for u, v in min_cover} - {u for u, v in min_cover}
+    for v in uncovered_nodes:
+        # Since `v` is uncovered, each edge incident to `v` will join it
+        # with a covered node (otherwise, if there were an edge joining
+        # uncovered nodes `u` and `v`, the maximum matching algorithm
+        # would have found it), so we can choose an arbitrary edge
+        # incident to `v`.  (This applies only in a simple graph, not a
+        # multigraph.)
+        u = arbitrary_element(G[v])
+        min_cover.add((u, v))
+        if bipartite_cover:
+            min_cover.add((v, u))
+    return min_cover
+
+
+@not_implemented_for("directed")
+@nx._dispatch
+def is_edge_cover(G, cover):
+    """Decides whether a set of edges is a valid edge cover of the graph.
+
+    Given a set of edges, we can decide whether it is an edge covering by
+    checking whether every node of the graph is incident to at least one
+    edge of the set.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected bipartite graph.
+
+    cover : set
+        Set of edges to be checked.
+
+    Returns
+    -------
+    bool
+        Whether the set of edges is a valid edge cover of the graph.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> cover = {(2, 1), (3, 0)}
+    >>> nx.is_edge_cover(G, cover)
+    True
+
+    Notes
+    -----
+    An edge cover of a graph is a set of edges such that every node of
+    the graph is incident to at least one edge of the set.
+    """
+    return set(G) <= set(chain.from_iterable(cover))
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/cuts.py b/phivenv/Lib/site-packages/networkx/algorithms/cuts.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce455eb47c86ad369abc2ce7e426cc21b5c7c4b1
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/cuts.py
@@ -0,0 +1,400 @@
+"""Functions for finding and evaluating cuts in a graph."""
+
+from itertools import chain
+
+import networkx as nx
+
+__all__ = [
+    "boundary_expansion",
+    "conductance",
+    "cut_size",
+    "edge_expansion",
+    "mixing_expansion",
+    "node_expansion",
+    "normalized_cut_size",
+    "volume",
+]
+
+
+# TODO STILL NEED TO UPDATE ALL THE DOCUMENTATION!
+
+
+@nx._dispatch(edge_attrs="weight")
+def cut_size(G, S, T=None, weight=None):
+    """Returns the size of the cut between two sets of nodes.
+
+    A *cut* is a partition of the nodes of a graph into two sets.  The
+    *cut size* is the sum of the weights of the edges "between" the two
+    sets of nodes.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    S : collection
+        A collection of nodes in `G`.
+
+    T : collection
+        A collection of nodes in `G`.  If not specified, this is taken to
+        be the set complement of `S`.
+
+    weight : object
+        Edge attribute key to use as weight.  If not specified, edges
+        have weight one.
+
+    Returns
+    -------
+    number
+        Total weight of all edges from nodes in set `S` to nodes in
+        set `T` (and, in the case of directed graphs, all edges from
+        nodes in `T` to nodes in `S`).
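+
+    Equivalently, this is
+    ``sum(wt for u, v, wt in nx.edge_boundary(G, S, T, data=weight, default=1))``,
+    with the boundary in the opposite direction added as well when `G`
+    is directed, which is exactly what the implementation computes.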
+
+    Examples
+    --------
+    In the graph with two cliques joined by a single edge, the natural
+    bipartition of the graph into two blocks, one for each clique,
+    yields a cut of weight one::
+
+        >>> G = nx.barbell_graph(3, 0)
+        >>> S = {0, 1, 2}
+        >>> T = {3, 4, 5}
+        >>> nx.cut_size(G, S, T)
+        1
+
+    Each parallel edge in a multigraph is counted when determining the
+    cut size::
+
+        >>> G = nx.MultiGraph(["ab", "ab"])
+        >>> S = {"a"}
+        >>> T = {"b"}
+        >>> nx.cut_size(G, S, T)
+        2
+
+    Notes
+    -----
+    In a multigraph, the cut size is the total weight of edges including
+    multiplicity.
+
+    """
+    edges = nx.edge_boundary(G, S, T, data=weight, default=1)
+    if G.is_directed():
+        edges = chain(edges, nx.edge_boundary(G, T, S, data=weight, default=1))
+    return sum(weight for u, v, weight in edges)
+
+
+@nx._dispatch(edge_attrs="weight")
+def volume(G, S, weight=None):
+    """Returns the volume of a set of nodes.
+
+    The *volume* of a set *S* is the sum of the (out-)degrees of nodes
+    in *S* (taking into account parallel edges in multigraphs). [1]
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    S : collection
+        A collection of nodes in `G`.
+
+    weight : object
+        Edge attribute key to use as weight.  If not specified, edges
+        have weight one.
+
+    Returns
+    -------
+    number
+        The volume of the set of nodes represented by `S` in the graph `G`.
+
+    See also
+    --------
+    conductance
+    cut_size
+    edge_expansion
+    edge_boundary
+    normalized_cut_size
+
+    References
+    ----------
+    .. [1] David Gleich.
+           *Hierarchical Directed Spectral Graph Partitioning*.
+
+    """
+    degree = G.out_degree if G.is_directed() else G.degree
+    return sum(d for v, d in degree(S, weight=weight))
+
+
+@nx._dispatch(edge_attrs="weight")
+def normalized_cut_size(G, S, T=None, weight=None):
+    """Returns the normalized size of the cut between two sets of nodes.
+
+    The *normalized cut size* is the cut size times the sum of the
+    reciprocals of the volumes of the two sets. [1]
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    S : collection
+        A collection of nodes in `G`.
+
+    T : collection
+        A collection of nodes in `G`.
+
+    weight : object
+        Edge attribute key to use as weight.  If not specified, edges
+        have weight one.
+
+    Returns
+    -------
+    number
+        The normalized cut size between the two sets `S` and `T`.
+
+    Notes
+    -----
+    In a multigraph, the cut size is the total weight of edges including
+    multiplicity.
+
+    See also
+    --------
+    conductance
+    cut_size
+    edge_expansion
+    volume
+
+    References
+    ----------
+    .. [1] David Gleich.
+           *Hierarchical Directed Spectral Graph Partitioning*.
+
+    """
+    if T is None:
+        T = set(G) - set(S)
+    num_cut_edges = cut_size(G, S, T=T, weight=weight)
+    volume_S = volume(G, S, weight=weight)
+    volume_T = volume(G, T, weight=weight)
+    return num_cut_edges * ((1 / volume_S) + (1 / volume_T))
+
+
+@nx._dispatch(edge_attrs="weight")
+def conductance(G, S, T=None, weight=None):
+    """Returns the conductance of two sets of nodes.
+
+    The *conductance* is the quotient of the cut size and the smaller of
+    the volumes of the two sets. [1]
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    S : collection
+        A collection of nodes in `G`.
+
+    T : collection
+        A collection of nodes in `G`.
+
+    weight : object
+        Edge attribute key to use as weight.  If not specified, edges
+        have weight one.
+
+    Returns
+    -------
+    number
+        The conductance between the two sets `S` and `T`.
+
+    See also
+    --------
+    cut_size
+    edge_expansion
+    normalized_cut_size
+    volume
+
+    References
+    ----------
+    ..
[1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T, weight=weight) + volume_S = volume(G, S, weight=weight) + volume_T = volume(G, T, weight=weight) + return num_cut_edges / min(volume_S, volume_T) + + +@nx._dispatch(edge_attrs="weight") +def edge_expansion(G, S, T=None, weight=None): + """Returns the edge expansion between two node sets. + + The *edge expansion* is the quotient of the cut size and the smaller + of the cardinalities of the two sets. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The edge expansion between the two sets `S` and `T`. + + See also + -------- + boundary_expansion + mixing_expansion + node_expansion + + References + ---------- + .. [1] Fan Chung. + *Spectral Graph Theory*. + (CBMS Regional Conference Series in Mathematics, No. 92), + American Mathematical Society, 1997, ISBN 0-8218-0315-8 + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T=T, weight=weight) + return num_cut_edges / min(len(S), len(T)) + + +@nx._dispatch(edge_attrs="weight") +def mixing_expansion(G, S, T=None, weight=None): + """Returns the mixing expansion between two node sets. + + The *mixing expansion* is the quotient of the cut size and twice the + number of edges in the graph. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The mixing expansion between the two sets `S` and `T`. + + See also + -------- + boundary_expansion + edge_expansion + node_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends + in Theoretical Computer Science* 7.1–3 (2011): 1–336. + + + """ + num_cut_edges = cut_size(G, S, T=T, weight=weight) + num_total_edges = G.number_of_edges() + return num_cut_edges / (2 * num_total_edges) + + +# TODO What is the generalization to two arguments, S and T? Does the +# denominator become `min(len(S), len(T))`? +@nx._dispatch +def node_expansion(G, S): + """Returns the node expansion of the set `S`. + + The *node expansion* is the quotient of the size of the node + boundary of *S* and the cardinality of *S*. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + Returns + ------- + number + The node expansion of the set `S`. + + See also + -------- + boundary_expansion + edge_expansion + mixing_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends + in Theoretical Computer Science* 7.1–3 (2011): 1–336. + + + """ + neighborhood = set(chain.from_iterable(G.neighbors(v) for v in S)) + return len(neighborhood) / len(S) + + +# TODO What is the generalization to two arguments, S and T? Does the +# denominator become `min(len(S), len(T))`? +@nx._dispatch +def boundary_expansion(G, S): + """Returns the boundary expansion of the set `S`. + + The *boundary expansion* is the quotient of the size + of the node boundary and the cardinality of *S*. 
[1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + Returns + ------- + number + The boundary expansion of the set `S`. + + See also + -------- + edge_expansion + mixing_expansion + node_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends in Theoretical Computer Science* + 7.1–3 (2011): 1–336. + + + """ + return len(nx.node_boundary(G, S)) / len(S) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/cycles.py b/phivenv/Lib/site-packages/networkx/algorithms/cycles.py new file mode 100644 index 0000000000000000000000000000000000000000..9149e9eb10dcf4dfd99cfeab80f90ede47a71d95 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/cycles.py @@ -0,0 +1,1230 @@ +""" +======================== +Cycle finding algorithms +======================== +""" + +from collections import Counter, defaultdict +from itertools import combinations, product +from math import inf + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "cycle_basis", + "simple_cycles", + "recursive_simple_cycles", + "find_cycle", + "minimum_cycle_basis", + "chordless_cycles", + "girth", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def cycle_basis(G, root=None): + """Returns a list of cycles which form a basis for cycles of G. + + A basis for cycles of a network is a minimal collection of + cycles such that any cycle in the network can be written + as a sum of cycles in the basis. Here summation of cycles + is defined as "exclusive or" of the edges. Cycle bases are + useful, e.g. when deriving equations for electric circuits + using Kirchhoff's Laws. + + Parameters + ---------- + G : NetworkX Graph + root : node, optional + Specify starting node for basis. + + Returns + ------- + A list of cycle lists. Each cycle list is a list of nodes + which forms a cycle (loop) in G. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [0, 3, 4, 5]) + >>> nx.cycle_basis(G, 0) + [[3, 4, 5, 0], [1, 2, 3, 0]] + + Notes + ----- + This is adapted from algorithm CACM 491 [1]_. + + References + ---------- + .. [1] Paton, K. An algorithm for finding a fundamental set of + cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518. + + See Also + -------- + simple_cycles + """ + gnodes = dict.fromkeys(G) # set-like object that maintains node order + cycles = [] + while gnodes: # loop over connected components + if root is None: + root = gnodes.popitem()[0] + stack = [root] + pred = {root: root} + used = {root: set()} + while stack: # walk the spanning tree finding cycles + z = stack.pop() # use last-in so cycles easier to find + zused = used[z] + for nbr in G[z]: + if nbr not in used: # new node + pred[nbr] = z + stack.append(nbr) + used[nbr] = {z} + elif nbr == z: # self loops + cycles.append([z]) + elif nbr not in zused: # found a cycle + pn = used[nbr] + cycle = [nbr, z] + p = pred[z] + while p not in pn: + cycle.append(p) + p = pred[p] + cycle.append(p) + cycles.append(cycle) + used[nbr].add(z) + for node in pred: + gnodes.pop(node, None) + root = None + return cycles + + +@nx._dispatch +def simple_cycles(G, length_bound=None): + """Find simple cycles (elementary circuits) of a graph. + + A `simple cycle`, or `elementary circuit`, is a closed path where + no node appears twice. In a directed graph, two simple cycles are distinct + if they are not cyclic permutations of each other. 
In an undirected graph, + two simple cycles are distinct if they are not cyclic permutations of each + other nor of the other's reversal. + + Optionally, the cycles are bounded in length. In the unbounded case, we use + a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. In + the bounded case, we use a version of the algorithm of Gupta and + Suzumura[2]_. There may be better algorithms for some cases [3]_ [4]_ [5]_. + + The algorithms of Johnson, and Gupta and Suzumura, are enhanced by some + well-known preprocessing techniques. When G is directed, we restrict our + attention to strongly connected components of G, generate all simple cycles + containing a certain node, remove that node, and further decompose the + remainder into strongly connected components. When G is undirected, we + restrict our attention to biconnected components, generate all simple cycles + containing a particular edge, remove that edge, and further decompose the + remainder into biconnected components. + + Note that multigraphs are supported by this function -- and in undirected + multigraphs, a pair of parallel edges is considered a cycle of length 2. + Likewise, self-loops are considered to be cycles of length 1. We define + cycles as sequences of nodes; so the presence of loops and parallel edges + does not change the number of simple cycles in a graph. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + length_bound : int or None, optional (default=None) + If length_bound is an int, generate all simple cycles of G with length at + most length_bound. Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + Examples + -------- + >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + >>> G = nx.DiGraph(edges) + >>> sorted(nx.simple_cycles(G)) + [[0], [0, 1, 2], [0, 2], [1, 2], [2]] + + To filter the cycles so that they don't include certain nodes or edges, + copy your graph and eliminate those nodes or edges before calling. + For example, to exclude self-loops from the above example: + + >>> H = G.copy() + >>> H.remove_edges_from(nx.selfloop_edges(G)) + >>> sorted(nx.simple_cycles(H)) + [[0, 1, 2], [0, 2], [1, 2]] + + Notes + ----- + When length_bound is None, the time complexity is $O((n+e)(c+1))$ for $n$ + nodes, $e$ edges and $c$ simple circuits. Otherwise, when length_bound > 1, + the time complexity is $O((c+n)(k-1)d^k)$ where $d$ is the average degree of + the nodes of G and $k$ = length_bound. + + Raises + ------ + ValueError + when length_bound < 0. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + .. [2] Finding All Bounded-Length Simple Cycles in a Directed Graph + A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094 + .. [3] Enumerating the cycles of a digraph: a new preprocessing strategy. + G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982. + .. [4] A search strategy for the elementary cycles of a directed graph. + J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS, + v. 16, no. 2, 192-204, 1976. + .. [5] Optimal Listing of Cycles and st-Paths in Undirected Graphs + R. Ferreira and R. Grossi and A. Marino and N. Pisanti and R. Rizzi and + G. 
Sacomoto https://arxiv.org/abs/1205.2766 + + See Also + -------- + cycle_basis + chordless_cycles + """ + + if length_bound is not None: + if length_bound == 0: + return + elif length_bound < 0: + raise ValueError("length bound must be non-negative") + + directed = G.is_directed() + yield from ([v] for v, Gv in G.adj.items() if v in Gv) + + if length_bound is not None and length_bound == 1: + return + + if G.is_multigraph() and not directed: + visited = set() + for u, Gu in G.adj.items(): + multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited) + yield from ([u, v] for v, m in multiplicity if m > 1) + visited.add(u) + + # explicitly filter out loops; implicitly filter out parallel edges + if directed: + G = nx.DiGraph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u) + else: + G = nx.Graph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u) + + # this case is not strictly necessary but improves performance + if length_bound is not None and length_bound == 2: + if directed: + visited = set() + for u, Gu in G.adj.items(): + yield from ( + [v, u] for v in visited.intersection(Gu) if G.has_edge(v, u) + ) + visited.add(u) + return + + if directed: + yield from _directed_cycle_search(G, length_bound) + else: + yield from _undirected_cycle_search(G, length_bound) + + +def _directed_cycle_search(G, length_bound): + """A dispatch function for `simple_cycles` for directed graphs. + + We generate all cycles of G through binary partition. + + 1. Pick a node v in G which belongs to at least one cycle + a. Generate all cycles of G which contain the node v. + b. Recursively generate all cycles of G \\ v. + + This is accomplished through the following: + + 1. Compute the strongly connected components SCC of G. + 2. Select and remove a biconnected component C from BCC. Select a + non-tree edge (u, v) of a depth-first search of G[C]. + 3. For each simple cycle P containing v in G[C], yield P. + 4. Add the biconnected components of G[C \\ v] to BCC. + + If the parameter length_bound is not None, then step 3 will be limited to + simple cycles of length at most length_bound. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + length_bound : int or None + If length_bound is an int, generate all simple cycles of G with length at most length_bound. + Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + """ + + scc = nx.strongly_connected_components + components = [c for c in scc(G) if len(c) >= 2] + while components: + c = components.pop() + Gc = G.subgraph(c) + v = next(iter(c)) + if length_bound is None: + yield from _johnson_cycle_search(Gc, [v]) + else: + yield from _bounded_cycle_search(Gc, [v], length_bound) + # delete v after searching G, to make sure we can find v + G.remove_node(v) + components.extend(c for c in scc(Gc) if len(c) >= 2) + + +def _undirected_cycle_search(G, length_bound): + """A dispatch function for `simple_cycles` for undirected graphs. + + We generate all cycles of G through binary partition. + + 1. Pick an edge (u, v) in G which belongs to at least one cycle + a. Generate all cycles of G which contain the edge (u, v) + b. Recursively generate all cycles of G \\ (u, v) + + This is accomplished through the following: + + 1. Compute the biconnected components BCC of G. + 2. Select and remove a biconnected component C from BCC. Select a + non-tree edge (u, v) of a depth-first search of G[C]. + 3. 
For each (v -> u) path P remaining in G[C] \\ (u, v), yield P. + 4. Add the biconnected components of G[C] \\ (u, v) to BCC. + + If the parameter length_bound is not None, then step 3 will be limited to simple paths + of length at most length_bound. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph + + length_bound : int or None + If length_bound is an int, generate all simple cycles of G with length at most length_bound. + Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + """ + + bcc = nx.biconnected_components + components = [c for c in bcc(G) if len(c) >= 3] + while components: + c = components.pop() + Gc = G.subgraph(c) + uv = list(next(iter(Gc.edges))) + G.remove_edge(*uv) + # delete (u, v) before searching G, to avoid fake 3-cycles [u, v, u] + if length_bound is None: + yield from _johnson_cycle_search(Gc, uv) + else: + yield from _bounded_cycle_search(Gc, uv, length_bound) + components.extend(c for c in bcc(Gc) if len(c) >= 3) + + +class _NeighborhoodCache(dict): + """Very lightweight graph wrapper which caches neighborhoods as list. + + This dict subclass uses the __missing__ functionality to query graphs for + their neighborhoods, and store the result as a list. This is used to avoid + the performance penalty incurred by subgraph views. + """ + + def __init__(self, G): + self.G = G + + def __missing__(self, v): + Gv = self[v] = list(self.G[v]) + return Gv + + +def _johnson_cycle_search(G, path): + """The main loop of the cycle-enumeration algorithm of Johnson. + + Parameters + ---------- + G : NetworkX Graph or DiGraph + A graph + + path : list + A cycle prefix. All cycles generated will begin with this prefix. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + + """ + + G = _NeighborhoodCache(G) + blocked = set(path) + B = defaultdict(set) # graph portions that yield no elementary circuit + start = path[0] + stack = [iter(G[path[-1]])] + closed = [False] + while stack: + nbrs = stack[-1] + for w in nbrs: + if w == start: + yield path[:] + closed[-1] = True + elif w not in blocked: + path.append(w) + closed.append(False) + stack.append(iter(G[w])) + blocked.add(w) + break + else: # no more nbrs + stack.pop() + v = path.pop() + if closed.pop(): + if closed: + closed[-1] = True + unblock_stack = {v} + while unblock_stack: + u = unblock_stack.pop() + if u in blocked: + blocked.remove(u) + unblock_stack.update(B[u]) + B[u].clear() + else: + for w in G[v]: + B[w].add(v) + + +def _bounded_cycle_search(G, path, length_bound): + """The main loop of the cycle-enumeration algorithm of Gupta and Suzumura. + + Parameters + ---------- + G : NetworkX Graph or DiGraph + A graph + + path : list + A cycle prefix. All cycles generated will begin with this prefix. + + length_bound: int + A length bound. All cycles generated will have length at most length_bound. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + References + ---------- + .. [1] Finding All Bounded-Length Simple Cycles in a Directed Graph + A. Gupta and T. 
Suzumura https://arxiv.org/abs/2105.10094 + + """ + G = _NeighborhoodCache(G) + lock = {v: 0 for v in path} + B = defaultdict(set) + start = path[0] + stack = [iter(G[path[-1]])] + blen = [length_bound] + while stack: + nbrs = stack[-1] + for w in nbrs: + if w == start: + yield path[:] + blen[-1] = 1 + elif len(path) < lock.get(w, length_bound): + path.append(w) + blen.append(length_bound) + lock[w] = len(path) + stack.append(iter(G[w])) + break + else: + stack.pop() + v = path.pop() + bl = blen.pop() + if blen: + blen[-1] = min(blen[-1], bl) + if bl < length_bound: + relax_stack = [(bl, v)] + while relax_stack: + bl, u = relax_stack.pop() + if lock.get(u, length_bound) < length_bound - bl + 1: + lock[u] = length_bound - bl + 1 + relax_stack.extend((bl + 1, w) for w in B[u].difference(path)) + else: + for w in G[v]: + B[w].add(v) + + +@nx._dispatch +def chordless_cycles(G, length_bound=None): + """Find simple chordless cycles of a graph. + + A `simple cycle` is a closed path where no node appears twice. In a simple + cycle, a `chord` is an additional edge between two nodes in the cycle. A + `chordless cycle` is a simple cycle without chords. Said differently, a + chordless cycle is a cycle C in a graph G where the number of edges in the + induced graph G[C] is equal to the length of `C`. + + Note that some care must be taken in the case that G is not a simple graph + nor a simple digraph. Some authors limit the definition of chordless cycles + to have a prescribed minimum length; we do not. + + 1. We interpret self-loops to be chordless cycles, except in multigraphs + with multiple loops in parallel. Likewise, in a chordless cycle of + length greater than 1, there can be no nodes with self-loops. + + 2. We interpret directed two-cycles to be chordless cycles, except in + multi-digraphs when any edge in a two-cycle has a parallel copy. + + 3. We interpret parallel pairs of undirected edges as two-cycles, except + when a third (or more) parallel edge exists between the two nodes. + + 4. Generalizing the above, edges with parallel clones may not occur in + chordless cycles. + + In a directed graph, two chordless cycles are distinct if they are not + cyclic permutations of each other. In an undirected graph, two chordless + cycles are distinct if they are not cyclic permutations of each other nor of + the other's reversal. + + Optionally, the cycles are bounded in length. + + We use an algorithm strongly inspired by that of Dias et al [1]_. It has + been modified in the following ways: + + 1. Recursion is avoided, per Python's limitations + + 2. The labeling function is not necessary, because the starting paths + are chosen (and deleted from the host graph) to prevent multiple + occurrences of the same path + + 3. The search is optionally bounded at a specified length + + 4. Support for directed graphs is provided by extending cycles along + forward edges, and blocking nodes along forward and reverse edges + + 5. Support for multigraphs is provided by omitting digons from the set + of forward edges + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + length_bound : int or None, optional (default=None) + If length_bound is an int, generate all simple cycles of G with length at + most length_bound. Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. 
+ + Examples + -------- + >>> sorted(list(nx.chordless_cycles(nx.complete_graph(4)))) + [[1, 0, 2], [1, 0, 3], [2, 0, 3], [2, 1, 3]] + + Notes + ----- + When length_bound is None, and the graph is simple, the time complexity is + $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ chordless cycles. + + Raises + ------ + ValueError + when length_bound < 0. + + References + ---------- + .. [1] Efficient enumeration of chordless cycles + E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi + https://arxiv.org/abs/1309.1051 + + See Also + -------- + simple_cycles + """ + + if length_bound is not None: + if length_bound == 0: + return + elif length_bound < 0: + raise ValueError("length bound must be non-negative") + + directed = G.is_directed() + multigraph = G.is_multigraph() + + if multigraph: + yield from ([v] for v, Gv in G.adj.items() if len(Gv.get(v, ())) == 1) + else: + yield from ([v] for v, Gv in G.adj.items() if v in Gv) + + if length_bound is not None and length_bound == 1: + return + + # Nodes with loops cannot belong to longer cycles. Let's delete them here. + # also, we implicitly reduce the multiplicity of edges down to 1 in the case + # of multiedges. + if directed: + F = nx.DiGraph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu) + B = F.to_undirected(as_view=False) + else: + F = nx.Graph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu) + B = None + + # If we're given a multigraph, we have a few cases to consider with parallel + # edges. + # + # 1. If we have 2 or more edges in parallel between the nodes (u, v), we + # must not construct longer cycles along (u, v). + # 2. If G is not directed, then a pair of parallel edges between (u, v) is a + # chordless cycle unless there exists a third (or more) parallel edge. + # 3. If G is directed, then parallel edges do not form cycles, but do + # preclude back-edges from forming cycles (handled in the next section), + # Thus, if an edge (u, v) is duplicated and the reverse (v, u) is also + # present, then we remove both from F. + # + # In directed graphs, we need to consider both directions that edges can + # take, so iterate over all edges (u, v) and possibly (v, u). In undirected + # graphs, we need to be a little careful to only consider every edge once, + # so we use a "visited" set to emulate node-order comparisons. + + if multigraph: + if not directed: + B = F.copy() + visited = set() + for u, Gu in G.adj.items(): + if directed: + multiplicity = ((v, len(Guv)) for v, Guv in Gu.items()) + for v, m in multiplicity: + if m > 1: + F.remove_edges_from(((u, v), (v, u))) + else: + multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited) + for v, m in multiplicity: + if m == 2: + yield [u, v] + if m > 1: + F.remove_edge(u, v) + visited.add(u) + + # If we're given a directed graphs, we need to think about digons. If we + # have two edges (u, v) and (v, u), then that's a two-cycle. If either edge + # was duplicated above, then we removed both from F. So, any digons we find + # here are chordless. After finding digons, we remove their edges from F + # to avoid traversing them in the search for chordless cycles. + if directed: + for u, Fu in F.adj.items(): + digons = [[u, v] for v in Fu if F.has_edge(v, u)] + yield from digons + F.remove_edges_from(digons) + F.remove_edges_from(e[::-1] for e in digons) + + if length_bound is not None and length_bound == 2: + return + + # Now, we prepare to search for cycles. We have removed all cycles of + # lengths 1 and 2, so F is a simple graph or simple digraph. 
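+    # (Every cycle that remains therefore has length at least 3, which is why
+    # only components with more than 2 nodes are searched below.)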
We repeatedly + # separate digraphs into their strongly connected components, and undirected + # graphs into their biconnected components. For each component, we pick a + # node v, search for chordless cycles based at each "stem" (u, v, w), and + # then remove v from that component before separating the graph again. + if directed: + separate = nx.strongly_connected_components + + # Directed stems look like (u -> v -> w), so we use the product of + # predecessors of v with successors of v. + def stems(C, v): + for u, w in product(C.pred[v], C.succ[v]): + if not G.has_edge(u, w): # omit stems with acyclic chords + yield [u, v, w], F.has_edge(w, u) + + else: + separate = nx.biconnected_components + + # Undirected stems look like (u ~ v ~ w), but we must not also search + # (w ~ v ~ u), so we use combinations of v's neighbors of length 2. + def stems(C, v): + yield from (([u, v, w], F.has_edge(w, u)) for u, w in combinations(C[v], 2)) + + components = [c for c in separate(F) if len(c) > 2] + while components: + c = components.pop() + v = next(iter(c)) + Fc = F.subgraph(c) + Fcc = Bcc = None + for S, is_triangle in stems(Fc, v): + if is_triangle: + yield S + else: + if Fcc is None: + Fcc = _NeighborhoodCache(Fc) + Bcc = Fcc if B is None else _NeighborhoodCache(B.subgraph(c)) + yield from _chordless_cycle_search(Fcc, Bcc, S, length_bound) + + components.extend(c for c in separate(F.subgraph(c - {v})) if len(c) > 2) + + +def _chordless_cycle_search(F, B, path, length_bound): + """The main loop for chordless cycle enumeration. + + This algorithm is strongly inspired by that of Dias et al [1]_. It has been + modified in the following ways: + + 1. Recursion is avoided, per Python's limitations + + 2. The labeling function is not necessary, because the starting paths + are chosen (and deleted from the host graph) to prevent multiple + occurrences of the same path + + 3. The search is optionally bounded at a specified length + + 4. Support for directed graphs is provided by extending cycles along + forward edges, and blocking nodes along forward and reverse edges + + 5. Support for multigraphs is provided by omitting digons from the set + of forward edges + + Parameters + ---------- + F : _NeighborhoodCache + A graph of forward edges to follow in constructing cycles + + B : _NeighborhoodCache + A graph of blocking edges to prevent the production of chordless cycles + + path : list + A cycle prefix. All cycles generated will begin with this prefix. + + length_bound : int + A length bound. All cycles generated will have length at most length_bound. + + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + References + ---------- + .. [1] Efficient enumeration of chordless cycles + E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi + https://arxiv.org/abs/1309.1051 + + """ + blocked = defaultdict(int) + target = path[0] + blocked[path[1]] = 1 + for w in path[1:]: + for v in B[w]: + blocked[v] += 1 + + stack = [iter(F[path[2]])] + while stack: + nbrs = stack[-1] + for w in nbrs: + if blocked[w] == 1 and (length_bound is None or len(path) < length_bound): + Fw = F[w] + if target in Fw: + yield path + [w] + else: + Bw = B[w] + if target in Bw: + continue + for v in Bw: + blocked[v] += 1 + path.append(w) + stack.append(iter(Fw)) + break + else: + stack.pop() + for v in B[path.pop()]: + blocked[v] -= 1 + + +@not_implemented_for("undirected") +@nx._dispatch +def recursive_simple_cycles(G): + """Find simple cycles (elementary circuits) of a directed graph. 
+ + A `simple cycle`, or `elementary circuit`, is a closed path where + no node appears twice. Two elementary circuits are distinct if they + are not cyclic permutations of each other. + + This version uses a recursive algorithm to build a list of cycles. + You should probably use the iterator version called simple_cycles(). + Warning: This recursive version uses lots of RAM! + It appears in NetworkX for pedagogical value. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Returns + ------- + A list of cycles, where each cycle is represented by a list of nodes + along the cycle. + + Example: + + >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + >>> G = nx.DiGraph(edges) + >>> nx.recursive_simple_cycles(G) + [[0], [2], [0, 1, 2], [0, 2], [1, 2]] + + Notes + ----- + The implementation follows pp. 79-80 in [1]_. + + The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ + elementary circuits. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + + See Also + -------- + simple_cycles, cycle_basis + """ + + # Jon Olav Vik, 2010-08-09 + def _unblock(thisnode): + """Recursively unblock and remove nodes from B[thisnode].""" + if blocked[thisnode]: + blocked[thisnode] = False + while B[thisnode]: + _unblock(B[thisnode].pop()) + + def circuit(thisnode, startnode, component): + closed = False # set to True if elementary path is closed + path.append(thisnode) + blocked[thisnode] = True + for nextnode in component[thisnode]: # direct successors of thisnode + if nextnode == startnode: + result.append(path[:]) + closed = True + elif not blocked[nextnode]: + if circuit(nextnode, startnode, component): + closed = True + if closed: + _unblock(thisnode) + else: + for nextnode in component[thisnode]: + if thisnode not in B[nextnode]: # TODO: use set for speedup? + B[nextnode].append(thisnode) + path.pop() # remove thisnode from path + return closed + + path = [] # stack of nodes in current path + blocked = defaultdict(bool) # vertex: blocked from search? + B = defaultdict(list) # graph portions that yield no elementary circuit + result = [] # list to accumulate the circuits found + + # Johnson's algorithm exclude self cycle edges like (v, v) + # To be backward compatible, we record those cycles in advance + # and then remove from subG + for v in G: + if G.has_edge(v, v): + result.append([v]) + G.remove_edge(v, v) + + # Johnson's algorithm requires some ordering of the nodes. + # They might not be sortable so we assign an arbitrary ordering. 
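+    # For example, a graph created as nx.DiGraph([("b", "a")]) is assigned the
+    # ordering {"b": 0, "a": 1}: node insertion order, not sorted order.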
+ ordering = dict(zip(G, range(len(G)))) + for s in ordering: + # Build the subgraph induced by s and following nodes in the ordering + subgraph = G.subgraph(node for node in G if ordering[node] >= ordering[s]) + # Find the strongly connected component in the subgraph + # that contains the least node according to the ordering + strongcomp = nx.strongly_connected_components(subgraph) + mincomp = min(strongcomp, key=lambda ns: min(ordering[n] for n in ns)) + component = G.subgraph(mincomp) + if len(component) > 1: + # smallest node in the component according to the ordering + startnode = min(component, key=ordering.__getitem__) + for node in component: + blocked[node] = False + B[node][:] = [] + dummy = circuit(startnode, startnode, component) + return result + + +@nx._dispatch +def find_cycle(G, source=None, orientation=None): + """Returns a cycle found via depth-first traversal. + + The cycle is a list of edges indicating the cyclic path. + Orientation of directed edges is controlled by `orientation`. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. + + Returns + ------- + edges : directed edges + A list of directed edges indicating the path taken for the loop. + If no cycle is found, then an exception is raised. + For graphs, an edge is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, an edge is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. + + Raises + ------ + NetworkXNoCycle + If no cycle was found. + + Examples + -------- + In this example, we construct a DAG and find, in the first call, that there + are no directed cycles, and so an exception is raised. In the second call, + we ignore edge orientations and find that there is an undirected cycle. + Note that the second call finds a directed cycle while effectively + traversing an undirected graph, and so, we found an "undirected cycle". + This means that this DAG structure does not form a directed tree (which + is also known as a polytree). + + >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) + >>> nx.find_cycle(G, orientation="original") + Traceback (most recent call last): + ... + networkx.exception.NetworkXNoCycle: No cycle found. 
+ >>> list(nx.find_cycle(G, orientation="ignore")) + [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')] + + See Also + -------- + simple_cycles + """ + if not G.is_directed() or orientation in (None, "original"): + + def tailhead(edge): + return edge[:2] + + elif orientation == "reverse": + + def tailhead(edge): + return edge[1], edge[0] + + elif orientation == "ignore": + + def tailhead(edge): + if edge[-1] == "reverse": + return edge[1], edge[0] + return edge[:2] + + explored = set() + cycle = [] + final_node = None + for start_node in G.nbunch_iter(source): + if start_node in explored: + # No loop is possible. + continue + + edges = [] + # All nodes seen in this iteration of edge_dfs + seen = {start_node} + # Nodes in active path. + active_nodes = {start_node} + previous_head = None + + for edge in nx.edge_dfs(G, start_node, orientation): + # Determine if this edge is a continuation of the active path. + tail, head = tailhead(edge) + if head in explored: + # Then we've already explored it. No loop is possible. + continue + if previous_head is not None and tail != previous_head: + # This edge results from backtracking. + # Pop until we get a node whose head equals the current tail. + # So for example, we might have: + # (0, 1), (1, 2), (2, 3), (1, 4) + # which must become: + # (0, 1), (1, 4) + while True: + try: + popped_edge = edges.pop() + except IndexError: + edges = [] + active_nodes = {tail} + break + else: + popped_head = tailhead(popped_edge)[1] + active_nodes.remove(popped_head) + + if edges: + last_head = tailhead(edges[-1])[1] + if tail == last_head: + break + edges.append(edge) + + if head in active_nodes: + # We have a loop! + cycle.extend(edges) + final_node = head + break + else: + seen.add(head) + active_nodes.add(head) + previous_head = head + + if cycle: + break + else: + explored.update(seen) + + else: + assert len(cycle) == 0 + raise nx.exception.NetworkXNoCycle("No cycle found.") + + # We now have a list of edges which ends on a cycle. + # So we need to remove from the beginning edges that are not relevant. + + for i, edge in enumerate(cycle): + tail, head = tailhead(edge) + if tail == final_node: + break + + return cycle[i:] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def minimum_cycle_basis(G, weight=None): + """Returns a minimum weight cycle basis for G + + Minimum weight means a cycle basis for which the total weight + (length for unweighted graphs) of all the cycles is minimum. + + Parameters + ---------- + G : NetworkX Graph + weight: string + name of the edge attribute to use for edge weights + + Returns + ------- + A list of cycle lists. Each cycle list is a list of nodes + which forms a cycle (loop) in G. Note that the nodes are not + necessarily returned in a order by which they appear in the cycle + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [0, 3, 4, 5]) + >>> nx.minimum_cycle_basis(G) + [[5, 4, 3, 0], [3, 2, 1, 0]] + + References: + [1] Kavitha, Telikepalli, et al. "An O(m^2n) Algorithm for + Minimum Cycle Basis of Graphs." + http://link.springer.com/article/10.1007/s00453-007-9064-z + [2] de Pina, J. 1995. Applications of shortest path methods. + Ph.D. 
thesis, University of Amsterdam, Netherlands + + See Also + -------- + simple_cycles, cycle_basis + """ + # We first split the graph in connected subgraphs + return sum( + (_min_cycle_basis(G.subgraph(c), weight) for c in nx.connected_components(G)), + [], + ) + + +def _min_cycle_basis(G, weight): + cb = [] + # We extract the edges not in a spanning tree. We do not really need a + # *minimum* spanning tree. That is why we call the next function with + # weight=None. Depending on implementation, it may be faster as well + tree_edges = list(nx.minimum_spanning_edges(G, weight=None, data=False)) + chords = G.edges - tree_edges - {(v, u) for u, v in tree_edges} + + # We maintain a set of vectors orthogonal to sofar found cycles + set_orth = [{edge} for edge in chords] + while set_orth: + base = set_orth.pop() + # kth cycle is "parallel" to kth vector in set_orth + cycle_edges = _min_cycle(G, base, weight) + cb.append([v for u, v in cycle_edges]) + + # now update set_orth so that k+1,k+2... th elements are + # orthogonal to the newly found cycle, as per [p. 336, 1] + set_orth = [ + ( + {e for e in orth if e not in base if e[::-1] not in base} + | {e for e in base if e not in orth if e[::-1] not in orth} + ) + if sum((e in orth or e[::-1] in orth) for e in cycle_edges) % 2 + else orth + for orth in set_orth + ] + return cb + + +def _min_cycle(G, orth, weight): + """ + Computes the minimum weight cycle in G, + orthogonal to the vector orth as per [p. 338, 1] + Use (u, 1) to indicate the lifted copy of u (denoted u' in paper). + """ + Gi = nx.Graph() + + # Add 2 copies of each edge in G to Gi. + # If edge is in orth, add cross edge; otherwise in-plane edge + for u, v, wt in G.edges(data=weight, default=1): + if (u, v) in orth or (v, u) in orth: + Gi.add_edges_from([(u, (v, 1)), ((u, 1), v)], Gi_weight=wt) + else: + Gi.add_edges_from([(u, v), ((u, 1), (v, 1))], Gi_weight=wt) + + # find the shortest length in Gi between n and (n, 1) for each n + # Note: Use "Gi_weight" for name of weight attribute + spl = nx.shortest_path_length + lift = {n: spl(Gi, source=n, target=(n, 1), weight="Gi_weight") for n in G} + + # Now compute that short path in Gi, which translates to a cycle in G + start = min(lift, key=lift.get) + end = (start, 1) + min_path_i = nx.shortest_path(Gi, source=start, target=end, weight="Gi_weight") + + # Now we obtain the actual path, re-map nodes in Gi to those in G + min_path = [n if n in G else n[0] for n in min_path_i] + + # Now remove the edges that occur two times + # two passes: flag which edges get kept, then build it + edgelist = list(pairwise(min_path)) + edgeset = set() + for e in edgelist: + if e in edgeset: + edgeset.remove(e) + elif e[::-1] in edgeset: + edgeset.remove(e[::-1]) + else: + edgeset.add(e) + + min_edgelist = [] + for e in edgelist: + if e in edgeset: + min_edgelist.append(e) + edgeset.remove(e) + elif e[::-1] in edgeset: + min_edgelist.append(e[::-1]) + edgeset.remove(e[::-1]) + + return min_edgelist + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def girth(G): + """Returns the girth of the graph. + + The girth of a graph is the length of its shortest cycle, or infinity if + the graph is acyclic. The algorithm follows the description given on the + Wikipedia page [1]_, and runs in time O(mn) on a graph with m edges and n + nodes. 
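+
+    Roughly, a breadth-first search is run from each node; a non-tree edge
+    ``(u, v)`` met at depths ``d(u)`` and ``d(v)`` certifies a closed walk of
+    length ``d(u) + d(v) + 1`` through the root, and the minimum over all
+    searches is the girth.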
+ + Parameters + ---------- + G : NetworkX Graph + + Returns + ------- + int or math.inf + + Examples + -------- + All examples below (except P_5) can easily be checked using Wikipedia, + which has a page for each of these famous graphs. + + >>> nx.girth(nx.chvatal_graph()) + 4 + >>> nx.girth(nx.tutte_graph()) + 4 + >>> nx.girth(nx.petersen_graph()) + 5 + >>> nx.girth(nx.heawood_graph()) + 6 + >>> nx.girth(nx.pappus_graph()) + 6 + >>> nx.girth(nx.path_graph(5)) + inf + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Girth_(graph_theory) + + """ + girth = depth_limit = inf + tree_edge = nx.algorithms.traversal.breadth_first_search.TREE_EDGE + level_edge = nx.algorithms.traversal.breadth_first_search.LEVEL_EDGE + for n in G: + # run a BFS from source n, keeping track of distances; since we want + # the shortest cycle, no need to explore beyond the current minimum length + depth = {n: 0} + for u, v, label in nx.bfs_labeled_edges(G, n): + du = depth[u] + if du > depth_limit: + break + if label is tree_edge: + depth[v] = du + 1 + else: + # if (u, v) is a level edge, the length is du + du + 1 (odd) + # otherwise, it's a forward edge; length is du + (du + 1) + 1 (even) + delta = label is level_edge + length = du + du + 2 - delta + if length < girth: + girth = length + depth_limit = du - delta + + return girth diff --git a/phivenv/Lib/site-packages/networkx/algorithms/d_separation.py b/phivenv/Lib/site-packages/networkx/algorithms/d_separation.py new file mode 100644 index 0000000000000000000000000000000000000000..4322b095822ad750ff016fabeabfeb71540143b1 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/d_separation.py @@ -0,0 +1,457 @@ +""" +Algorithm for testing d-separation in DAGs. + +*d-separation* is a test for conditional independence in probability +distributions that can be factorized using DAGs. It is a purely +graphical test that uses the underlying graph and makes no reference +to the actual distribution parameters. See [1]_ for a formal +definition. + +The implementation is based on the conceptually simple linear time +algorithm presented in [2]_. Refer to [3]_, [4]_ for a couple of +alternative algorithms. + +Here, we provide a brief overview of d-separation and related concepts that +are relevant for understanding it: + +Blocking paths +-------------- + +Before we overview, we introduce the following terminology to describe paths: + +- "open" path: A path between two nodes that can be traversed +- "blocked" path: A path between two nodes that cannot be traversed + +A **collider** is a triplet of nodes along a path that is like the following: +``... u -> c <- v ...``), where 'c' is a common successor of ``u`` and ``v``. A path +through a collider is considered "blocked". When +a node that is a collider, or a descendant of a collider is included in +the d-separating set, then the path through that collider node is "open". If the +path through the collider node is open, then we will call this node an open collider. + +The d-separation set blocks the paths between ``u`` and ``v``. If you include colliders, +or their descendant nodes in the d-separation set, then those colliders will open up, +enabling a path to be traversed if it is not blocked some other way. + +Illustration of D-separation with examples +------------------------------------------ + +For a pair of two nodes, ``u`` and ``v``, all paths are considered open if +there is a path between ``u`` and ``v`` that is not blocked. 
That means, there is an open +path between ``u`` and ``v`` that does not encounter a collider, or a variable in the +d-separating set. + +For example, if the d-separating set is the empty set, then the following paths are +unblocked between ``u`` and ``v``: + +- u <- z -> v +- u -> w -> ... -> z -> v + +If for example, 'z' is in the d-separating set, then 'z' blocks those paths +between ``u`` and ``v``. + +Colliders block a path by default if they and their descendants are not included +in the d-separating set. An example of a path that is blocked when the d-separating +set is empty is: + +- u -> w -> ... -> z <- v + +because 'z' is a collider in this path and 'z' is not in the d-separating set. However, +if 'z' or a descendant of 'z' is included in the d-separating set, then the path through +the collider at 'z' (... -> z <- ...) is now "open". + +D-separation is concerned with blocking all paths between u and v. Therefore, a +d-separating set between ``u`` and ``v`` is one where all paths are blocked. + +D-separation and its applications in probability +------------------------------------------------ + +D-separation is commonly used in probabilistic graphical models. D-separation +connects the idea of probabilistic "dependence" with separation in a graph. If +one assumes the causal Markov condition [5]_, then d-separation implies conditional +independence in probability distributions. + +Examples +-------- + +>>> +>>> # HMM graph with five states and observation nodes +... g = nx.DiGraph() +>>> g.add_edges_from( +... [ +... ("S1", "S2"), +... ("S2", "S3"), +... ("S3", "S4"), +... ("S4", "S5"), +... ("S1", "O1"), +... ("S2", "O2"), +... ("S3", "O3"), +... ("S4", "O4"), +... ("S5", "O5"), +... ] +... ) +>>> +>>> # states/obs before 'S3' are d-separated from states/obs after 'S3' +... nx.d_separated(g, {"S1", "S2", "O1", "O2"}, {"S4", "S5", "O4", "O5"}, {"S3"}) +True + + +References +---------- + +.. [1] Pearl, J. (2009). Causality. Cambridge: Cambridge University Press. + +.. [2] Darwiche, A. (2009). Modeling and reasoning with Bayesian networks. + Cambridge: Cambridge University Press. + +.. [3] Shachter, R. D. (1998). + Bayes-ball: rational pastime (for determining irrelevance and requisite + information in belief networks and influence diagrams). + In , Proceedings of the Fourteenth Conference on Uncertainty in Artificial + Intelligence (pp. 480–487). + San Francisco, CA, USA: Morgan Kaufmann Publishers Inc. + +.. [4] Koller, D., & Friedman, N. (2009). + Probabilistic graphical models: principles and techniques. The MIT Press. + +.. [5] https://en.wikipedia.org/wiki/Causal_Markov_condition + +""" + +from collections import deque + +import networkx as nx +from networkx.utils import UnionFind, not_implemented_for + +__all__ = ["d_separated", "minimal_d_separator", "is_minimal_d_separator"] + + +@not_implemented_for("undirected") +@nx._dispatch +def d_separated(G, x, y, z): + """ + Return whether node sets ``x`` and ``y`` are d-separated by ``z``. + + Parameters + ---------- + G : graph + A NetworkX DAG. + + x : set + First set of nodes in ``G``. + + y : set + Second set of nodes in ``G``. + + z : set + Set of conditioning nodes in ``G``. Can be empty set. + + Returns + ------- + b : bool + A boolean that is true if ``x`` is d-separated from ``y`` given ``z`` in ``G``. + + Raises + ------ + NetworkXError + The *d-separation* test is commonly used with directed + graphical models which are acyclic. Accordingly, the algorithm + raises a :exc:`NetworkXError` if the input graph is not a DAG. 
+ + NodeNotFound + If any of the input nodes are not found in the graph, + a :exc:`NodeNotFound` exception is raised. + + Notes + ----- + A d-separating set in a DAG is a set of nodes that + blocks all paths between the two sets. Nodes in `z` + block a path if they are part of the path and are not a collider, + or a descendant of a collider. A collider structure along a path + is ``... -> c <- ...`` where ``c`` is the collider node. + + https://en.wikipedia.org/wiki/Bayesian_network#d-separation + """ + + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("graph should be directed acyclic") + + union_xyz = x.union(y).union(z) + + if any(n not in G.nodes for n in union_xyz): + raise nx.NodeNotFound("one or more specified nodes not found in the graph") + + G_copy = G.copy() + + # transform the graph by removing leaves that are not in x | y | z + # until no more leaves can be removed. + leaves = deque([n for n in G_copy.nodes if G_copy.out_degree[n] == 0]) + while len(leaves) > 0: + leaf = leaves.popleft() + if leaf not in union_xyz: + for p in G_copy.predecessors(leaf): + if G_copy.out_degree[p] == 1: + leaves.append(p) + G_copy.remove_node(leaf) + + # transform the graph by removing outgoing edges from the + # conditioning set. + edges_to_remove = list(G_copy.out_edges(z)) + G_copy.remove_edges_from(edges_to_remove) + + # use disjoint-set data structure to check if any node in `x` + # occurs in the same weakly connected component as a node in `y`. + disjoint_set = UnionFind(G_copy.nodes()) + for component in nx.weakly_connected_components(G_copy): + disjoint_set.union(*component) + disjoint_set.union(*x) + disjoint_set.union(*y) + + if x and y and disjoint_set[next(iter(x))] == disjoint_set[next(iter(y))]: + return False + else: + return True + + +@not_implemented_for("undirected") +@nx._dispatch +def minimal_d_separator(G, u, v): + """Compute a minimal d-separating set between 'u' and 'v'. + + A d-separating set in a DAG is a set of nodes that blocks all paths + between the two nodes, 'u' and 'v'. This function + constructs a d-separating set that is "minimal", meaning it is the smallest + d-separating set for 'u' and 'v'. This is not necessarily + unique. For more details, see Notes. + + Parameters + ---------- + G : graph + A networkx DAG. + u : node + A node in the graph, G. + v : node + A node in the graph, G. + + Raises + ------ + NetworkXError + Raises a :exc:`NetworkXError` if the input graph is not a DAG. + + NodeNotFound + If any of the input nodes are not found in the graph, + a :exc:`NodeNotFound` exception is raised. + + References + ---------- + .. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators. + + Notes + ----- + This function only finds ``a`` minimal d-separator. It does not guarantee + uniqueness, since in a DAG there may be more than one minimal d-separator + between two nodes. Moreover, this only checks for minimal separators + between two nodes, not two sets. Finding minimal d-separators between + two sets of nodes is not supported. + + Uses the algorithm presented in [1]_. The complexity of the algorithm + is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the + number of edges in the moralized graph of the sub-graph consisting + of only the ancestors of 'u' and 'v'. For full details, see [1]_. + + The algorithm works by constructing the moral graph consisting of just + the ancestors of `u` and `v`. Then it constructs a candidate for + a separating set ``Z'`` from the predecessors of `u` and `v`. 
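+    (The predecessors here are taken in the original directed graph ``G``,
+    not in the moralized graph.)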
+ Then BFS is run starting from `u` and marking nodes + found from ``Z'`` and calling those nodes ``Z''``. + Then BFS is run again starting from `v` and marking nodes if they are + present in ``Z''``. Those marked nodes are the returned minimal + d-separating set. + + https://en.wikipedia.org/wiki/Bayesian_network#d-separation + """ + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("graph should be directed acyclic") + + union_uv = {u, v} + + if any(n not in G.nodes for n in union_uv): + raise nx.NodeNotFound("one or more specified nodes not found in the graph") + + # first construct the set of ancestors of X and Y + x_anc = nx.ancestors(G, u) + y_anc = nx.ancestors(G, v) + D_anc_xy = x_anc.union(y_anc) + D_anc_xy.update((u, v)) + + # second, construct the moralization of the subgraph of Anc(X,Y) + moral_G = nx.moral_graph(G.subgraph(D_anc_xy)) + + # find a separating set Z' in moral_G + Z_prime = set(G.predecessors(u)).union(set(G.predecessors(v))) + + # perform BFS on the graph from 'x' to mark + Z_dprime = _bfs_with_marks(moral_G, u, Z_prime) + Z = _bfs_with_marks(moral_G, v, Z_dprime) + return Z + + +@not_implemented_for("undirected") +@nx._dispatch +def is_minimal_d_separator(G, u, v, z): + """Determine if a d-separating set is minimal. + + A d-separating set, `z`, in a DAG is a set of nodes that blocks + all paths between the two nodes, `u` and `v`. This function + verifies that a set is "minimal", meaning there is no smaller + d-separating set between the two nodes. + + Note: This function checks whether `z` is a d-separator AND is minimal. + One can use the function `d_separated` to only check if `z` is a d-separator. + See examples below. + + Parameters + ---------- + G : nx.DiGraph + The graph. + u : node + A node in the graph. + v : node + A node in the graph. + z : Set of nodes + The set of nodes to check if it is a minimal d-separating set. + The function :func:`d_separated` is called inside this function + to verify that `z` is in fact a d-separator. + + Returns + ------- + bool + Whether or not the set `z` is a d-separator and is also minimal. + + Examples + -------- + >>> G = nx.path_graph([0, 1, 2, 3], create_using=nx.DiGraph) + >>> G.add_node(4) + >>> nx.is_minimal_d_separator(G, 0, 2, {1}) + True + >>> # since {1} is the minimal d-separator, {1, 3, 4} is not minimal + >>> nx.is_minimal_d_separator(G, 0, 2, {1, 3, 4}) + False + >>> # alternatively, if we only want to check that {1, 3, 4} is a d-separator + >>> nx.d_separated(G, {0}, {4}, {1, 3, 4}) + True + + Raises + ------ + NetworkXError + Raises a :exc:`NetworkXError` if the input graph is not a DAG. + + NodeNotFound + If any of the input nodes are not found in the graph, + a :exc:`NodeNotFound` exception is raised. + + References + ---------- + .. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators. + + Notes + ----- + This function only works on verifying a d-separating set is minimal + between two nodes. To verify that a d-separating set is minimal between + two sets of nodes is not supported. + + Uses algorithm 2 presented in [1]_. The complexity of the algorithm + is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the + number of edges in the moralized graph of the sub-graph consisting + of only the ancestors of ``u`` and ``v``. + + The algorithm works by constructing the moral graph consisting of just + the ancestors of `u` and `v`. First, it performs BFS on the moral graph + starting from `u` and marking any nodes it encounters that are part of + the separating set, `z`. 
If a node is marked, then it does not continue + along that path. In the second stage, BFS with markings is repeated on the + moral graph starting from `v`. If at any stage, any node in `z` is + not marked, then `z` is considered not minimal. If the end of the algorithm + is reached, then `z` is minimal. + + For full details, see [1]_. + + https://en.wikipedia.org/wiki/Bayesian_network#d-separation + """ + if not nx.d_separated(G, {u}, {v}, z): + return False + + x_anc = nx.ancestors(G, u) + y_anc = nx.ancestors(G, v) + xy_anc = x_anc.union(y_anc) + + # if Z contains any node which is not in ancestors of X or Y + # then it is definitely not minimal + if any(node not in xy_anc for node in z): + return False + + D_anc_xy = x_anc.union(y_anc) + D_anc_xy.update((u, v)) + + # second, construct the moralization of the subgraph + moral_G = nx.moral_graph(G.subgraph(D_anc_xy)) + + # start BFS from X + marks = _bfs_with_marks(moral_G, u, z) + + # if not all the Z is marked, then the set is not minimal + if any(node not in marks for node in z): + return False + + # similarly, start BFS from Y and check the marks + marks = _bfs_with_marks(moral_G, v, z) + # if not all the Z is marked, then the set is not minimal + if any(node not in marks for node in z): + return False + + return True + + +@not_implemented_for("directed") +def _bfs_with_marks(G, start_node, check_set): + """Breadth-first-search with markings. + + Performs BFS starting from ``start_node`` and whenever a node + inside ``check_set`` is met, it is "marked". Once a node is marked, + BFS does not continue along that path. The resulting marked nodes + are returned. + + Parameters + ---------- + G : nx.Graph + An undirected graph. + start_node : node + The start of the BFS. + check_set : set + The set of nodes to check against. + + Returns + ------- + marked : set + A set of nodes that were marked. + """ + visited = {} + marked = set() + queue = [] + + visited[start_node] = None + queue.append(start_node) + while queue: + m = queue.pop(0) + + for nbr in G.neighbors(m): + if nbr not in visited: + # memoize where we visited so far + visited[nbr] = None + + # mark the node in Z' and do not continue along that path + if nbr in check_set: + marked.add(nbr) + else: + queue.append(nbr) + return marked diff --git a/phivenv/Lib/site-packages/networkx/algorithms/dag.py b/phivenv/Lib/site-packages/networkx/algorithms/dag.py new file mode 100644 index 0000000000000000000000000000000000000000..fb74df81c6dbaa5137569e70b087e313abfaa974 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/dag.py @@ -0,0 +1,1258 @@ +"""Algorithms for directed acyclic graphs (DAGs). + +Note that most of these functions are only guaranteed to work for DAGs. +In general, these functions do not check for acyclic-ness, so it is up +to the user to check for that. 
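+
+A quick acyclicity check before calling into this module, for example:
+
+>>> import networkx as nx
+>>> G = nx.DiGraph([(1, 2), (2, 3)])
+>>> nx.is_directed_acyclic_graph(G)
+True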
+""" + +import heapq +from collections import deque +from functools import partial +from itertools import chain, combinations, product, starmap +from math import gcd + +import networkx as nx +from networkx.utils import arbitrary_element, not_implemented_for, pairwise + +__all__ = [ + "descendants", + "ancestors", + "topological_sort", + "lexicographical_topological_sort", + "all_topological_sorts", + "topological_generations", + "is_directed_acyclic_graph", + "is_aperiodic", + "transitive_closure", + "transitive_closure_dag", + "transitive_reduction", + "antichains", + "dag_longest_path", + "dag_longest_path_length", + "dag_to_branching", + "compute_v_structures", +] + +chaini = chain.from_iterable + + +@nx._dispatch +def descendants(G, source): + """Returns all nodes reachable from `source` in `G`. + + Parameters + ---------- + G : NetworkX Graph + source : node in `G` + + Returns + ------- + set() + The descendants of `source` in `G` + + Raises + ------ + NetworkXError + If node `source` is not in `G`. + + Examples + -------- + >>> DG = nx.path_graph(5, create_using=nx.DiGraph) + >>> sorted(nx.descendants(DG, 2)) + [3, 4] + + The `source` node is not a descendant of itself, but can be included manually: + + >>> sorted(nx.descendants(DG, 2) | {2}) + [2, 3, 4] + + See also + -------- + ancestors + """ + return {child for parent, child in nx.bfs_edges(G, source)} + + +@nx._dispatch +def ancestors(G, source): + """Returns all nodes having a path to `source` in `G`. + + Parameters + ---------- + G : NetworkX Graph + source : node in `G` + + Returns + ------- + set() + The ancestors of `source` in `G` + + Raises + ------ + NetworkXError + If node `source` is not in `G`. + + Examples + -------- + >>> DG = nx.path_graph(5, create_using=nx.DiGraph) + >>> sorted(nx.ancestors(DG, 2)) + [0, 1] + + The `source` node is not an ancestor of itself, but can be included manually: + + >>> sorted(nx.ancestors(DG, 2) | {2}) + [0, 1, 2] + + See also + -------- + descendants + """ + return {child for parent, child in nx.bfs_edges(G, source, reverse=True)} + + +@nx._dispatch +def has_cycle(G): + """Decides whether the directed graph has a cycle.""" + try: + # Feed the entire iterator into a zero-length deque. + deque(topological_sort(G), maxlen=0) + except nx.NetworkXUnfeasible: + return True + else: + return False + + +@nx._dispatch +def is_directed_acyclic_graph(G): + """Returns True if the graph `G` is a directed acyclic graph (DAG) or + False if not. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + bool + True if `G` is a DAG, False otherwise + + Examples + -------- + Undirected graph:: + + >>> G = nx.Graph([(1, 2), (2, 3)]) + >>> nx.is_directed_acyclic_graph(G) + False + + Directed graph with cycle:: + + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> nx.is_directed_acyclic_graph(G) + False + + Directed acyclic graph:: + + >>> G = nx.DiGraph([(1, 2), (2, 3)]) + >>> nx.is_directed_acyclic_graph(G) + True + + See also + -------- + topological_sort + """ + return G.is_directed() and not has_cycle(G) + + +@nx._dispatch +def topological_generations(G): + """Stratifies a DAG into generations. + + A topological generation is node collection in which ancestors of a node in each + generation are guaranteed to be in a previous generation, and any descendants of + a node are guaranteed to be in a following generation. Nodes are guaranteed to + be in the earliest possible generation that they can belong to. 
+ + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + Yields + ------ + sets of nodes + Yields sets of nodes representing each generation. + + Raises + ------ + NetworkXError + Generations are defined for directed graphs only. If the graph + `G` is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological generations + exist and a :exc:`NetworkXUnfeasible` exception is raised. This can also + be raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + Examples + -------- + >>> DG = nx.DiGraph([(2, 1), (3, 1)]) + >>> [sorted(generation) for generation in nx.topological_generations(DG)] + [[2, 3], [1]] + + Notes + ----- + The generation in which a node resides can also be determined by taking the + max-path-distance from the node to the farthest leaf node. That value can + be obtained with this function using `enumerate(topological_generations(G))`. + + See also + -------- + topological_sort + """ + if not G.is_directed(): + raise nx.NetworkXError("Topological sort not defined on undirected graphs.") + + multigraph = G.is_multigraph() + indegree_map = {v: d for v, d in G.in_degree() if d > 0} + zero_indegree = [v for v, d in G.in_degree() if d == 0] + + while zero_indegree: + this_generation = zero_indegree + zero_indegree = [] + for node in this_generation: + if node not in G: + raise RuntimeError("Graph changed during iteration") + for child in G.neighbors(node): + try: + indegree_map[child] -= len(G[node][child]) if multigraph else 1 + except KeyError as err: + raise RuntimeError("Graph changed during iteration") from err + if indegree_map[child] == 0: + zero_indegree.append(child) + del indegree_map[child] + yield this_generation + + if indegree_map: + raise nx.NetworkXUnfeasible( + "Graph contains a cycle or graph changed during iteration" + ) + + +@nx._dispatch +def topological_sort(G): + """Returns a generator of nodes in topologically sorted order. + + A topological sort is a nonunique permutation of the nodes of a + directed graph such that an edge from u to v implies that u + appears before v in the topological sort order. This ordering is + valid only if the graph has no directed cycles. + + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + Yields + ------ + nodes + Yields the nodes in topological sorted order. + + Raises + ------ + NetworkXError + Topological sort is defined for directed graphs only. If the graph `G` + is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological sort exists + and a :exc:`NetworkXUnfeasible` exception is raised. This can also be + raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + Examples + -------- + To get the reverse order of the topological sort: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> list(reversed(list(nx.topological_sort(DG)))) + [3, 2, 1] + + If your DiGraph naturally has the edges representing tasks/inputs + and nodes representing people/processes that initiate tasks, then + topological_sort is not quite what you need. You will have to change + the tasks to nodes with dependence reflected by edges. The result is + a kind of topological sort of the edges. 
This can be done + with :func:`networkx.line_graph` as follows: + + >>> list(nx.topological_sort(nx.line_graph(DG))) + [(1, 2), (2, 3)] + + Notes + ----- + This algorithm is based on a description and proof in + "Introduction to Algorithms: A Creative Approach" [1]_ . + + See also + -------- + is_directed_acyclic_graph, lexicographical_topological_sort + + References + ---------- + .. [1] Manber, U. (1989). + *Introduction to Algorithms - A Creative Approach.* Addison-Wesley. + """ + for generation in nx.topological_generations(G): + yield from generation + + +@nx._dispatch +def lexicographical_topological_sort(G, key=None): + """Generate the nodes in the unique lexicographical topological sort order. + + Generates a unique ordering of nodes by first sorting topologically (for which there are often + multiple valid orderings) and then additionally by sorting lexicographically. + + A topological sort arranges the nodes of a directed graph so that the + upstream node of each directed edge precedes the downstream node. + It is always possible to find a solution for directed graphs that have no cycles. + There may be more than one valid solution. + + Lexicographical sorting is just sorting alphabetically. It is used here to break ties in the + topological sort and to determine a single, unique ordering. This can be useful in comparing + sort results. + + The lexicographical order can be customized by providing a function to the `key=` parameter. + The definition of the key function is the same as used in python's built-in `sort()`. + The function takes a single argument and returns a key to use for sorting purposes. + + Lexicographical sorting can fail if the node names are un-sortable. See the example below. + The solution is to provide a function to the `key=` argument that returns sortable keys. + + + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + key : function, optional + A function of one argument that converts a node name to a comparison key. + It defines and resolves ambiguities in the sort order. Defaults to the identity function. + + Yields + ------ + nodes + Yields the nodes of G in lexicographical topological sort order. + + Raises + ------ + NetworkXError + Topological sort is defined for directed graphs only. If the graph `G` + is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological sort exists + and a :exc:`NetworkXUnfeasible` exception is raised. This can also be + raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + TypeError + Results from un-sortable node names. + Consider using `key=` parameter to resolve ambiguities in the sort order. + + Examples + -------- + >>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)]) + >>> list(nx.lexicographical_topological_sort(DG)) + [2, 1, 3, 5, 4] + >>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x)) + [2, 5, 1, 4, 3] + + The sort will fail for any graph with integer and string nodes. Comparison of integer to strings + is not defined in python. Is 3 greater or less than 'red'? + + >>> DG = nx.DiGraph([(1, 'red'), (3, 'red'), (1, 'green'), (2, 'blue')]) + >>> list(nx.lexicographical_topological_sort(DG)) + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'str' and 'int' + ... + + Incomparable nodes can be resolved using a `key` function. 
This example function + allows comparison of integers and strings by returning a tuple where the first + element is True for `str`, False otherwise. The second element is the node name. + This groups the strings and integers separately so they can be compared only among themselves. + + >>> key = lambda node: (isinstance(node, str), node) + >>> list(nx.lexicographical_topological_sort(DG, key=key)) + [1, 2, 3, 'blue', 'green', 'red'] + + Notes + ----- + This algorithm is based on a description and proof in + "Introduction to Algorithms: A Creative Approach" [1]_ . + + See also + -------- + topological_sort + + References + ---------- + .. [1] Manber, U. (1989). + *Introduction to Algorithms - A Creative Approach.* Addison-Wesley. + """ + if not G.is_directed(): + msg = "Topological sort not defined on undirected graphs." + raise nx.NetworkXError(msg) + + if key is None: + + def key(node): + return node + + nodeid_map = {n: i for i, n in enumerate(G)} + + def create_tuple(node): + return key(node), nodeid_map[node], node + + indegree_map = {v: d for v, d in G.in_degree() if d > 0} + # These nodes have zero indegree and ready to be returned. + zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0] + heapq.heapify(zero_indegree) + + while zero_indegree: + _, _, node = heapq.heappop(zero_indegree) + + if node not in G: + raise RuntimeError("Graph changed during iteration") + for _, child in G.edges(node): + try: + indegree_map[child] -= 1 + except KeyError as err: + raise RuntimeError("Graph changed during iteration") from err + if indegree_map[child] == 0: + try: + heapq.heappush(zero_indegree, create_tuple(child)) + except TypeError as err: + raise TypeError( + f"{err}\nConsider using `key=` parameter to resolve ambiguities in the sort order." + ) + del indegree_map[child] + + yield node + + if indegree_map: + msg = "Graph contains a cycle or graph changed during iteration" + raise nx.NetworkXUnfeasible(msg) + + +@not_implemented_for("undirected") +@nx._dispatch +def all_topological_sorts(G): + """Returns a generator of _all_ topological sorts of the directed graph G. + + A topological sort is a nonunique permutation of the nodes such that an + edge from u to v implies that u appears before v in the topological sort + order. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Yields + ------ + topological_sort_order : list + a list of nodes in `G`, representing one of the topological sort orders + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + NetworkXUnfeasible + If `G` is not acyclic + + Examples + -------- + To enumerate all topological sorts of directed graph: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + >>> list(nx.all_topological_sorts(DG)) + [[1, 2, 4, 3], [1, 2, 3, 4]] + + Notes + ----- + Implements an iterative version of the algorithm given in [1]. + + References + ---------- + .. [1] Knuth, Donald E., Szwarcfiter, Jayme L. (1974). + "A Structured Program to Generate All Topological Sorting Arrangements" + Information Processing Letters, Volume 2, Issue 6, 1974, Pages 153-157, + ISSN 0020-0190, + https://doi.org/10.1016/0020-0190(74)90001-5. 
+ Elsevier (North-Holland), Amsterdam + """ + if not G.is_directed(): + raise nx.NetworkXError("Topological sort not defined on undirected graphs.") + + # the names of count and D are chosen to match the global variables in [1] + # number of edges originating in a vertex v + count = dict(G.in_degree()) + # vertices with indegree 0 + D = deque([v for v, d in G.in_degree() if d == 0]) + # stack of first value chosen at a position k in the topological sort + bases = [] + current_sort = [] + + # do-while construct + while True: + assert all(count[v] == 0 for v in D) + + if len(current_sort) == len(G): + yield list(current_sort) + + # clean-up stack + while len(current_sort) > 0: + assert len(bases) == len(current_sort) + q = current_sort.pop() + + # "restores" all edges (q, x) + # NOTE: it is important to iterate over edges instead + # of successors, so count is updated correctly in multigraphs + for _, j in G.out_edges(q): + count[j] += 1 + assert count[j] >= 0 + # remove entries from D + while len(D) > 0 and count[D[-1]] > 0: + D.pop() + + # corresponds to a circular shift of the values in D + # if the first value chosen (the base) is in the first + # position of D again, we are done and need to consider the + # previous condition + D.appendleft(q) + if D[-1] == bases[-1]: + # all possible values have been chosen at current position + # remove corresponding marker + bases.pop() + else: + # there are still elements that have not been fixed + # at the current position in the topological sort + # stop removing elements, escape inner loop + break + + else: + if len(D) == 0: + raise nx.NetworkXUnfeasible("Graph contains a cycle.") + + # choose next node + q = D.pop() + # "erase" all edges (q, x) + # NOTE: it is important to iterate over edges instead + # of successors, so count is updated correctly in multigraphs + for _, j in G.out_edges(q): + count[j] -= 1 + assert count[j] >= 0 + if count[j] == 0: + D.append(j) + current_sort.append(q) + + # base for current position might _not_ be fixed yet + if len(bases) < len(current_sort): + bases.append(q) + + if len(bases) == 0: + break + + +@nx._dispatch +def is_aperiodic(G): + """Returns True if `G` is aperiodic. + + A directed graph is aperiodic if there is no integer k > 1 that + divides the length of every cycle in the graph. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Returns + ------- + bool + True if the graph is aperiodic False otherwise + + Raises + ------ + NetworkXError + If `G` is not directed + + Examples + -------- + A graph consisting of one cycle, the length of which is 2. Therefore ``k = 2`` + divides the length of every cycle in the graph and thus the graph + is *not aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 1)]) + >>> nx.is_aperiodic(DG) + False + + A graph consisting of two cycles: one of length 2 and the other of length 3. + The cycle lengths are coprime, so there is no single value of k where ``k > 1`` + that divides each cycle length and therefore the graph is *aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1), (1, 4), (4, 1)]) + >>> nx.is_aperiodic(DG) + True + + A graph consisting of two cycles: one of length 2 and the other of length 4. 
+ The lengths of the cycles share a common factor ``k = 2``, and therefore + the graph is *not aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 1), (3, 4), (4, 5), (5, 6), (6, 3)]) + >>> nx.is_aperiodic(DG) + False + + An acyclic graph, therefore the graph is *not aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> nx.is_aperiodic(DG) + False + + Notes + ----- + This uses the method outlined in [1]_, which runs in $O(m)$ time + given $m$ edges in `G`. Note that a graph is not aperiodic if it is + acyclic as every integer trivial divides length 0 cycles. + + References + ---------- + .. [1] Jarvis, J. P.; Shier, D. R. (1996), + "Graph-theoretic analysis of finite Markov chains," + in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling: + A Multidisciplinary Approach, CRC Press. + """ + if not G.is_directed(): + raise nx.NetworkXError("is_aperiodic not defined for undirected graphs") + + s = arbitrary_element(G) + levels = {s: 0} + this_level = [s] + g = 0 + lev = 1 + while this_level: + next_level = [] + for u in this_level: + for v in G[u]: + if v in levels: # Non-Tree Edge + g = gcd(g, levels[u] - levels[v] + 1) + else: # Tree Edge + next_level.append(v) + levels[v] = lev + this_level = next_level + lev += 1 + if len(levels) == len(G): # All nodes in tree + return g == 1 + else: + return g == 1 and nx.is_aperiodic(G.subgraph(set(G) - set(levels))) + + +@nx._dispatch(preserve_all_attrs=True) +def transitive_closure(G, reflexive=False): + """Returns transitive closure of a graph + + The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that + for all v, w in V there is an edge (v, w) in E+ if and only if there + is a path from v to w in G. + + Handling of paths from v to v has some flexibility within this definition. + A reflexive transitive closure creates a self-loop for the path + from v to v of length 0. The usual transitive closure creates a + self-loop only if a cycle exists (a path from v to v with length > 0). + We also allow an option for no self-loops. + + Parameters + ---------- + G : NetworkX Graph + A directed/undirected graph/multigraph. + reflexive : Bool or None, optional (default: False) + Determines when cycles create self-loops in the Transitive Closure. + If True, trivial cycles (length 0) create self-loops. The result + is a reflexive transitive closure of G. + If False (the default) non-trivial cycles create self-loops. + If None, self-loops are not created. + + Returns + ------- + NetworkX graph + The transitive closure of `G` + + Raises + ------ + NetworkXError + If `reflexive` not in `{None, True, False}` + + Examples + -------- + The treatment of trivial (i.e. length 0) cycles is controlled by the + `reflexive` parameter. + + Trivial (i.e. length 0) cycles do not create self-loops when + ``reflexive=False`` (the default):: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure(DG, reflexive=False) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3)]) + + However, nontrivial (i.e. 
length greater than 0) cycles create self-loops + when ``reflexive=False`` (the default):: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> TC = nx.transitive_closure(DG, reflexive=False) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (1, 1), (2, 3), (2, 1), (2, 2), (3, 1), (3, 2), (3, 3)]) + + Trivial cycles (length 0) create self-loops when ``reflexive=True``:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure(DG, reflexive=True) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 1), (1, 3), (2, 3), (2, 2), (3, 3)]) + + And the third option is not to create self-loops at all when ``reflexive=None``:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> TC = nx.transitive_closure(DG, reflexive=None) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3), (2, 1), (3, 1), (3, 2)]) + + References + ---------- + .. [1] https://www.ics.uci.edu/~eppstein/PADS/PartialOrder.py + """ + TC = G.copy() + + if reflexive not in {None, True, False}: + raise nx.NetworkXError("Incorrect value for the parameter `reflexive`") + + for v in G: + if reflexive is None: + TC.add_edges_from((v, u) for u in nx.descendants(G, v) if u not in TC[v]) + elif reflexive is True: + TC.add_edges_from( + (v, u) for u in nx.descendants(G, v) | {v} if u not in TC[v] + ) + elif reflexive is False: + TC.add_edges_from((v, e[1]) for e in nx.edge_bfs(G, v) if e[1] not in TC[v]) + + return TC + + +@not_implemented_for("undirected") +@nx._dispatch(preserve_all_attrs=True) +def transitive_closure_dag(G, topo_order=None): + """Returns the transitive closure of a directed acyclic graph. + + This function is faster than the function `transitive_closure`, but fails + if the graph has a cycle. + + The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that + for all v, w in V there is an edge (v, w) in E+ if and only if there + is a non-null path from v to w in G. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + topo_order: list or tuple, optional + A topological order for G (if None, the function will compute one) + + Returns + ------- + NetworkX DiGraph + The transitive closure of `G` + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + NetworkXUnfeasible + If `G` has a cycle + + Examples + -------- + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure_dag(DG) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3)]) + + Notes + ----- + This algorithm is probably simple enough to be well-known but I didn't find + a mention in the literature. + """ + if topo_order is None: + topo_order = list(topological_sort(G)) + + TC = G.copy() + + # idea: traverse vertices following a reverse topological order, connecting + # each vertex to its descendants at distance 2 as we go + for v in reversed(topo_order): + TC.add_edges_from((v, u) for u in nx.descendants_at_distance(TC, v, 2)) + + return TC + + +@not_implemented_for("undirected") +@nx._dispatch +def transitive_reduction(G): + """Returns transitive reduction of a directed graph + + The transitive reduction of G = (V,E) is a graph G- = (V,E-) such that + for all v,w in V there is an edge (v,w) in E- if and only if (v,w) is + in E and there is no path from v to w in G with length greater than 1. 
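+
+    For example, in a diamond with an added shortcut edge, only the shortcut
+    is removed, since it is the only edge implied by a longer path:
+
+    >>> DG = nx.DiGraph([(1, 2), (1, 3), (2, 4), (3, 4), (1, 4)])
+    >>> sorted(nx.transitive_reduction(DG).edges)
+    [(1, 2), (1, 3), (2, 4), (3, 4)]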
+ + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + Returns + ------- + NetworkX DiGraph + The transitive reduction of `G` + + Raises + ------ + NetworkXError + If `G` is not a directed acyclic graph (DAG) transitive reduction is + not uniquely defined and a :exc:`NetworkXError` exception is raised. + + Examples + -------- + To perform transitive reduction on a DiGraph: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (1, 3)]) + >>> TR = nx.transitive_reduction(DG) + >>> list(TR.edges) + [(1, 2), (2, 3)] + + To avoid unnecessary data copies, this implementation does not return a + DiGraph with node/edge data. + To perform transitive reduction on a DiGraph and transfer node/edge data: + + >>> DG = nx.DiGraph() + >>> DG.add_edges_from([(1, 2), (2, 3), (1, 3)], color='red') + >>> TR = nx.transitive_reduction(DG) + >>> TR.add_nodes_from(DG.nodes(data=True)) + >>> TR.add_edges_from((u, v, DG.edges[u, v]) for u, v in TR.edges) + >>> list(TR.edges(data=True)) + [(1, 2, {'color': 'red'}), (2, 3, {'color': 'red'})] + + References + ---------- + https://en.wikipedia.org/wiki/Transitive_reduction + + """ + if not is_directed_acyclic_graph(G): + msg = "Directed Acyclic Graph required for transitive_reduction" + raise nx.NetworkXError(msg) + TR = nx.DiGraph() + TR.add_nodes_from(G.nodes()) + descendants = {} + # count before removing set stored in descendants + check_count = dict(G.in_degree) + for u in G: + u_nbrs = set(G[u]) + for v in G[u]: + if v in u_nbrs: + if v not in descendants: + descendants[v] = {y for x, y in nx.dfs_edges(G, v)} + u_nbrs -= descendants[v] + check_count[v] -= 1 + if check_count[v] == 0: + del descendants[v] + TR.add_edges_from((u, v) for v in u_nbrs) + return TR + + +@not_implemented_for("undirected") +@nx._dispatch +def antichains(G, topo_order=None): + """Generates antichains from a directed acyclic graph (DAG). + + An antichain is a subset of a partially ordered set such that any + two elements in the subset are incomparable. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + topo_order: list or tuple, optional + A topological order for G (if None, the function will compute one) + + Yields + ------ + antichain : list + a list of nodes in `G` representing an antichain + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + NetworkXUnfeasible + If `G` contains a cycle + + Examples + -------- + >>> DG = nx.DiGraph([(1, 2), (1, 3)]) + >>> list(nx.antichains(DG)) + [[], [3], [2], [2, 3], [1]] + + Notes + ----- + This function was originally developed by Peter Jipsen and Franco Saliola + for the SAGE project. It's included in NetworkX with permission from the + authors. Original SAGE code at: + + https://github.com/sagemath/sage/blob/master/src/sage/combinat/posets/hasse_diagram.py + + References + ---------- + .. [1] Free Lattices, by R. Freese, J. Jezek and J. B. Nation, + AMS, Vol 42, 1995, p. 226. 
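+
+    Note that the number of antichains can grow exponentially with the number
+    of nodes, so fully consuming this generator may be infeasible for large
+    graphs.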
+ """ + if topo_order is None: + topo_order = list(nx.topological_sort(G)) + + TC = nx.transitive_closure_dag(G, topo_order) + antichains_stacks = [([], list(reversed(topo_order)))] + + while antichains_stacks: + (antichain, stack) = antichains_stacks.pop() + # Invariant: + # - the elements of antichain are independent + # - the elements of stack are independent from those of antichain + yield antichain + while stack: + x = stack.pop() + new_antichain = antichain + [x] + new_stack = [t for t in stack if not ((t in TC[x]) or (x in TC[t]))] + antichains_stacks.append((new_antichain, new_stack)) + + +@not_implemented_for("undirected") +@nx._dispatch(edge_attrs={"weight": "default_weight"}) +def dag_longest_path(G, weight="weight", default_weight=1, topo_order=None): + """Returns the longest path in a directed acyclic graph (DAG). + + If `G` has edges with `weight` attribute the edge data are used as + weight values. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + weight : str, optional + Edge data key to use for weight + + default_weight : int, optional + The weight of edges that do not have a weight attribute + + topo_order: list or tuple, optional + A topological order for `G` (if None, the function will compute one) + + Returns + ------- + list + Longest path + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + Examples + -------- + >>> DG = nx.DiGraph([(0, 1, {'cost':1}), (1, 2, {'cost':1}), (0, 2, {'cost':42})]) + >>> list(nx.all_simple_paths(DG, 0, 2)) + [[0, 1, 2], [0, 2]] + >>> nx.dag_longest_path(DG) + [0, 1, 2] + >>> nx.dag_longest_path(DG, weight="cost") + [0, 2] + + In the case where multiple valid topological orderings exist, `topo_order` + can be used to specify a specific ordering: + + >>> DG = nx.DiGraph([(0, 1), (0, 2)]) + >>> sorted(nx.all_topological_sorts(DG)) # Valid topological orderings + [[0, 1, 2], [0, 2, 1]] + >>> nx.dag_longest_path(DG, topo_order=[0, 1, 2]) + [0, 1] + >>> nx.dag_longest_path(DG, topo_order=[0, 2, 1]) + [0, 2] + + See also + -------- + dag_longest_path_length + + """ + if not G: + return [] + + if topo_order is None: + topo_order = nx.topological_sort(G) + + dist = {} # stores {v : (length, u)} + for v in topo_order: + us = [ + ( + dist[u][0] + + ( + max(data.values(), key=lambda x: x.get(weight, default_weight)) + if G.is_multigraph() + else data + ).get(weight, default_weight), + u, + ) + for u, data in G.pred[v].items() + ] + + # Use the best predecessor if there is one and its distance is + # non-negative, otherwise terminate. 
+ maxu = max(us, key=lambda x: x[0]) if us else (0, v) + dist[v] = maxu if maxu[0] >= 0 else (0, v) + + u = None + v = max(dist, key=lambda x: dist[x][0]) + path = [] + while u != v: + path.append(v) + u = v + v = dist[v][1] + + path.reverse() + return path + + +@not_implemented_for("undirected") +@nx._dispatch(edge_attrs={"weight": "default_weight"}) +def dag_longest_path_length(G, weight="weight", default_weight=1): + """Returns the longest path length in a DAG + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + weight : string, optional + Edge data key to use for weight + + default_weight : int, optional + The weight of edges that do not have a weight attribute + + Returns + ------- + int + Longest path length + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + Examples + -------- + >>> DG = nx.DiGraph([(0, 1, {'cost':1}), (1, 2, {'cost':1}), (0, 2, {'cost':42})]) + >>> list(nx.all_simple_paths(DG, 0, 2)) + [[0, 1, 2], [0, 2]] + >>> nx.dag_longest_path_length(DG) + 2 + >>> nx.dag_longest_path_length(DG, weight="cost") + 42 + + See also + -------- + dag_longest_path + """ + path = nx.dag_longest_path(G, weight, default_weight) + path_length = 0 + if G.is_multigraph(): + for u, v in pairwise(path): + i = max(G[u][v], key=lambda x: G[u][v][x].get(weight, default_weight)) + path_length += G[u][v][i].get(weight, default_weight) + else: + for u, v in pairwise(path): + path_length += G[u][v].get(weight, default_weight) + + return path_length + + +@nx._dispatch +def root_to_leaf_paths(G): + """Yields root-to-leaf paths in a directed acyclic graph. + + `G` must be a directed acyclic graph. If not, the behavior of this + function is undefined. A "root" in this graph is a node of in-degree + zero and a "leaf" a node of out-degree zero. + + When invoked, this function iterates over each path from any root to + any leaf. A path is a list of nodes. + + """ + roots = (v for v, d in G.in_degree() if d == 0) + leaves = (v for v, d in G.out_degree() if d == 0) + all_paths = partial(nx.all_simple_paths, G) + # TODO In Python 3, this would be better as `yield from ...`. + return chaini(starmap(all_paths, product(roots, leaves))) + + +@not_implemented_for("multigraph") +@not_implemented_for("undirected") +@nx._dispatch +def dag_to_branching(G): + """Returns a branching representing all (overlapping) paths from + root nodes to leaf nodes in the given directed acyclic graph. + + As described in :mod:`networkx.algorithms.tree.recognition`, a + *branching* is a directed forest in which each node has at most one + parent. In other words, a branching is a disjoint union of + *arborescences*. For this function, each node of in-degree zero in + `G` becomes a root of one of the arborescences, and there will be + one leaf node for each distinct path from that root to a leaf node + in `G`. + + Each node `v` in `G` with *k* parents becomes *k* distinct nodes in + the returned branching, one for each parent, and the sub-DAG rooted + at `v` is duplicated for each copy. The algorithm then recurses on + the children of each copy of `v`. + + Parameters + ---------- + G : NetworkX graph + A directed acyclic graph. + + Returns + ------- + DiGraph + The branching in which there is a bijection between root-to-leaf + paths in `G` (in which multiple paths may share the same leaf) + and root-to-leaf paths in the branching (in which there is a + unique path from a root to a leaf). 
+ + Each node has an attribute 'source' whose value is the original + node to which this node corresponds. No other graph, node, or + edge attributes are copied into this new graph. + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed, or if `G` is a multigraph. + + HasACycle + If `G` is not acyclic. + + Examples + -------- + To examine which nodes in the returned branching were produced by + which original node in the directed acyclic graph, we can collect + the mapping from source node to new nodes into a dictionary. For + example, consider the directed diamond graph:: + + >>> from collections import defaultdict + >>> from operator import itemgetter + >>> + >>> G = nx.DiGraph(nx.utils.pairwise("abd")) + >>> G.add_edges_from(nx.utils.pairwise("acd")) + >>> B = nx.dag_to_branching(G) + >>> + >>> sources = defaultdict(set) + >>> for v, source in B.nodes(data="source"): + ... sources[source].add(v) + >>> len(sources["a"]) + 1 + >>> len(sources["d"]) + 2 + + To copy node attributes from the original graph to the new graph, + you can use a dictionary like the one constructed in the above + example:: + + >>> for source, nodes in sources.items(): + ... for v in nodes: + ... B.nodes[v].update(G.nodes[source]) + + Notes + ----- + This function is not idempotent in the sense that the node labels in + the returned branching may be uniquely generated each time the + function is invoked. In fact, the node labels may not be integers; + in order to relabel the nodes to be more readable, you can use the + :func:`networkx.convert_node_labels_to_integers` function. + + The current implementation of this function uses + :func:`networkx.prefix_tree`, so it is subject to the limitations of + that function. + + """ + if has_cycle(G): + msg = "dag_to_branching is only defined for acyclic graphs" + raise nx.HasACycle(msg) + paths = root_to_leaf_paths(G) + B = nx.prefix_tree(paths) + # Remove the synthetic `root`(0) and `NIL`(-1) nodes from the tree + B.remove_node(0) + B.remove_node(-1) + return B + + +@not_implemented_for("undirected") +@nx._dispatch +def compute_v_structures(G): + """Iterate through the graph to compute all v-structures. + + V-structures are triples in the directed graph where + two parent nodes point to the same child and the two parent nodes + are not adjacent. + + Parameters + ---------- + G : graph + A networkx DiGraph. + + Returns + ------- + vstructs : iterator of tuples + The v structures within the graph. Each v structure is a 3-tuple with the + parent, collider, and other parent. 
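+        The two parents are listed in sorted order, so each v-structure is
+        yielded exactly once.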
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edges_from([(1, 2), (0, 5), (3, 1), (2, 4), (3, 1), (4, 5), (1, 5)])
+    >>> sorted(nx.compute_v_structures(G))
+    [(0, 5, 1), (0, 5, 4), (1, 5, 4)]
+
+    Notes
+    -----
+    https://en.wikipedia.org/wiki/Collider_(statistics)
+    """
+    for collider, preds in G.pred.items():
+        for parent_1, parent_2 in combinations(preds, r=2):
+            # v-structures require the two parent nodes to be non-adjacent
+            if parent_1 in G[parent_2] or parent_2 in G[parent_1]:
+                continue
+            # list the parents in sorted order so each triple appears once
+            parent_1, parent_2 = sorted((parent_1, parent_2))
+            yield (parent_1, collider, parent_2)
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/distance_measures.py b/phivenv/Lib/site-packages/networkx/algorithms/distance_measures.py
new file mode 100644
index 0000000000000000000000000000000000000000..17e264821ba896d0fd30657453cec341259eb0ae
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/distance_measures.py
@@ -0,0 +1,869 @@
+"""Graph diameter, radius, eccentricity and other properties."""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = [
+    "eccentricity",
+    "diameter",
+    "radius",
+    "periphery",
+    "center",
+    "barycenter",
+    "resistance_distance",
+    "kemeny_constant",
+]
+
+
+def _extrema_bounding(G, compute="diameter", weight=None):
+    """Compute requested extreme distance metric of undirected graph G
+
+    Computation is based on smart lower and upper bounds, and in practice
+    linear in the number of nodes, rather than quadratic (except for some
+    border cases such as complete graphs or cycle-shaped graphs).
+
+    Parameters
+    ----------
+    G : NetworkX graph
+       An undirected graph
+
+    compute : string denoting the requested metric
+       "diameter" for the maximal eccentricity value,
+       "radius" for the minimal eccentricity value,
+       "periphery" for the set of nodes with eccentricity equal to the diameter,
+       "center" for the set of nodes with eccentricity equal to the radius,
+       "eccentricities" for the maximum distance from each node to all other nodes in G
+
+    weight : string, function, or None
+        If this is a string, then edge weights will be accessed via the
+        edge attribute with this key (that is, the weight of the edge
+        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+        such edge attribute exists, the weight of the edge is assumed to
+        be one.
+
+        If this is a function, the weight of an edge is the value
+        returned by the function. The function must accept exactly three
+        positional arguments: the two endpoints of an edge and the
+        dictionary of edge attributes for that edge. The function must
+        return a number.
+
+        If this is None, every edge has weight/distance/cost 1.
+
+        Weights stored as floating point values can lead to small round-off
+        errors in distances. Use integer weights to avoid this.
+
+        Weights should be positive, since they are distances.
+
+    Returns
+    -------
+    value : value of the requested metric
+       int for "diameter" and "radius" or
+       list of nodes for "center" and "periphery" or
+       dictionary of eccentricity values keyed by node for "eccentricities"
+
+    Raises
+    ------
+    NetworkXError
+        If the graph consists of multiple components
+    ValueError
+        If `compute` is not one of "diameter", "radius", "periphery", "center", or "eccentricities".
+
+    Notes
+    -----
+    This algorithm was proposed in [1]_ and discussed further in [2]_ and [3]_.
+
+    References
+    ----------
+    .. [1] F. W. Takes, W. A. Kosters,
+       "Determining the diameter of small world networks."
+ Proceedings of the 20th ACM international conference on Information and knowledge management, 2011 + https://dl.acm.org/doi/abs/10.1145/2063576.2063748 + .. [2] F. W. Takes, W. A. Kosters, + "Computing the Eccentricity Distribution of Large Graphs." + Algorithms, 2013 + https://www.mdpi.com/1999-4893/6/1/100 + .. [3] M. Borassi, P. Crescenzi, M. Habib, W. A. Kosters, A. Marino, F. W. Takes, + "Fast diameter and radius BFS-based computation in (weakly connected) real-world graphs: With an application to the six degrees of separation games. " + Theoretical Computer Science, 2015 + https://www.sciencedirect.com/science/article/pii/S0304397515001644 + """ + # init variables + degrees = dict(G.degree()) # start with the highest degree node + minlowernode = max(degrees, key=degrees.get) + N = len(degrees) # number of nodes + # alternate between smallest lower and largest upper bound + high = False + # status variables + ecc_lower = dict.fromkeys(G, 0) + ecc_upper = dict.fromkeys(G, N) + candidates = set(G) + + # (re)set bound extremes + minlower = N + maxlower = 0 + minupper = N + maxupper = 0 + + # repeat the following until there are no more candidates + while candidates: + if high: + current = maxuppernode # select node with largest upper bound + else: + current = minlowernode # select node with smallest lower bound + high = not high + + # get distances from/to current node and derive eccentricity + dist = nx.shortest_path_length(G, source=current, weight=weight) + + if len(dist) != N: + msg = "Cannot compute metric because graph is not connected." + raise nx.NetworkXError(msg) + current_ecc = max(dist.values()) + + # print status update + # print ("ecc of " + str(current) + " (" + str(ecc_lower[current]) + "/" + # + str(ecc_upper[current]) + ", deg: " + str(dist[current]) + ") is " + # + str(current_ecc)) + # print(ecc_upper) + + # (re)set bound extremes + maxuppernode = None + minlowernode = None + + # update node bounds + for i in candidates: + # update eccentricity bounds + d = dist[i] + ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d))) + ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d) + + # update min/max values of lower and upper bounds + minlower = min(ecc_lower[i], minlower) + maxlower = max(ecc_lower[i], maxlower) + minupper = min(ecc_upper[i], minupper) + maxupper = max(ecc_upper[i], maxupper) + + # update candidate set + if compute == "diameter": + ruled_out = { + i + for i in candidates + if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper + } + elif compute == "radius": + ruled_out = { + i + for i in candidates + if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower + } + elif compute == "periphery": + ruled_out = { + i + for i in candidates + if ecc_upper[i] < maxlower + and (maxlower == maxupper or ecc_lower[i] > maxupper) + } + elif compute == "center": + ruled_out = { + i + for i in candidates + if ecc_lower[i] > minupper + and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower) + } + elif compute == "eccentricities": + ruled_out = set() + else: + msg = "compute must be one of 'diameter', 'radius', 'periphery', 'center', 'eccentricities'" + raise ValueError(msg) + + ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i]) + candidates -= ruled_out + + # for i in ruled_out: + # print("removing %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"% + # (i,ecc_upper[i],maxlower,ecc_lower[i],maxupper)) + # print("node %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"% + # (4,ecc_upper[4],maxlower,ecc_lower[4],maxupper)) + # 
print("NODE 4: %g"%(ecc_upper[4] <= maxlower)) + # print("NODE 4: %g"%(2 * ecc_lower[4] >= maxupper)) + # print("NODE 4: %g"%(ecc_upper[4] <= maxlower + # and 2 * ecc_lower[4] >= maxupper)) + + # updating maxuppernode and minlowernode for selection in next round + for i in candidates: + if ( + minlowernode is None + or ( + ecc_lower[i] == ecc_lower[minlowernode] + and degrees[i] > degrees[minlowernode] + ) + or (ecc_lower[i] < ecc_lower[minlowernode]) + ): + minlowernode = i + + if ( + maxuppernode is None + or ( + ecc_upper[i] == ecc_upper[maxuppernode] + and degrees[i] > degrees[maxuppernode] + ) + or (ecc_upper[i] > ecc_upper[maxuppernode]) + ): + maxuppernode = i + + # print status update + # print (" min=" + str(minlower) + "/" + str(minupper) + + # " max=" + str(maxlower) + "/" + str(maxupper) + + # " candidates: " + str(len(candidates))) + # print("cand:",candidates) + # print("ecc_l",ecc_lower) + # print("ecc_u",ecc_upper) + # wait = input("press Enter to continue") + + # return the correct value of the requested metric + if compute == "diameter": + return maxlower + if compute == "radius": + return minupper + if compute == "periphery": + p = [v for v in G if ecc_lower[v] == maxlower] + return p + if compute == "center": + c = [v for v in G if ecc_upper[v] == minupper] + return c + if compute == "eccentricities": + return ecc_lower + return None + + +@nx._dispatch(edge_attrs="weight") +def eccentricity(G, v=None, sp=None, weight=None): + """Returns the eccentricity of nodes in G. + + The eccentricity of a node v is the maximum distance from v to + all other nodes in G. + + Parameters + ---------- + G : NetworkX graph + A graph + + v : node, optional + Return value of specified node + + sp : dict of dicts, optional + All pairs shortest path lengths as a dictionary of dictionaries + + weight : string, function, or None (default=None) + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + ecc : dictionary + A dictionary of eccentricity values keyed by node. 
+ + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> dict(nx.eccentricity(G)) + {1: 2, 2: 3, 3: 2, 4: 2, 5: 3} + + >>> dict(nx.eccentricity(G, v=[1, 5])) # This returns the eccentricity of node 1 & 5 + {1: 2, 5: 3} + + """ + # if v is None: # none, use entire graph + # nodes=G.nodes() + # elif v in G: # is v a single node + # nodes=[v] + # else: # assume v is a container of nodes + # nodes=v + order = G.order() + e = {} + for n in G.nbunch_iter(v): + if sp is None: + length = nx.shortest_path_length(G, source=n, weight=weight) + + L = len(length) + else: + try: + length = sp[n] + L = len(length) + except TypeError as err: + raise nx.NetworkXError('Format of "sp" is invalid.') from err + if L != order: + if G.is_directed(): + msg = ( + "Found infinite path length because the digraph is not" + " strongly connected" + ) + else: + msg = "Found infinite path length because the graph is not" " connected" + raise nx.NetworkXError(msg) + + e[n] = max(length.values()) + + if v in G: + return e[v] # return single value + return e + + +@nx._dispatch(edge_attrs="weight") +def diameter(G, e=None, usebounds=False, weight=None): + """Returns the diameter of the graph G. + + The diameter is the maximum eccentricity. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + d : integer + Diameter of graph + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.diameter(G) + 3 + + See Also + -------- + eccentricity + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="diameter", weight=weight) + if e is None: + e = eccentricity(G, weight=weight) + return max(e.values()) + + +@nx._dispatch(edge_attrs="weight") +def periphery(G, e=None, usebounds=False, weight=None): + """Returns the periphery of the graph G. + + The periphery is the set of nodes with eccentricity equal to the diameter. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. 
The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + p : list + List of nodes in periphery + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.periphery(G) + [2, 5] + + See Also + -------- + barycenter + center + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="periphery", weight=weight) + if e is None: + e = eccentricity(G, weight=weight) + diameter = max(e.values()) + p = [v for v in e if e[v] == diameter] + return p + + +@nx._dispatch(edge_attrs="weight") +def radius(G, e=None, usebounds=False, weight=None): + """Returns the radius of the graph G. + + The radius is the minimum eccentricity. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + r : integer + Radius of graph + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.radius(G) + 2 + + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="radius", weight=weight) + if e is None: + e = eccentricity(G, weight=weight) + return min(e.values()) + + +@nx._dispatch(edge_attrs="weight") +def center(G, e=None, usebounds=False, weight=None): + """Returns the center of the graph G. + + The center is the set of nodes with eccentricity equal to radius. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. 
+
+        Weights stored as floating point values can lead to small round-off
+        errors in distances. Use integer weights to avoid this.
+
+        Weights should be positive, since they are distances.
+
+    Returns
+    -------
+    c : list
+        List of nodes in center
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
+    >>> list(nx.center(G))
+    [1, 3, 4]
+
+    See Also
+    --------
+    barycenter
+    periphery
+    """
+    if usebounds is True and e is None and not G.is_directed():
+        return _extrema_bounding(G, compute="center", weight=weight)
+    if e is None:
+        e = eccentricity(G, weight=weight)
+    radius = min(e.values())
+    p = [v for v in e if e[v] == radius]
+    return p
+
+
+@nx._dispatch(edge_attrs="weight")
+def barycenter(G, weight=None, attr=None, sp=None):
+    r"""Calculate barycenter of a connected graph, optionally with edge weights.
+
+    The :dfn:`barycenter` of a
+    :func:`connected <networkx.algorithms.components.is_connected>` graph
+    :math:`G` is the subgraph induced by the set of its nodes :math:`v`
+    minimizing the objective function
+
+    .. math::
+
+        \sum_{u \in V(G)} d_G(u, v),
+
+    where :math:`d_G` is the (possibly weighted) :func:`path length
+    <networkx.algorithms.shortest_paths.generic.shortest_path_length>`.
+    The barycenter is also called the :dfn:`median`. See [West01]_, p. 78.
+
+    Parameters
+    ----------
+    G : :class:`networkx.Graph`
+        The connected graph :math:`G`.
+    weight : :class:`str`, optional
+        Passed through to
+        :func:`~networkx.algorithms.shortest_paths.generic.shortest_path_length`.
+    attr : :class:`str`, optional
+        If given, write the value of the objective function to each node's
+        `attr` attribute. Otherwise do not store the value.
+    sp : dict of dicts, optional
+        All pairs shortest path lengths as a dictionary of dictionaries
+
+    Returns
+    -------
+    list
+        Nodes of `G` that induce the barycenter of `G`.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If `G` is disconnected. `G` may appear disconnected to
+        :func:`barycenter` if `sp` is given but is missing shortest path
+        lengths for any pairs.
+    ValueError
+        If `sp` and `weight` are both given.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
+    >>> nx.barycenter(G)
+    [1, 3, 4]
+
+    See Also
+    --------
+    center
+    periphery
+    """
+    if sp is None:
+        sp = nx.shortest_path_length(G, weight=weight)
+    else:
+        sp = sp.items()
+        if weight is not None:
+            raise ValueError("Cannot use both sp, weight arguments together")
+    smallest, barycenter_vertices, n = float("inf"), [], len(G)
+    for v, dists in sp:
+        if len(dists) < n:
+            raise nx.NetworkXNoPath(
+                f"Input graph {G} is disconnected, so every induced subgraph "
+                "has infinite barycentricity."
+            )
+        barycentricity = sum(dists.values())
+        if attr is not None:
+            G.nodes[v][attr] = barycentricity
+        if barycentricity < smallest:
+            smallest = barycentricity
+            barycenter_vertices = [v]
+        elif barycentricity == smallest:
+            barycenter_vertices.append(v)
+    return barycenter_vertices
+
+
+def _count_lu_permutations(perm_array):
+    """Counts the number of permutations in SuperLU perm_c or perm_r"""
+    perm_cnt = 0
+    arr = perm_array.tolist()
+    for i in range(len(arr)):
+        if i != arr[i]:
+            perm_cnt += 1
+            n = arr.index(i)
+            arr[n] = arr[i]
+            arr[i] = i
+
+    return perm_cnt
+
+
+@not_implemented_for("directed")
+@nx._dispatch(edge_attrs="weight")
+def resistance_distance(G, nodeA=None, nodeB=None, weight=None, invert_weight=True):
+    """Returns the resistance distance between every pair of nodes on graph G.
+ + The resistance distance between two nodes of a graph is akin to treating + the graph as a grid of resistors with a resistance equal to the provided + weight [1]_, [2]_. + + If weight is not provided, then a weight of 1 is used for all edges. + + If two nodes are the same, the resistance distance is zero. + + Parameters + ---------- + G : NetworkX graph + A graph + + nodeA : node or None, optional (default=None) + A node within graph G. + If None, compute resistance distance using all nodes as source nodes. + + nodeB : node or None, optional (default=None) + A node within graph G. + If None, compute resistance distance using all nodes as target nodes. + + weight : string or None, optional (default=None) + The edge data key used to compute the resistance distance. + If None, then each edge has weight 1. + + invert_weight : boolean (default=True) + Proper calculation of resistance distance requires building the + Laplacian matrix with the reciprocal of the weight. Not required + if the weight is already inverted. Weight cannot be zero. + + Returns + ------- + rd : dict or float + If `nodeA` and `nodeB` are given, resistance distance between `nodeA` + and `nodeB`. If `nodeA` or `nodeB` is unspecified (the default), a + dictionary of nodes with resistance distances as the value. + + Raises + ------ + NetworkXNotImplemented + If `G` is a directed graph. + + NetworkXError + If `G` is not connected, or contains no nodes, + or `nodeA` is not in `G` or `nodeB` is not in `G`. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> round(nx.resistance_distance(G, 1, 3), 10) + 0.625 + + Notes + ----- + The implementation is based on Theorem A in [2]_. Self-loops are ignored. + Multi-edges are contracted in one edge with weight equal to the harmonic sum of the weights. + + References + ---------- + .. [1] Wikipedia + "Resistance distance." + https://en.wikipedia.org/wiki/Resistance_distance + .. [2] D. J. Klein and M. Randic. + Resistance distance. + J. of Math. Chem. 12:81-95, 1993. 
+ """ + import numpy as np + + if len(G) == 0: + raise nx.NetworkXError("Graph G must contain at least one node.") + if not nx.is_connected(G): + raise nx.NetworkXError("Graph G must be strongly connected.") + if nodeA is not None and nodeA not in G: + raise nx.NetworkXError("Node A is not in graph G.") + if nodeB is not None and nodeB not in G: + raise nx.NetworkXError("Node B is not in graph G.") + + G = G.copy() + node_list = list(G) + + # Invert weights + if invert_weight and weight is not None: + if G.is_multigraph(): + for u, v, k, d in G.edges(keys=True, data=True): + d[weight] = 1 / d[weight] + else: + for u, v, d in G.edges(data=True): + d[weight] = 1 / d[weight] + + # Compute resistance distance using the Pseudo-inverse of the Laplacian + # Self-loops are ignored + L = nx.laplacian_matrix(G, weight=weight).todense() + Linv = np.linalg.pinv(L, hermitian=True) + + # Return relevant distances + if nodeA is not None and nodeB is not None: + i = node_list.index(nodeA) + j = node_list.index(nodeB) + return Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i] + + elif nodeA is not None: + i = node_list.index(nodeA) + d = {} + for n in G: + j = node_list.index(n) + d[n] = Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i] + return d + + elif nodeB is not None: + j = node_list.index(nodeB) + d = {} + for n in G: + i = node_list.index(n) + d[n] = Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i] + return d + + else: + d = {} + for n in G: + i = node_list.index(n) + d[n] = {} + for n2 in G: + j = node_list.index(n2) + d[n][n2] = Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i] + return d + + +@nx.utils.not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def kemeny_constant(G, *, weight=None): + """Returns the Kemeny constant of the given graph. + + The *Kemeny constant* (or Kemeny's constant) of a graph `G` + can be computed by regarding the graph as a Markov chain. + The Kemeny constant is then the expected number of time steps + to transition from a starting state i to a random destination state + sampled from the Markov chain's stationary distribution. + The Kemeny constant is independent of the chosen initial state [1]_. + + The Kemeny constant measures the time needed for spreading + across a graph. Low values indicate a closely connected graph + whereas high values indicate a spread-out graph. + + If weight is not provided, then a weight of 1 is used for all edges. + + Since `G` represents a Markov chain, the weights must be positive. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The edge data key used to compute the Kemeny constant. + If None, then each edge has weight 1. + + Returns + ------- + K : float + The Kemeny constant of the graph `G`. + + Raises + ------ + NetworkXNotImplemented + If the graph `G` is directed. + + NetworkXError + If the graph `G` is not connected, or contains no nodes, + or has edges with negative weights. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> round(nx.kemeny_constant(G), 10) + 3.2 + + Notes + ----- + The implementation is based on equation (3.3) in [2]_. + Self-loops are allowed and indicate a Markov chain where + the state can remain the same. Multi-edges are contracted + in one edge with weight equal to the sum of the weights. + + References + ---------- + .. [1] Wikipedia + "Kemeny's constant." + https://en.wikipedia.org/wiki/Kemeny%27s_constant + .. [2] Lovász L. + Random walks on graphs: A survey. + Paul Erdös is Eighty, vol. 
2, Bolyai Society, + Mathematical Studies, Keszthely, Hungary (1993), pp. 1-46 + """ + import numpy as np + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXError("Graph G must contain at least one node.") + if not nx.is_connected(G): + raise nx.NetworkXError("Graph G must be connected.") + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("The weights of graph G must be nonnegative.") + + # Compute matrix H = D^-1/2 A D^-1/2 + A = nx.adjacency_matrix(G, weight=weight) + n, m = A.shape + diags = A.sum(axis=1) + with np.errstate(divide="ignore"): + diags_sqrt = 1.0 / np.sqrt(diags) + diags_sqrt[np.isinf(diags_sqrt)] = 0 + DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr")) + H = DH @ (A @ DH) + + # Compute eigenvalues of H + eig = np.sort(sp.linalg.eigvalsh(H.todense())) + + # Compute the Kemeny constant + return np.sum(1 / (1 - eig[:-1])) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/distance_regular.py b/phivenv/Lib/site-packages/networkx/algorithms/distance_regular.py new file mode 100644 index 0000000000000000000000000000000000000000..18c19ee00e07394f6127095766ff6219ae47598a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/distance_regular.py @@ -0,0 +1,234 @@ +""" +======================= +Distance-regular graphs +======================= +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +from .distance_measures import diameter + +__all__ = [ + "is_distance_regular", + "is_strongly_regular", + "intersection_array", + "global_parameters", +] + + +@nx._dispatch +def is_distance_regular(G): + """Returns True if the graph is distance regular, False otherwise. + + A connected graph G is distance-regular if for any nodes x,y + and any integers i,j=0,1,...,d (where d is the graph + diameter), the number of vertices at distance i from x and + distance j from y depends only on i,j and the graph distance + between x and y, independently of the choice of x and y. + + Parameters + ---------- + G: Networkx graph (undirected) + + Returns + ------- + bool + True if the graph is Distance Regular, False otherwise + + Examples + -------- + >>> G = nx.hypercube_graph(6) + >>> nx.is_distance_regular(G) + True + + See Also + -------- + intersection_array, global_parameters + + Notes + ----- + For undirected and simple graphs only + + References + ---------- + .. [1] Brouwer, A. E.; Cohen, A. M.; and Neumaier, A. + Distance-Regular Graphs. New York: Springer-Verlag, 1989. + .. [2] Weisstein, Eric W. "Distance-Regular Graph." + http://mathworld.wolfram.com/Distance-RegularGraph.html + + """ + try: + intersection_array(G) + return True + except nx.NetworkXError: + return False + + +def global_parameters(b, c): + """Returns global parameters for a given intersection array. + + Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d + such that for any 2 vertices x,y in G at a distance i=d(x,y), there + are exactly c_i neighbors of y at a distance of i-1 from x and b_i + neighbors of y at a distance of i+1 from x. + + Thus, a distance regular graph has the global parameters, + [[c_0,a_0,b_0],[c_1,a_1,b_1],......,[c_d,a_d,b_d]] for the + intersection array [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d] + where a_i+b_i+c_i=k , k= degree of every vertex. + + Parameters + ---------- + b : list + + c : list + + Returns + ------- + iterable + An iterable over three tuples. 
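+        The i-th tuple is ``(c_i, a_i, b_i)``, where ``a_i = k - b_i - c_i``
+        and ``k`` is the degree of every vertex.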
+ + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> b, c = nx.intersection_array(G) + >>> list(nx.global_parameters(b, c)) + [(0, 0, 3), (1, 0, 2), (1, 1, 1), (1, 1, 1), (2, 0, 1), (3, 0, 0)] + + References + ---------- + .. [1] Weisstein, Eric W. "Global Parameters." + From MathWorld--A Wolfram Web Resource. + http://mathworld.wolfram.com/GlobalParameters.html + + See Also + -------- + intersection_array + """ + return ((y, b[0] - x - y, x) for x, y in zip(b + [0], [0] + c)) + + +@not_implemented_for("directed", "multigraph") +@nx._dispatch +def intersection_array(G): + """Returns the intersection array of a distance-regular graph. + + Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d + such that for any 2 vertices x,y in G at a distance i=d(x,y), there + are exactly c_i neighbors of y at a distance of i-1 from x and b_i + neighbors of y at a distance of i+1 from x. + + A distance regular graph's intersection array is given by, + [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d] + + Parameters + ---------- + G: Networkx graph (undirected) + + Returns + ------- + b,c: tuple of lists + + Examples + -------- + >>> G = nx.icosahedral_graph() + >>> nx.intersection_array(G) + ([5, 2, 1], [1, 2, 5]) + + References + ---------- + .. [1] Weisstein, Eric W. "Intersection Array." + From MathWorld--A Wolfram Web Resource. + http://mathworld.wolfram.com/IntersectionArray.html + + See Also + -------- + global_parameters + """ + # test for regular graph (all degrees must be equal) + degree = iter(G.degree()) + (_, k) = next(degree) + for _, knext in degree: + if knext != k: + raise nx.NetworkXError("Graph is not distance regular.") + k = knext + path_length = dict(nx.all_pairs_shortest_path_length(G)) + diameter = max(max(path_length[n].values()) for n in path_length) + bint = {} # 'b' intersection array + cint = {} # 'c' intersection array + for u in G: + for v in G: + try: + i = path_length[u][v] + except KeyError as err: # graph must be connected + raise nx.NetworkXError("Graph is not distance regular.") from err + # number of neighbors of v at a distance of i-1 from u + c = len([n for n in G[v] if path_length[n][u] == i - 1]) + # number of neighbors of v at a distance of i+1 from u + b = len([n for n in G[v] if path_length[n][u] == i + 1]) + # b,c are independent of u and v + if cint.get(i, c) != c or bint.get(i, b) != b: + raise nx.NetworkXError("Graph is not distance regular") + bint[i] = b + cint[i] = c + return ( + [bint.get(j, 0) for j in range(diameter)], + [cint.get(j + 1, 0) for j in range(diameter)], + ) + + +# TODO There is a definition for directed strongly regular graphs. +@not_implemented_for("directed", "multigraph") +@nx._dispatch +def is_strongly_regular(G): + """Returns True if and only if the given graph is strongly + regular. + + An undirected graph is *strongly regular* if + + * it is regular, + * each pair of adjacent vertices has the same number of neighbors in + common, + * each pair of nonadjacent vertices has the same number of neighbors + in common. + + Each strongly regular graph is a distance-regular graph. + Conversely, if a distance-regular graph has diameter two, then it is + a strongly regular graph. For more information on distance-regular + graphs, see :func:`is_distance_regular`. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Returns + ------- + bool + Whether `G` is strongly regular. + + Examples + -------- + + The cycle graph on five vertices is strongly regular. 
It is + two-regular, each pair of adjacent vertices has no shared neighbors, + and each pair of nonadjacent vertices has one shared neighbor:: + + >>> G = nx.cycle_graph(5) + >>> nx.is_strongly_regular(G) + True + + """ + # Here is an alternate implementation based directly on the + # definition of strongly regular graphs: + # + # return (all_equal(G.degree().values()) + # and all_equal(len(common_neighbors(G, u, v)) + # for u, v in G.edges()) + # and all_equal(len(common_neighbors(G, u, v)) + # for u, v in non_edges(G))) + # + # We instead use the fact that a distance-regular graph of diameter + # two is strongly regular. + return is_distance_regular(G) and diameter(G) == 2 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/dominance.py b/phivenv/Lib/site-packages/networkx/algorithms/dominance.py new file mode 100644 index 0000000000000000000000000000000000000000..ffdbe7d21391872c20cab7a9e6c06a522af97ac3 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/dominance.py @@ -0,0 +1,135 @@ +""" +Dominance algorithms. +""" + +from functools import reduce + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["immediate_dominators", "dominance_frontiers"] + + +@not_implemented_for("undirected") +@nx._dispatch +def immediate_dominators(G, start): + """Returns the immediate dominators of all nodes of a directed graph. + + Parameters + ---------- + G : a DiGraph or MultiDiGraph + The graph where dominance is to be computed. + + start : node + The start node of dominance computation. + + Returns + ------- + idom : dict keyed by nodes + A dict containing the immediate dominators of each node reachable from + `start`. + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. + + NetworkXError + If `start` is not in `G`. + + Notes + ----- + Except for `start`, the immediate dominators are the parents of their + corresponding nodes in the dominator tree. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)]) + >>> sorted(nx.immediate_dominators(G, 1).items()) + [(1, 1), (2, 1), (3, 1), (4, 3), (5, 1)] + + References + ---------- + .. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy. + A simple, fast dominance algorithm. + Software Practice & Experience, 4:110, 2001. + """ + if start not in G: + raise nx.NetworkXError("start is not in G") + + idom = {start: start} + + order = list(nx.dfs_postorder_nodes(G, start)) + dfn = {u: i for i, u in enumerate(order)} + order.pop() + order.reverse() + + def intersect(u, v): + while u != v: + while dfn[u] < dfn[v]: + u = idom[u] + while dfn[u] > dfn[v]: + v = idom[v] + return u + + changed = True + while changed: + changed = False + for u in order: + new_idom = reduce(intersect, (v for v in G.pred[u] if v in idom)) + if u not in idom or idom[u] != new_idom: + idom[u] = new_idom + changed = True + + return idom + + +@nx._dispatch +def dominance_frontiers(G, start): + """Returns the dominance frontiers of all nodes of a directed graph. + + Parameters + ---------- + G : a DiGraph or MultiDiGraph + The graph where dominance is to be computed. + + start : node + The start node of dominance computation. + + Returns + ------- + df : dict keyed by nodes + A dict containing the dominance frontiers of each node reachable from + `start` as lists. + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. + + NetworkXError + If `start` is not in `G`. 
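+
+    Notes
+    -----
+    The dominance frontier of a node `u` is the set of all nodes `w`
+    such that `u` dominates an immediate predecessor of `w` but does
+    not strictly dominate `w` itself.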
+ + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)]) + >>> sorted((u, sorted(df)) for u, df in nx.dominance_frontiers(G, 1).items()) + [(1, []), (2, [5]), (3, [5]), (4, [5]), (5, [])] + + References + ---------- + .. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy. + A simple, fast dominance algorithm. + Software Practice & Experience, 4:110, 2001. + """ + idom = nx.immediate_dominators(G, start) + + df = {u: set() for u in idom} + for u in idom: + if len(G.pred[u]) >= 2: + for v in G.pred[u]: + if v in idom: + while v != idom[u]: + df[v].add(u) + v = idom[v] + return df diff --git a/phivenv/Lib/site-packages/networkx/algorithms/dominating.py b/phivenv/Lib/site-packages/networkx/algorithms/dominating.py new file mode 100644 index 0000000000000000000000000000000000000000..97408ab4380515244437742624571a852098a74e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/dominating.py @@ -0,0 +1,94 @@ +"""Functions for computing dominating sets in a graph.""" +from itertools import chain + +import networkx as nx +from networkx.utils import arbitrary_element + +__all__ = ["dominating_set", "is_dominating_set"] + + +@nx._dispatch +def dominating_set(G, start_with=None): + r"""Finds a dominating set for the graph G. + + A *dominating set* for a graph with node set *V* is a subset *D* of + *V* such that every node not in *D* is adjacent to at least one + member of *D* [1]_. + + Parameters + ---------- + G : NetworkX graph + + start_with : node (default=None) + Node to use as a starting point for the algorithm. + + Returns + ------- + D : set + A dominating set for G. + + Notes + ----- + This function is an implementation of algorithm 7 in [2]_ which + finds some dominating set, not necessarily the smallest one. + + See also + -------- + is_dominating_set + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dominating_set + + .. [2] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + all_nodes = set(G) + if start_with is None: + start_with = arbitrary_element(all_nodes) + if start_with not in G: + raise nx.NetworkXError(f"node {start_with} is not in G") + dominating_set = {start_with} + dominated_nodes = set(G[start_with]) + remaining_nodes = all_nodes - dominated_nodes - dominating_set + while remaining_nodes: + # Choose an arbitrary node and determine its undominated neighbors. + v = remaining_nodes.pop() + undominated_neighbors = set(G[v]) - dominating_set + # Add the node to the dominating set and the neighbors to the + # dominated set. Finally, remove all of those nodes from the set + # of remaining nodes. + dominating_set.add(v) + dominated_nodes |= undominated_neighbors + remaining_nodes -= undominated_neighbors + return dominating_set + + +@nx._dispatch +def is_dominating_set(G, nbunch): + """Checks if `nbunch` is a dominating set for `G`. + + A *dominating set* for a graph with node set *V* is a subset *D* of + *V* such that every node not in *D* is adjacent to at least one + member of *D* [1]_. + + Parameters + ---------- + G : NetworkX graph + + nbunch : iterable + An iterable of nodes in the graph `G`. + + See also + -------- + dominating_set + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Dominating_set + + """ + testset = {n for n in nbunch if n in G} + nbrs = set(chain.from_iterable(G[n] for n in testset)) + return len(set(G) - testset - nbrs) == 0 diff --git a/phivenv/Lib/site-packages/networkx/algorithms/efficiency_measures.py b/phivenv/Lib/site-packages/networkx/algorithms/efficiency_measures.py new file mode 100644 index 0000000000000000000000000000000000000000..3beea38b013ac5f1bed237ae6a06d739be3c9d1e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/efficiency_measures.py @@ -0,0 +1,168 @@ +"""Provides functions for computing the efficiency of nodes and graphs.""" + +import networkx as nx +from networkx.exception import NetworkXNoPath + +from ..utils import not_implemented_for + +__all__ = ["efficiency", "local_efficiency", "global_efficiency"] + + +@not_implemented_for("directed") +@nx._dispatch +def efficiency(G, u, v): + """Returns the efficiency of a pair of nodes in a graph. + + The *efficiency* of a pair of nodes is the multiplicative inverse of the + shortest path distance between the nodes [1]_. Returns 0 if no path + between nodes. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average local efficiency. + u, v : node + Nodes in the graph ``G``. + + Returns + ------- + float + Multiplicative inverse of the shortest path distance between the nodes. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.efficiency(G, 2, 3) # this gives efficiency for node 2 and 3 + 0.5 + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + local_efficiency + global_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + try: + eff = 1 / nx.shortest_path_length(G, u, v) + except NetworkXNoPath: + eff = 0 + return eff + + +@not_implemented_for("directed") +@nx._dispatch +def global_efficiency(G): + """Returns the average global efficiency of the graph. + + The *efficiency* of a pair of nodes in a graph is the multiplicative + inverse of the shortest path distance between the nodes. The *average + global efficiency* of a graph is the average efficiency of all pairs of + nodes [1]_. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average global efficiency. + + Returns + ------- + float + The average global efficiency of the graph. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> round(nx.global_efficiency(G), 12) + 0.916666666667 + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + local_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + n = len(G) + denom = n * (n - 1) + if denom != 0: + lengths = nx.all_pairs_shortest_path_length(G) + g_eff = 0 + for source, targets in lengths: + for target, distance in targets.items(): + if distance > 0: + g_eff += 1 / distance + g_eff /= denom + # g_eff = sum(1 / d for s, tgts in lengths + # for t, d in tgts.items() if d > 0) / denom + else: + g_eff = 0 + # TODO This can be made more efficient by computing all pairs shortest + # path lengths in parallel. 
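+    # The all-pairs BFS above dominates the running time; dividing by
+    # denom averages the sum over all n * (n - 1) ordered node pairs.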
+ return g_eff + + +@not_implemented_for("directed") +@nx._dispatch +def local_efficiency(G): + """Returns the average local efficiency of the graph. + + The *efficiency* of a pair of nodes in a graph is the multiplicative + inverse of the shortest path distance between the nodes. The *local + efficiency* of a node in the graph is the average global efficiency of the + subgraph induced by the neighbors of the node. The *average local + efficiency* is the average of the local efficiencies of each node [1]_. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average local efficiency. + + Returns + ------- + float + The average local efficiency of the graph. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.local_efficiency(G) + 0.9166666666666667 + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + global_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + # TODO This summation can be trivially parallelized. + efficiency_list = (global_efficiency(G.subgraph(G[v])) for v in G) + return sum(efficiency_list) / len(G) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/euler.py b/phivenv/Lib/site-packages/networkx/algorithms/euler.py new file mode 100644 index 0000000000000000000000000000000000000000..9d61b5e4130b819588fb9b55d626eeb5e225523d --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/euler.py @@ -0,0 +1,469 @@ +""" +Eulerian circuits and graphs. +""" +from itertools import combinations + +import networkx as nx + +from ..utils import arbitrary_element, not_implemented_for + +__all__ = [ + "is_eulerian", + "eulerian_circuit", + "eulerize", + "is_semieulerian", + "has_eulerian_path", + "eulerian_path", +] + + +@nx._dispatch +def is_eulerian(G): + """Returns True if and only if `G` is Eulerian. + + A graph is *Eulerian* if it has an Eulerian circuit. An *Eulerian + circuit* is a closed walk that includes each edge of a graph exactly + once. + + Graphs with isolated vertices (i.e. vertices with zero degree) are not + considered to have Eulerian circuits. Therefore, if the graph is not + connected (or not strongly connected, for directed graphs), this function + returns False. + + Parameters + ---------- + G : NetworkX graph + A graph, either directed or undirected. + + Examples + -------- + >>> nx.is_eulerian(nx.DiGraph({0: [3], 1: [2], 2: [3], 3: [0, 1]})) + True + >>> nx.is_eulerian(nx.complete_graph(5)) + True + >>> nx.is_eulerian(nx.petersen_graph()) + False + + If you prefer to allow graphs with isolated vertices to have Eulerian circuits, + you can first remove such vertices and then call `is_eulerian` as below example shows. + + >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)]) + >>> G.add_node(3) + >>> nx.is_eulerian(G) + False + + >>> G.remove_nodes_from(list(nx.isolates(G))) + >>> nx.is_eulerian(G) + True + + + """ + if G.is_directed(): + # Every node must have equal in degree and out degree and the + # graph must be strongly connected + return all( + G.in_degree(n) == G.out_degree(n) for n in G + ) and nx.is_strongly_connected(G) + # An undirected Eulerian graph has no vertices of odd degree and + # must be connected. 
+ return all(d % 2 == 0 for v, d in G.degree()) and nx.is_connected(G) + + +@nx._dispatch +def is_semieulerian(G): + """Return True iff `G` is semi-Eulerian. + + G is semi-Eulerian if it has an Eulerian path but no Eulerian circuit. + + See Also + -------- + has_eulerian_path + is_eulerian + """ + return has_eulerian_path(G) and not is_eulerian(G) + + +def _find_path_start(G): + """Return a suitable starting vertex for an Eulerian path. + + If no path exists, return None. + """ + if not has_eulerian_path(G): + return None + + if is_eulerian(G): + return arbitrary_element(G) + + if G.is_directed(): + v1, v2 = (v for v in G if G.in_degree(v) != G.out_degree(v)) + # Determines which is the 'start' node (as opposed to the 'end') + if G.out_degree(v1) > G.in_degree(v1): + return v1 + else: + return v2 + + else: + # In an undirected graph randomly choose one of the possibilities + start = [v for v in G if G.degree(v) % 2 != 0][0] + return start + + +def _simplegraph_eulerian_circuit(G, source): + if G.is_directed(): + degree = G.out_degree + edges = G.out_edges + else: + degree = G.degree + edges = G.edges + vertex_stack = [source] + last_vertex = None + while vertex_stack: + current_vertex = vertex_stack[-1] + if degree(current_vertex) == 0: + if last_vertex is not None: + yield (last_vertex, current_vertex) + last_vertex = current_vertex + vertex_stack.pop() + else: + _, next_vertex = arbitrary_element(edges(current_vertex)) + vertex_stack.append(next_vertex) + G.remove_edge(current_vertex, next_vertex) + + +def _multigraph_eulerian_circuit(G, source): + if G.is_directed(): + degree = G.out_degree + edges = G.out_edges + else: + degree = G.degree + edges = G.edges + vertex_stack = [(source, None)] + last_vertex = None + last_key = None + while vertex_stack: + current_vertex, current_key = vertex_stack[-1] + if degree(current_vertex) == 0: + if last_vertex is not None: + yield (last_vertex, current_vertex, last_key) + last_vertex, last_key = current_vertex, current_key + vertex_stack.pop() + else: + triple = arbitrary_element(edges(current_vertex, keys=True)) + _, next_vertex, next_key = triple + vertex_stack.append((next_vertex, next_key)) + G.remove_edge(current_vertex, next_vertex, next_key) + + +@nx._dispatch +def eulerian_circuit(G, source=None, keys=False): + """Returns an iterator over the edges of an Eulerian circuit in `G`. + + An *Eulerian circuit* is a closed walk that includes each edge of a + graph exactly once. + + Parameters + ---------- + G : NetworkX graph + A graph, either directed or undirected. + + source : node, optional + Starting node for circuit. + + keys : bool + If False, edges generated by this function will be of the form + ``(u, v)``. Otherwise, edges will be of the form ``(u, v, k)``. + This option is ignored unless `G` is a multigraph. + + Returns + ------- + edges : iterator + An iterator over edges in the Eulerian circuit. + + Raises + ------ + NetworkXError + If the graph is not Eulerian. + + See Also + -------- + is_eulerian + + Notes + ----- + This is a linear time implementation of an algorithm adapted from [1]_. + + For general information about Euler tours, see [2]_. + + References + ---------- + .. [1] J. Edmonds, E. L. Johnson. + Matching, Euler tours and the Chinese postman. + Mathematical programming, Volume 5, Issue 1 (1973), 111-114. + .. 
[2] https://en.wikipedia.org/wiki/Eulerian_path + + Examples + -------- + To get an Eulerian circuit in an undirected graph:: + + >>> G = nx.complete_graph(3) + >>> list(nx.eulerian_circuit(G)) + [(0, 2), (2, 1), (1, 0)] + >>> list(nx.eulerian_circuit(G, source=1)) + [(1, 2), (2, 0), (0, 1)] + + To get the sequence of vertices in an Eulerian circuit:: + + >>> [u for u, v in nx.eulerian_circuit(G)] + [0, 2, 1] + + """ + if not is_eulerian(G): + raise nx.NetworkXError("G is not Eulerian.") + if G.is_directed(): + G = G.reverse() + else: + G = G.copy() + if source is None: + source = arbitrary_element(G) + if G.is_multigraph(): + for u, v, k in _multigraph_eulerian_circuit(G, source): + if keys: + yield u, v, k + else: + yield u, v + else: + yield from _simplegraph_eulerian_circuit(G, source) + + +@nx._dispatch +def has_eulerian_path(G, source=None): + """Return True iff `G` has an Eulerian path. + + An Eulerian path is a path in a graph which uses each edge of a graph + exactly once. If `source` is specified, then this function checks + whether an Eulerian path that starts at node `source` exists. + + A directed graph has an Eulerian path iff: + - at most one vertex has out_degree - in_degree = 1, + - at most one vertex has in_degree - out_degree = 1, + - every other vertex has equal in_degree and out_degree, + - and all of its vertices belong to a single connected + component of the underlying undirected graph. + + If `source` is not None, an Eulerian path starting at `source` exists if no + other node has out_degree - in_degree = 1. This is equivalent to either + there exists an Eulerian circuit or `source` has out_degree - in_degree = 1 + and the conditions above hold. + + An undirected graph has an Eulerian path iff: + - exactly zero or two vertices have odd degree, + - and all of its vertices belong to a single connected component. + + If `source` is not None, an Eulerian path starting at `source` exists if + either there exists an Eulerian circuit or `source` has an odd degree and the + conditions above hold. + + Graphs with isolated vertices (i.e. vertices with zero degree) are not considered + to have an Eulerian path. Therefore, if the graph is not connected (or not strongly + connected, for directed graphs), this function returns False. + + Parameters + ---------- + G : NetworkX Graph + The graph to find an euler path in. + + source : node, optional + Starting node for path. + + Returns + ------- + Bool : True if G has an Eulerian path. + + Examples + -------- + If you prefer to allow graphs with isolated vertices to have Eulerian path, + you can first remove such vertices and then call `has_eulerian_path` as below example shows. 
+ + >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)]) + >>> G.add_node(3) + >>> nx.has_eulerian_path(G) + False + + >>> G.remove_nodes_from(list(nx.isolates(G))) + >>> nx.has_eulerian_path(G) + True + + See Also + -------- + is_eulerian + eulerian_path + """ + if nx.is_eulerian(G): + return True + + if G.is_directed(): + ins = G.in_degree + outs = G.out_degree + # Since we know it is not eulerian, outs - ins must be 1 for source + if source is not None and outs[source] - ins[source] != 1: + return False + + unbalanced_ins = 0 + unbalanced_outs = 0 + for v in G: + if ins[v] - outs[v] == 1: + unbalanced_ins += 1 + elif outs[v] - ins[v] == 1: + unbalanced_outs += 1 + elif ins[v] != outs[v]: + return False + + return ( + unbalanced_ins <= 1 and unbalanced_outs <= 1 and nx.is_weakly_connected(G) + ) + else: + # We know it is not eulerian, so degree of source must be odd. + if source is not None and G.degree[source] % 2 != 1: + return False + + # Sum is 2 since we know it is not eulerian (which implies sum is 0) + return sum(d % 2 == 1 for v, d in G.degree()) == 2 and nx.is_connected(G) + + +@nx._dispatch +def eulerian_path(G, source=None, keys=False): + """Return an iterator over the edges of an Eulerian path in `G`. + + Parameters + ---------- + G : NetworkX Graph + The graph in which to look for an eulerian path. + source : node or None (default: None) + The node at which to start the search. None means search over all + starting nodes. + keys : Bool (default: False) + Indicates whether to yield edge 3-tuples (u, v, edge_key). + The default yields edge 2-tuples + + Yields + ------ + Edge tuples along the eulerian path. + + Warning: If `source` provided is not the start node of an Euler path + will raise error even if an Euler Path exists. + """ + if not has_eulerian_path(G, source): + raise nx.NetworkXError("Graph has no Eulerian paths.") + if G.is_directed(): + G = G.reverse() + if source is None or nx.is_eulerian(G) is False: + source = _find_path_start(G) + if G.is_multigraph(): + for u, v, k in _multigraph_eulerian_circuit(G, source): + if keys: + yield u, v, k + else: + yield u, v + else: + yield from _simplegraph_eulerian_circuit(G, source) + else: + G = G.copy() + if source is None: + source = _find_path_start(G) + if G.is_multigraph(): + if keys: + yield from reversed( + [(v, u, k) for u, v, k in _multigraph_eulerian_circuit(G, source)] + ) + else: + yield from reversed( + [(v, u) for u, v, k in _multigraph_eulerian_circuit(G, source)] + ) + else: + yield from reversed( + [(v, u) for u, v in _simplegraph_eulerian_circuit(G, source)] + ) + + +@not_implemented_for("directed") +@nx._dispatch +def eulerize(G): + """Transforms a graph into an Eulerian graph. + + If `G` is Eulerian the result is `G` as a MultiGraph, otherwise the result is a smallest + (in terms of the number of edges) multigraph whose underlying simple graph is `G`. + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + Returns + ------- + G : NetworkX multigraph + + Raises + ------ + NetworkXError + If the graph is not connected. + + See Also + -------- + is_eulerian + eulerian_circuit + + References + ---------- + .. [1] J. Edmonds, E. L. Johnson. + Matching, Euler tours and the Chinese postman. + Mathematical programming, Volume 5, Issue 1 (1973), 111-114. + .. [2] https://en.wikipedia.org/wiki/Eulerian_path + .. 
[3] http://web.math.princeton.edu/math_alive/5/Notes1.pdf + + Examples + -------- + >>> G = nx.complete_graph(10) + >>> H = nx.eulerize(G) + >>> nx.is_eulerian(H) + True + + """ + if G.order() == 0: + raise nx.NetworkXPointlessConcept("Cannot Eulerize null graph") + if not nx.is_connected(G): + raise nx.NetworkXError("G is not connected") + odd_degree_nodes = [n for n, d in G.degree() if d % 2 == 1] + G = nx.MultiGraph(G) + if len(odd_degree_nodes) == 0: + return G + + # get all shortest paths between vertices of odd degree + odd_deg_pairs_paths = [ + (m, {n: nx.shortest_path(G, source=m, target=n)}) + for m, n in combinations(odd_degree_nodes, 2) + ] + + # use the number of vertices in a graph + 1 as an upper bound on + # the maximum length of a path in G + upper_bound_on_max_path_length = len(G) + 1 + + # use "len(G) + 1 - len(P)", + # where P is a shortest path between vertices n and m, + # as edge-weights in a new graph + # store the paths in the graph for easy indexing later + Gp = nx.Graph() + for n, Ps in odd_deg_pairs_paths: + for m, P in Ps.items(): + if n != m: + Gp.add_edge( + m, n, weight=upper_bound_on_max_path_length - len(P), path=P + ) + + # find the minimum weight matching of edges in the weighted graph + best_matching = nx.Graph(list(nx.max_weight_matching(Gp))) + + # duplicate each edge along each path in the set of paths in Gp + for m, n in best_matching.edges(): + path = Gp[m][n]["path"] + G.add_edges_from(nx.utils.pairwise(path)) + return G diff --git a/phivenv/Lib/site-packages/networkx/algorithms/graph_hashing.py b/phivenv/Lib/site-packages/networkx/algorithms/graph_hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..d85a44a3604a70b9bac7f214e52d2e2ae12bc79a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/graph_hashing.py @@ -0,0 +1,313 @@ +""" +Functions for hashing graphs to strings. +Isomorphic graphs should be assigned identical hashes. +For now, only Weisfeiler-Lehman hashing is implemented. +""" + +from collections import Counter, defaultdict +from hashlib import blake2b + +import networkx as nx + +__all__ = ["weisfeiler_lehman_graph_hash", "weisfeiler_lehman_subgraph_hashes"] + + +def _hash_label(label, digest_size): + return blake2b(label.encode("ascii"), digest_size=digest_size).hexdigest() + + +def _init_node_labels(G, edge_attr, node_attr): + if node_attr: + return {u: str(dd[node_attr]) for u, dd in G.nodes(data=True)} + elif edge_attr: + return {u: "" for u in G} + else: + return {u: str(deg) for u, deg in G.degree()} + + +def _neighborhood_aggregate(G, node, node_labels, edge_attr=None): + """ + Compute new labels for given node by aggregating + the labels of each node's neighbors. + """ + label_list = [] + for nbr in G.neighbors(node): + prefix = "" if edge_attr is None else str(G[node][nbr][edge_attr]) + label_list.append(prefix + node_labels[nbr]) + return node_labels[node] + "".join(sorted(label_list)) + + +@nx._dispatch(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def weisfeiler_lehman_graph_hash( + G, edge_attr=None, node_attr=None, iterations=3, digest_size=16 +): + """Return Weisfeiler Lehman (WL) graph hash. + + The function iteratively aggregates and hashes neighbourhoods of each node. + After each node's neighbors are hashed to obtain updated node labels, + a hashed histogram of resulting labels is returned as the final hash. + + Hashes are identical for isomorphic graphs and strong guarantees that + non-isomorphic graphs will get different hashes. See [1]_ for details. 
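+
+    At each iteration, every node's label is replaced by a hash of its
+    own label concatenated with the sorted labels of its neighbors, so
+    after ``iterations`` rounds each label summarizes the node's
+    ``iterations``-hop neighborhood.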
+ + If no node or edge attributes are provided, the degree of each node + is used as its initial label. + Otherwise, node and/or edge labels are used to compute the hash. + + Parameters + ---------- + G: graph + The graph to be hashed. + Can have node and/or edge attributes. Can also have no attributes. + edge_attr: string, default=None + The key in edge attribute dictionary to be used for hashing. + If None, edge labels are ignored. + node_attr: string, default=None + The key in node attribute dictionary to be used for hashing. + If None, and no edge_attr given, use the degrees of the nodes as labels. + iterations: int, default=3 + Number of neighbor aggregations to perform. + Should be larger for larger graphs. + digest_size: int, default=16 + Size (in bits) of blake2b hash digest to use for hashing node labels. + + Returns + ------- + h : string + Hexadecimal string corresponding to hash of the input graph. + + Examples + -------- + Two graphs with edge attributes that are isomorphic, except for + differences in the edge labels. + + >>> G1 = nx.Graph() + >>> G1.add_edges_from( + ... [ + ... (1, 2, {"label": "A"}), + ... (2, 3, {"label": "A"}), + ... (3, 1, {"label": "A"}), + ... (1, 4, {"label": "B"}), + ... ] + ... ) + >>> G2 = nx.Graph() + >>> G2.add_edges_from( + ... [ + ... (5, 6, {"label": "B"}), + ... (6, 7, {"label": "A"}), + ... (7, 5, {"label": "A"}), + ... (7, 8, {"label": "A"}), + ... ] + ... ) + + Omitting the `edge_attr` option, results in identical hashes. + + >>> nx.weisfeiler_lehman_graph_hash(G1) + '7bc4dde9a09d0b94c5097b219891d81a' + >>> nx.weisfeiler_lehman_graph_hash(G2) + '7bc4dde9a09d0b94c5097b219891d81a' + + With edge labels, the graphs are no longer assigned + the same hash digest. + + >>> nx.weisfeiler_lehman_graph_hash(G1, edge_attr="label") + 'c653d85538bcf041d88c011f4f905f10' + >>> nx.weisfeiler_lehman_graph_hash(G2, edge_attr="label") + '3dcd84af1ca855d0eff3c978d88e7ec7' + + Notes + ----- + To return the WL hashes of each subgraph of a graph, use + `weisfeiler_lehman_subgraph_hashes` + + Similarity between hashes does not imply similarity between graphs. + + References + ---------- + .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen, + Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman + Graph Kernels. Journal of Machine Learning Research. 2011. + http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf + + See also + -------- + weisfeiler_lehman_subgraph_hashes + """ + + def weisfeiler_lehman_step(G, labels, edge_attr=None): + """ + Apply neighborhood aggregation to each node + in the graph. + Computes a dictionary with labels for each node. 
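+        The input mapping is not modified; ``digest_size`` is taken
+        from the enclosing scope.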
+ """ + new_labels = {} + for node in G.nodes(): + label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr) + new_labels[node] = _hash_label(label, digest_size) + return new_labels + + # set initial node labels + node_labels = _init_node_labels(G, edge_attr, node_attr) + + subgraph_hash_counts = [] + for _ in range(iterations): + node_labels = weisfeiler_lehman_step(G, node_labels, edge_attr=edge_attr) + counter = Counter(node_labels.values()) + # sort the counter, extend total counts + subgraph_hash_counts.extend(sorted(counter.items(), key=lambda x: x[0])) + + # hash the final counter + return _hash_label(str(tuple(subgraph_hash_counts)), digest_size) + + +@nx._dispatch(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def weisfeiler_lehman_subgraph_hashes( + G, edge_attr=None, node_attr=None, iterations=3, digest_size=16 +): + """ + Return a dictionary of subgraph hashes by node. + + Dictionary keys are nodes in `G`, and values are a list of hashes. + Each hash corresponds to a subgraph rooted at a given node u in `G`. + Lists of subgraph hashes are sorted in increasing order of depth from + their root node, with the hash at index i corresponding to a subgraph + of nodes at most i edges distance from u. Thus, each list will contain + ``iterations + 1`` elements - a hash for a subgraph at each depth, and + additionally a hash of the initial node label (or equivalently a + subgraph of depth 0) + + The function iteratively aggregates and hashes neighbourhoods of each node. + This is achieved for each step by replacing for each node its label from + the previous iteration with its hashed 1-hop neighborhood aggregate. + The new node label is then appended to a list of node labels for each + node. + + To aggregate neighborhoods at each step for a node $n$, all labels of + nodes adjacent to $n$ are concatenated. If the `edge_attr` parameter is set, + labels for each neighboring node are prefixed with the value of this attribute + along the connecting edge from this neighbor to node $n$. The resulting string + is then hashed to compress this information into a fixed digest size. + + Thus, at the $i$-th iteration, nodes within $i$ hops influence any given + hashed node label. We can therefore say that at depth $i$ for node $n$ + we have a hash for a subgraph induced by the $2i$-hop neighborhood of $n$. + + The output can be used to to create general Weisfeiler-Lehman graph kernels, + or generate features for graphs or nodes - for example to generate 'words' in + a graph as seen in the 'graph2vec' algorithm. + See [1]_ & [2]_ respectively for details. + + Hashes are identical for isomorphic subgraphs and there exist strong + guarantees that non-isomorphic graphs will get different hashes. + See [1]_ for details. + + If no node or edge attributes are provided, the degree of each node + is used as its initial label. + Otherwise, node and/or edge labels are used to compute the hash. + + Parameters + ---------- + G: graph + The graph to be hashed. + Can have node and/or edge attributes. Can also have no attributes. + edge_attr: string, default=None + The key in edge attribute dictionary to be used for hashing. + If None, edge labels are ignored. + node_attr: string, default=None + The key in node attribute dictionary to be used for hashing. + If None, and no edge_attr given, use the degrees of the nodes as labels. + iterations: int, default=3 + Number of neighbor aggregations to perform. + Should be larger for larger graphs. 
+ digest_size: int, default=16 + Size (in bits) of blake2b hash digest to use for hashing node labels. + The default size is 16 bits + + Returns + ------- + node_subgraph_hashes : dict + A dictionary with each key given by a node in G, and each value given + by the subgraph hashes in order of depth from the key node. + + Examples + -------- + Finding similar nodes in different graphs: + + >>> G1 = nx.Graph() + >>> G1.add_edges_from([ + ... (1, 2), (2, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 7) + ... ]) + >>> G2 = nx.Graph() + >>> G2.add_edges_from([ + ... (1, 3), (2, 3), (1, 6), (1, 5), (4, 6) + ... ]) + >>> g1_hashes = nx.weisfeiler_lehman_subgraph_hashes(G1, iterations=3, digest_size=8) + >>> g2_hashes = nx.weisfeiler_lehman_subgraph_hashes(G2, iterations=3, digest_size=8) + + Even though G1 and G2 are not isomorphic (they have different numbers of edges), + the hash sequence of depth 3 for node 1 in G1 and node 5 in G2 are similar: + + >>> g1_hashes[1] + ['a93b64973cfc8897', 'db1b43ae35a1878f', '57872a7d2059c1c0'] + >>> g2_hashes[5] + ['a93b64973cfc8897', 'db1b43ae35a1878f', '1716d2a4012fa4bc'] + + The first 2 WL subgraph hashes match. From this we can conclude that it's very + likely the neighborhood of 4 hops around these nodes are isomorphic: each + iteration aggregates 1-hop neighbourhoods meaning hashes at depth $n$ are influenced + by every node within $2n$ hops. + + However the neighborhood of 6 hops is no longer isomorphic since their 3rd hash does + not match. + + These nodes may be candidates to be classified together since their local topology + is similar. + + Notes + ----- + To hash the full graph when subgraph hashes are not needed, use + `weisfeiler_lehman_graph_hash` for efficiency. + + Similarity between hashes does not imply similarity between graphs. + + References + ---------- + .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen, + Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman + Graph Kernels. Journal of Machine Learning Research. 2011. + http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf + .. [2] Annamalai Narayanan, Mahinthan Chandramohan, Rajasekar Venkatesan, + Lihui Chen, Yang Liu and Shantanu Jaiswa. graph2vec: Learning + Distributed Representations of Graphs. arXiv. 2017 + https://arxiv.org/pdf/1707.05005.pdf + + See also + -------- + weisfeiler_lehman_graph_hash + """ + + def weisfeiler_lehman_step(G, labels, node_subgraph_hashes, edge_attr=None): + """ + Apply neighborhood aggregation to each node + in the graph. + Computes a dictionary with labels for each node. 
+ Appends the new hashed label to the dictionary of subgraph hashes + originating from and indexed by each node in G + """ + new_labels = {} + for node in G.nodes(): + label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr) + hashed_label = _hash_label(label, digest_size) + new_labels[node] = hashed_label + node_subgraph_hashes[node].append(hashed_label) + return new_labels + + node_labels = _init_node_labels(G, edge_attr, node_attr) + + node_subgraph_hashes = defaultdict(list) + for _ in range(iterations): + node_labels = weisfeiler_lehman_step( + G, node_labels, node_subgraph_hashes, edge_attr + ) + + return dict(node_subgraph_hashes) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/graphical.py b/phivenv/Lib/site-packages/networkx/algorithms/graphical.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1664427fda27b52930e6e8d451000ba9d5912b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/graphical.py @@ -0,0 +1,483 @@ +"""Test sequences for graphiness. +""" +import heapq + +import networkx as nx + +__all__ = [ + "is_graphical", + "is_multigraphical", + "is_pseudographical", + "is_digraphical", + "is_valid_degree_sequence_erdos_gallai", + "is_valid_degree_sequence_havel_hakimi", +] + + +@nx._dispatch(graphs=None) +def is_graphical(sequence, method="eg"): + """Returns True if sequence is a valid degree sequence. + + A degree sequence is valid if some graph can realize it. + + Parameters + ---------- + sequence : list or iterable container + A sequence of integer node degrees + + method : "eg" | "hh" (default: 'eg') + The method used to validate the degree sequence. + "eg" corresponds to the Erdős-Gallai algorithm + [EG1960]_, [choudum1986]_, and + "hh" to the Havel-Hakimi algorithm + [havel1955]_, [hakimi1962]_, [CL1996]_. + + Returns + ------- + valid : bool + True if the sequence is a valid degree sequence and False if not. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> sequence = (d for n, d in G.degree()) + >>> nx.is_graphical(sequence) + True + + To test a non-graphical sequence: + >>> sequence_list = [d for n, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_graphical(sequence_list) + False + + References + ---------- + .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960. + .. [choudum1986] S.A. Choudum. "A simple proof of the Erdős-Gallai theorem on + graph sequences." Bulletin of the Australian Mathematical Society, 33, + pp 67-70, 1986. https://doi.org/10.1017/S0004972700002872 + .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs" + Casopis Pest. Mat. 80, 477-480, 1955. + .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as + Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962. + .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs", + Chapman and Hall/CRC, 1996. 
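+
+    Notes
+    -----
+    Both methods are exact characterizations of graphical sequences,
+    so they always return the same answer; they differ only in how the
+    test is carried out.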
+ """ + if method == "eg": + valid = is_valid_degree_sequence_erdos_gallai(list(sequence)) + elif method == "hh": + valid = is_valid_degree_sequence_havel_hakimi(list(sequence)) + else: + msg = "`method` must be 'eg' or 'hh'" + raise nx.NetworkXException(msg) + return valid + + +def _basic_graphical_tests(deg_sequence): + # Sort and perform some simple tests on the sequence + deg_sequence = nx.utils.make_list_of_ints(deg_sequence) + p = len(deg_sequence) + num_degs = [0] * p + dmax, dmin, dsum, n = 0, p, 0, 0 + for d in deg_sequence: + # Reject if degree is negative or larger than the sequence length + if d < 0 or d >= p: + raise nx.NetworkXUnfeasible + # Process only the non-zero integers + elif d > 0: + dmax, dmin, dsum, n = max(dmax, d), min(dmin, d), dsum + d, n + 1 + num_degs[d] += 1 + # Reject sequence if it has odd sum or is oversaturated + if dsum % 2 or dsum > n * (n - 1): + raise nx.NetworkXUnfeasible + return dmax, dmin, dsum, n, num_degs + + +@nx._dispatch(graphs=None) +def is_valid_degree_sequence_havel_hakimi(deg_sequence): + r"""Returns True if deg_sequence can be realized by a simple graph. + + The validation proceeds using the Havel-Hakimi theorem + [havel1955]_, [hakimi1962]_, [CL1996]_. + Worst-case run time is $O(s)$ where $s$ is the sum of the sequence. + + Parameters + ---------- + deg_sequence : list + A list of integers where each element specifies the degree of a node + in a graph. + + Returns + ------- + valid : bool + True if deg_sequence is graphical and False if not. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_valid_degree_sequence_havel_hakimi(sequence) + True + + To test a non-valid sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_valid_degree_sequence_havel_hakimi(sequence_list) + False + + Notes + ----- + The ZZ condition says that for the sequence d if + + .. math:: + |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)} + + then d is graphical. This was shown in Theorem 6 in [1]_. + + References + ---------- + .. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory + of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992). + .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs" + Casopis Pest. Mat. 80, 477-480, 1955. + .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as + Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962. + .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs", + Chapman and Hall/CRC, 1996. 
+ """ + try: + dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence) + except nx.NetworkXUnfeasible: + return False + # Accept if sequence has no non-zero degrees or passes the ZZ condition + if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1): + return True + + modstubs = [0] * (dmax + 1) + # Successively reduce degree sequence by removing the maximum degree + while n > 0: + # Retrieve the maximum degree in the sequence + while num_degs[dmax] == 0: + dmax -= 1 + # If there are not enough stubs to connect to, then the sequence is + # not graphical + if dmax > n - 1: + return False + + # Remove largest stub in list + num_degs[dmax], n = num_degs[dmax] - 1, n - 1 + # Reduce the next dmax largest stubs + mslen = 0 + k = dmax + for i in range(dmax): + while num_degs[k] == 0: + k -= 1 + num_degs[k], n = num_degs[k] - 1, n - 1 + if k > 1: + modstubs[mslen] = k - 1 + mslen += 1 + # Add back to the list any non-zero stubs that were removed + for i in range(mslen): + stub = modstubs[i] + num_degs[stub], n = num_degs[stub] + 1, n + 1 + return True + + +@nx._dispatch(graphs=None) +def is_valid_degree_sequence_erdos_gallai(deg_sequence): + r"""Returns True if deg_sequence can be realized by a simple graph. + + The validation is done using the Erdős-Gallai theorem [EG1960]_. + + Parameters + ---------- + deg_sequence : list + A list of integers + + Returns + ------- + valid : bool + True if deg_sequence is graphical and False if not. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_valid_degree_sequence_erdos_gallai(sequence) + True + + To test a non-valid sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_valid_degree_sequence_erdos_gallai(sequence_list) + False + + Notes + ----- + + This implementation uses an equivalent form of the Erdős-Gallai criterion. + Worst-case run time is $O(n)$ where $n$ is the length of the sequence. + + Specifically, a sequence d is graphical if and only if the + sum of the sequence is even and for all strong indices k in the sequence, + + .. math:: + + \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_i,k) + = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j ) + + A strong index k is any index where d_k >= k and the value n_j is the + number of occurrences of j in d. The maximal strong index is called the + Durfee index. + + This particular rearrangement comes from the proof of Theorem 3 in [2]_. + + The ZZ condition says that for the sequence d if + + .. math:: + |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)} + + then d is graphical. This was shown in Theorem 6 in [2]_. + + References + ---------- + .. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai", + Discrete Mathematics, 265, pp. 417-420 (2003). + .. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory + of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992). + .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960. 
+ """ + try: + dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence) + except nx.NetworkXUnfeasible: + return False + # Accept if sequence has no non-zero degrees or passes the ZZ condition + if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1): + return True + + # Perform the EG checks using the reformulation of Zverovich and Zverovich + k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0 + for dk in range(dmax, dmin - 1, -1): + if dk < k + 1: # Check if already past Durfee index + return True + if num_degs[dk] > 0: + run_size = num_degs[dk] # Process a run of identical-valued degrees + if dk < k + run_size: # Check if end of run is past Durfee index + run_size = dk - k # Adjust back to Durfee index + sum_deg += run_size * dk + for v in range(run_size): + sum_nj += num_degs[k + v] + sum_jnj += (k + v) * num_degs[k + v] + k += run_size + if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj: + return False + return True + + +@nx._dispatch(graphs=None) +def is_multigraphical(sequence): + """Returns True if some multigraph can realize the sequence. + + Parameters + ---------- + sequence : list + A list of integers + + Returns + ------- + valid : bool + True if deg_sequence is a multigraphic degree sequence and False if not. + + Examples + -------- + >>> G = nx.MultiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_multigraphical(sequence) + True + + To test a non-multigraphical sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_multigraphical(sequence_list) + False + + Notes + ----- + The worst-case run time is $O(n)$ where $n$ is the length of the sequence. + + References + ---------- + .. [1] S. L. Hakimi. "On the realizability of a set of integers as + degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506 + (1962). + """ + try: + deg_sequence = nx.utils.make_list_of_ints(sequence) + except nx.NetworkXError: + return False + dsum, dmax = 0, 0 + for d in deg_sequence: + if d < 0: + return False + dsum, dmax = dsum + d, max(dmax, d) + if dsum % 2 or dsum < 2 * dmax: + return False + return True + + +@nx._dispatch(graphs=None) +def is_pseudographical(sequence): + """Returns True if some pseudograph can realize the sequence. + + Every nonnegative integer sequence with an even sum is pseudographical + (see [1]_). + + Parameters + ---------- + sequence : list or iterable container + A sequence of integer node degrees + + Returns + ------- + valid : bool + True if the sequence is a pseudographic degree sequence and False if not. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_pseudographical(sequence) + True + + To test a non-pseudographical sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_pseudographical(sequence_list) + False + + Notes + ----- + The worst-case run time is $O(n)$ where n is the length of the sequence. + + References + ---------- + .. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs + and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12), + pp. 778-782 (1976). 
+ """ + try: + deg_sequence = nx.utils.make_list_of_ints(sequence) + except nx.NetworkXError: + return False + return sum(deg_sequence) % 2 == 0 and min(deg_sequence) >= 0 + + +@nx._dispatch(graphs=None) +def is_digraphical(in_sequence, out_sequence): + r"""Returns True if some directed graph can realize the in- and out-degree + sequences. + + Parameters + ---------- + in_sequence : list or iterable container + A sequence of integer node in-degrees + + out_sequence : list or iterable container + A sequence of integer node out-degrees + + Returns + ------- + valid : bool + True if in and out-sequences are digraphic False if not. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> in_seq = (d for n, d in G.in_degree()) + >>> out_seq = (d for n, d in G.out_degree()) + >>> nx.is_digraphical(in_seq, out_seq) + True + + To test a non-digraphical scenario: + >>> in_seq_list = [d for n, d in G.in_degree()] + >>> in_seq_list[-1] += 1 + >>> nx.is_digraphical(in_seq_list, out_seq) + False + + Notes + ----- + This algorithm is from Kleitman and Wang [1]_. + The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the + sum and length of the sequences respectively. + + References + ---------- + .. [1] D.J. Kleitman and D.L. Wang + Algorithms for Constructing Graphs and Digraphs with Given Valences + and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973) + """ + try: + in_deg_sequence = nx.utils.make_list_of_ints(in_sequence) + out_deg_sequence = nx.utils.make_list_of_ints(out_sequence) + except nx.NetworkXError: + return False + # Process the sequences and form two heaps to store degree pairs with + # either zero or non-zero out degrees + sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence) + maxn = max(nin, nout) + maxin = 0 + if maxn == 0: + return True + stubheap, zeroheap = [], [] + for n in range(maxn): + in_deg, out_deg = 0, 0 + if n < nout: + out_deg = out_deg_sequence[n] + if n < nin: + in_deg = in_deg_sequence[n] + if in_deg < 0 or out_deg < 0: + return False + sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg) + if in_deg > 0: + stubheap.append((-1 * out_deg, -1 * in_deg)) + elif out_deg > 0: + zeroheap.append(-1 * out_deg) + if sumin != sumout: + return False + heapq.heapify(stubheap) + heapq.heapify(zeroheap) + + modstubs = [(0, 0)] * (maxin + 1) + # Successively reduce degree sequence by removing the maximum out degree + while stubheap: + # Take the first value in the sequence with non-zero in degree + (freeout, freein) = heapq.heappop(stubheap) + freein *= -1 + if freein > len(stubheap) + len(zeroheap): + return False + + # Attach out stubs to the nodes with the most in stubs + mslen = 0 + for i in range(freein): + if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]): + stubout = heapq.heappop(zeroheap) + stubin = 0 + else: + (stubout, stubin) = heapq.heappop(stubheap) + if stubout == 0: + return False + # Check if target is now totally connected + if stubout + 1 < 0 or stubin < 0: + modstubs[mslen] = (stubout + 1, stubin) + mslen += 1 + + # Add back the nodes to the heap that still have available stubs + for i in range(mslen): + stub = modstubs[i] + if stub[1] < 0: + heapq.heappush(stubheap, stub) + else: + heapq.heappush(zeroheap, stub[0]) + if freeout < 0: + heapq.heappush(zeroheap, freeout) + return True diff --git a/phivenv/Lib/site-packages/networkx/algorithms/hierarchy.py b/phivenv/Lib/site-packages/networkx/algorithms/hierarchy.py new file mode 100644 
index 0000000000000000000000000000000000000000..6dc63a741b5cfd0aa6b58dbd68caffca7ba1135a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/hierarchy.py @@ -0,0 +1,48 @@ +""" +Flow Hierarchy. +""" +import networkx as nx + +__all__ = ["flow_hierarchy"] + + +@nx._dispatch(edge_attrs="weight") +def flow_hierarchy(G, weight=None): + """Returns the flow hierarchy of a directed network. + + Flow hierarchy is defined as the fraction of edges not participating + in cycles in a directed graph [1]_. + + Parameters + ---------- + G : DiGraph or MultiDiGraph + A directed graph + + weight : string, optional (default=None) + Attribute to use for edge weights. If None the weight defaults to 1. + + Returns + ------- + h : float + Flow hierarchy value + + Notes + ----- + The algorithm described in [1]_ computes the flow hierarchy through + exponentiation of the adjacency matrix. This function implements an + alternative approach that finds strongly connected components. + An edge is in a cycle if and only if it is in a strongly connected + component, which can be found in $O(m)$ time using Tarjan's algorithm. + + References + ---------- + .. [1] Luo, J.; Magee, C.L. (2011), + Detecting evolving patterns of self-organizing networks by flow + hierarchy measurement, Complexity, Volume 16 Issue 6 53-61. + DOI: 10.1002/cplx.20368 + http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf + """ + if not G.is_directed(): + raise nx.NetworkXError("G must be a digraph in flow_hierarchy") + scc = nx.strongly_connected_components(G) + return 1 - sum(G.subgraph(c).size(weight) for c in scc) / G.size(weight) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/hybrid.py b/phivenv/Lib/site-packages/networkx/algorithms/hybrid.py new file mode 100644 index 0000000000000000000000000000000000000000..347f5c2f199bac838f5196d5ed544af880f8b06a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/hybrid.py @@ -0,0 +1,195 @@ +""" +Provides functions for finding and testing for locally `(k, l)`-connected +graphs. + +""" +import copy + +import networkx as nx + +__all__ = ["kl_connected_subgraph", "is_kl_connected"] + + +@nx._dispatch +def kl_connected_subgraph(G, k, l, low_memory=False, same_as_graph=False): + """Returns the maximum locally `(k, l)`-connected subgraph of `G`. + + A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the + graph there are at least `l` edge-disjoint paths of length at most `k` + joining `u` to `v`. + + Parameters + ---------- + G : NetworkX graph + The graph in which to find a maximum locally `(k, l)`-connected + subgraph. + + k : integer + The maximum length of paths to consider. A higher number means a looser + connectivity requirement. + + l : integer + The number of edge-disjoint paths. A higher number means a stricter + connectivity requirement. + + low_memory : bool + If this is True, this function uses an algorithm that uses slightly + more time but less memory. + + same_as_graph : bool + If True then return a tuple of the form `(H, is_same)`, + where `H` is the maximum locally `(k, l)`-connected subgraph and + `is_same` is a Boolean representing whether `G` is locally `(k, + l)`-connected (and hence, whether `H` is simply a copy of the input + graph `G`). + + Returns + ------- + NetworkX graph or two-tuple + If `same_as_graph` is True, then this function returns a + two-tuple as described above. Otherwise, it returns only the maximum + locally `(k, l)`-connected subgraph. 
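+
+    Notes
+    -----
+    The subgraph is computed by repeatedly scanning the edges and
+    deleting any edge ``(u, v)`` that fails the local `(k, l)`
+    connectivity test, until a full pass removes no edge.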
+ + See also + -------- + is_kl_connected + + References + ---------- + .. [1] Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid + Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg, + 2004. 89--104. + + """ + H = copy.deepcopy(G) # subgraph we construct by removing from G + + graphOK = True + deleted_some = True # hack to start off the while loop + while deleted_some: + deleted_some = False + # We use `for edge in list(H.edges()):` instead of + # `for edge in H.edges():` because we edit the graph `H` in + # the loop. Hence using an iterator will result in + # `RuntimeError: dictionary changed size during iteration` + for edge in list(H.edges()): + (u, v) = edge + # Get copy of graph needed for this search + if low_memory: + verts = {u, v} + for i in range(k): + for w in verts.copy(): + verts.update(G[w]) + G2 = G.subgraph(verts).copy() + else: + G2 = copy.deepcopy(G) + ### + path = [u, v] + cnt = 0 + accept = 0 + while path: + cnt += 1 # Found a path + if cnt >= l: + accept = 1 + break + # record edges along this graph + prev = u + for w in path: + if prev != w: + G2.remove_edge(prev, w) + prev = w + # path = shortest_path(G2, u, v, k) # ??? should "Cutoff" be k+1? + try: + path = nx.shortest_path(G2, u, v) # ??? should "Cutoff" be k+1? + except nx.NetworkXNoPath: + path = False + # No Other Paths + if accept == 0: + H.remove_edge(u, v) + deleted_some = True + if graphOK: + graphOK = False + # We looked through all edges and removed none of them. + # So, H is the maximal (k,l)-connected subgraph of G + if same_as_graph: + return (H, graphOK) + return H + + +@nx._dispatch +def is_kl_connected(G, k, l, low_memory=False): + """Returns True if and only if `G` is locally `(k, l)`-connected. + + A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the + graph there are at least `l` edge-disjoint paths of length at most `k` + joining `u` to `v`. + + Parameters + ---------- + G : NetworkX graph + The graph to test for local `(k, l)`-connectedness. + + k : integer + The maximum length of paths to consider. A higher number means a looser + connectivity requirement. + + l : integer + The number of edge-disjoint paths. A higher number means a stricter + connectivity requirement. + + low_memory : bool + If this is True, this function uses an algorithm that uses slightly + more time but less memory. + + Returns + ------- + bool + Whether the graph is locally `(k, l)`-connected subgraph. + + See also + -------- + kl_connected_subgraph + + References + ---------- + .. [1] Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid + Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg, + 2004. 89--104. + + """ + graphOK = True + for edge in G.edges(): + (u, v) = edge + # Get copy of graph needed for this search + if low_memory: + verts = {u, v} + for i in range(k): + [verts.update(G.neighbors(w)) for w in verts.copy()] + G2 = G.subgraph(verts) + else: + G2 = copy.deepcopy(G) + ### + path = [u, v] + cnt = 0 + accept = 0 + while path: + cnt += 1 # Found a path + if cnt >= l: + accept = 1 + break + # record edges along this graph + prev = u + for w in path: + if w != prev: + G2.remove_edge(prev, w) + prev = w + # path = shortest_path(G2, u, v, k) # ??? should "Cutoff" be k+1? + try: + path = nx.shortest_path(G2, u, v) # ??? should "Cutoff" be k+1? 
+            except nx.NetworkXNoPath:
+                path = False
+        # No Other Paths
+        if accept == 0:
+            graphOK = False
+            break
+    # return status
+    return graphOK
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/isolate.py b/phivenv/Lib/site-packages/networkx/algorithms/isolate.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9983282a635c2699eaed79446bb0ed79794c362
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/isolate.py
@@ -0,0 +1,107 @@
+"""
+Functions for identifying isolate (degree zero) nodes.
+"""
+import networkx as nx
+
+__all__ = ["is_isolate", "isolates", "number_of_isolates"]
+
+
+@nx._dispatch
+def is_isolate(G, n):
+    """Determines whether a node is an isolate.
+
+    An *isolate* is a node with no neighbors (that is, with degree
+    zero). For directed graphs, this means no in-neighbors and no
+    out-neighbors.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    n : node
+        A node in `G`.
+
+    Returns
+    -------
+    is_isolate : bool
+       True if and only if `n` has no neighbors.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> G.add_edge(1, 2)
+    >>> G.add_node(3)
+    >>> nx.is_isolate(G, 2)
+    False
+    >>> nx.is_isolate(G, 3)
+    True
+    """
+    return G.degree(n) == 0
+
+
+@nx._dispatch
+def isolates(G):
+    """Iterator over isolates in the graph.
+
+    An *isolate* is a node with no neighbors (that is, with degree
+    zero). For directed graphs, this means no in-neighbors and no
+    out-neighbors.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    iterator
+        An iterator over the isolates of `G`.
+
+    Examples
+    --------
+    To get a list of all isolates of a graph, use the :class:`list`
+    constructor::
+
+        >>> G = nx.Graph()
+        >>> G.add_edge(1, 2)
+        >>> G.add_node(3)
+        >>> list(nx.isolates(G))
+        [3]
+
+    To remove all isolates in the graph, first create a list of the
+    isolates, then use :meth:`Graph.remove_nodes_from`::
+
+        >>> G.remove_nodes_from(list(nx.isolates(G)))
+        >>> list(G)
+        [1, 2]
+
+    For digraphs, isolates have zero in-degree and zero out-degree::
+
+        >>> G = nx.DiGraph([(0, 1), (1, 2)])
+        >>> G.add_node(3)
+        >>> list(nx.isolates(G))
+        [3]
+
+    """
+    return (n for n, d in G.degree() if d == 0)
+
+
+@nx._dispatch
+def number_of_isolates(G):
+    """Returns the number of isolates in the graph.
+
+    An *isolate* is a node with no neighbors (that is, with degree
+    zero). For directed graphs, this means no in-neighbors and no
+    out-neighbors.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    int
+        The number of degree zero nodes in the graph `G`.
+
+    """
+    # TODO This can be parallelized.
+    return sum(1 for v in isolates(G))
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/link_prediction.py b/phivenv/Lib/site-packages/networkx/algorithms/link_prediction.py
new file mode 100644
index 0000000000000000000000000000000000000000..7335a77f5e0ac0e93d873d91bf1e4f25feeb6381
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/link_prediction.py
@@ -0,0 +1,604 @@
+"""
+Link prediction algorithms.
+"""
+
+
+from math import log
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = [
+    "resource_allocation_index",
+    "jaccard_coefficient",
+    "adamic_adar_index",
+    "preferential_attachment",
+    "cn_soundarajan_hopcroft",
+    "ra_index_soundarajan_hopcroft",
+    "within_inter_cluster",
+    "common_neighbor_centrality",
+]
+
+
+def _apply_prediction(G, func, ebunch=None):
+    """Applies the given function to each edge in the specified iterable
+    of edges.
+ + `G` is an instance of :class:`networkx.Graph`. + + `func` is a function on two inputs, each of which is a node in the + graph. The function can return anything, but it should return a + value representing a prediction of the likelihood of a "link" + joining the two nodes. + + `ebunch` is an iterable of pairs of nodes. If not specified, all + non-edges in the graph `G` will be used. + + """ + if ebunch is None: + ebunch = nx.non_edges(G) + return ((u, v, func(u, v)) for u, v in ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def resource_allocation_index(G, ebunch=None): + r"""Compute the resource allocation index of all node pairs in ebunch. + + Resource allocation index of `u` and `v` is defined as + + .. math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Resource allocation index will be computed for each pair of + nodes given in the iterable. The pairs must be given as + 2-tuples (u, v) where u and v are nodes in the graph. If ebunch + is None then all nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their resource allocation index. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.resource_allocation_index(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 0.75000000 + (2, 3) -> 0.75000000 + + References + ---------- + .. [1] T. Zhou, L. Lu, Y.-C. Zhang. + Predicting missing links via local information. + Eur. Phys. J. B 71 (2009) 623. + https://arxiv.org/pdf/0901.0553.pdf + """ + + def predict(u, v): + return sum(1 / G.degree(w) for w in nx.common_neighbors(G, u, v)) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def jaccard_coefficient(G, ebunch=None): + r"""Compute the Jaccard coefficient of all node pairs in ebunch. + + Jaccard coefficient of nodes `u` and `v` is defined as + + .. math:: + + \frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Jaccard coefficient will be computed for each pair of nodes + given in the iterable. The pairs must be given as 2-tuples + (u, v) where u and v are nodes in the graph. If ebunch is None + then all nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their Jaccard coefficient. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.jaccard_coefficient(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 0.60000000 + (2, 3) -> 0.60000000 + + References + ---------- + .. [1] D. Liben-Nowell, J. Kleinberg. + The Link Prediction Problem for Social Networks (2004). 
+ http://www.cs.cornell.edu/home/kleinber/link-pred.pdf + """ + + def predict(u, v): + union_size = len(set(G[u]) | set(G[v])) + if union_size == 0: + return 0 + return len(list(nx.common_neighbors(G, u, v))) / union_size + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def adamic_adar_index(G, ebunch=None): + r"""Compute the Adamic-Adar index of all node pairs in ebunch. + + Adamic-Adar index of `u` and `v` is defined as + + .. math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + This index leads to zero-division for nodes only connected via self-loops. + It is intended to be used when no self-loops are present. + + Parameters + ---------- + G : graph + NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Adamic-Adar index will be computed for each pair of nodes given + in the iterable. The pairs must be given as 2-tuples (u, v) + where u and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their Adamic-Adar index. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.adamic_adar_index(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 2.16404256 + (2, 3) -> 2.16404256 + + References + ---------- + .. [1] D. Liben-Nowell, J. Kleinberg. + The Link Prediction Problem for Social Networks (2004). + http://www.cs.cornell.edu/home/kleinber/link-pred.pdf + """ + + def predict(u, v): + return sum(1 / log(G.degree(w)) for w in nx.common_neighbors(G, u, v)) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def common_neighbor_centrality(G, ebunch=None, alpha=0.8): + r"""Return the CCPA score for each pair of nodes. + + Compute the Common Neighbor and Centrality based Parameterized Algorithm(CCPA) + score of all node pairs in ebunch. + + CCPA score of `u` and `v` is defined as + + .. math:: + + \alpha \cdot (|\Gamma (u){\cap }^{}\Gamma (v)|)+(1-\alpha )\cdot \frac{N}{{d}_{uv}} + + where $\Gamma(u)$ denotes the set of neighbors of $u$, $\Gamma(v)$ denotes the + set of neighbors of $v$, $\alpha$ is parameter varies between [0,1], $N$ denotes + total number of nodes in the Graph and ${d}_{uv}$ denotes shortest distance + between $u$ and $v$. + + This algorithm is based on two vital properties of nodes, namely the number + of common neighbors and their centrality. Common neighbor refers to the common + nodes between two nodes. Centrality refers to the prestige that a node enjoys + in a network. + + .. seealso:: + + :func:`common_neighbors` + + Parameters + ---------- + G : graph + NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Preferential attachment score will be computed for each pair of + nodes given in the iterable. The pairs must be given as + 2-tuples (u, v) where u and v are nodes in the graph. If ebunch + is None then all nonexistent edges in the graph will be used. + Default value: None. + + alpha : Parameter defined for participation of Common Neighbor + and Centrality Algorithm share. Values for alpha should + normally be between 0 and 1. 
Default value set to 0.8 + because author found better performance at 0.8 for all the + dataset. + Default value: 0.8 + + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their Common Neighbor and Centrality based + Parameterized Algorithm(CCPA) score. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.common_neighbor_centrality(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p}") + (0, 1) -> 3.4000000000000004 + (2, 3) -> 3.4000000000000004 + + References + ---------- + .. [1] Ahmad, I., Akhtar, M.U., Noor, S. et al. + Missing Link Prediction using Common Neighbor and Centrality based Parameterized Algorithm. + Sci Rep 10, 364 (2020). + https://doi.org/10.1038/s41598-019-57304-y + """ + + # When alpha == 1, the CCPA score simplifies to the number of common neighbors. + if alpha == 1: + + def predict(u, v): + if u == v: + raise nx.NetworkXAlgorithmError("Self links are not supported") + + return sum(1 for _ in nx.common_neighbors(G, u, v)) + + else: + spl = dict(nx.shortest_path_length(G)) + inf = float("inf") + + def predict(u, v): + if u == v: + raise nx.NetworkXAlgorithmError("Self links are not supported") + path_len = spl[u].get(v, inf) + + return alpha * sum(1 for _ in nx.common_neighbors(G, u, v)) + ( + 1 - alpha + ) * (G.number_of_nodes() / path_len) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def preferential_attachment(G, ebunch=None): + r"""Compute the preferential attachment score of all node pairs in ebunch. + + Preferential attachment score of `u` and `v` is defined as + + .. math:: + + |\Gamma(u)| |\Gamma(v)| + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Preferential attachment score will be computed for each pair of + nodes given in the iterable. The pairs must be given as + 2-tuples (u, v) where u and v are nodes in the graph. If ebunch + is None then all nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their preferential attachment score. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.preferential_attachment(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p}") + (0, 1) -> 16 + (2, 3) -> 16 + + References + ---------- + .. [1] D. Liben-Nowell, J. Kleinberg. + The Link Prediction Problem for Social Networks (2004). + http://www.cs.cornell.edu/home/kleinber/link-pred.pdf + """ + + def predict(u, v): + return G.degree(u) * G.degree(v) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(node_attrs="community") +def cn_soundarajan_hopcroft(G, ebunch=None, community="community"): + r"""Count the number of common neighbors of all node pairs in ebunch + using community information. + + For two nodes $u$ and $v$, this function computes the number of + common neighbors and bonus one for each common neighbor belonging to + the same community as $u$ and $v$. Mathematically, + + .. 
math:: + + |\Gamma(u) \cap \Gamma(v)| + \sum_{w \in \Gamma(u) \cap \Gamma(v)} f(w) + + where $f(w)$ equals 1 if $w$ belongs to the same community as $u$ + and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of + neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The score will be computed for each pair of nodes given in the + iterable. The pairs must be given as 2-tuples (u, v) where u + and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their score. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 0 + >>> G.nodes[2]["community"] = 0 + >>> preds = nx.cn_soundarajan_hopcroft(G, [(0, 2)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p}") + (0, 2) -> 2 + + References + ---------- + .. [1] Sucheta Soundarajan and John Hopcroft. + Using community information to improve the precision of link + prediction methods. + In Proceedings of the 21st international conference companion on + World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608. + http://doi.acm.org/10.1145/2187980.2188150 + """ + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + cnbors = list(nx.common_neighbors(G, u, v)) + neighbors = ( + sum(_community(G, w, community) == Cu for w in cnbors) if Cu == Cv else 0 + ) + return len(cnbors) + neighbors + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(node_attrs="community") +def ra_index_soundarajan_hopcroft(G, ebunch=None, community="community"): + r"""Compute the resource allocation index of all node pairs in + ebunch using community information. + + For two nodes $u$ and $v$, this function computes the resource + allocation index considering only common neighbors belonging to the + same community as $u$ and $v$. Mathematically, + + .. math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{f(w)}{|\Gamma(w)|} + + where $f(w)$ equals 1 if $w$ belongs to the same community as $u$ + and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of + neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The score will be computed for each pair of nodes given in the + iterable. The pairs must be given as 2-tuples (u, v) where u + and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their score. 
+ + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 0 + >>> G.nodes[2]["community"] = 1 + >>> G.nodes[3]["community"] = 0 + >>> preds = nx.ra_index_soundarajan_hopcroft(G, [(0, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 3) -> 0.50000000 + + References + ---------- + .. [1] Sucheta Soundarajan and John Hopcroft. + Using community information to improve the precision of link + prediction methods. + In Proceedings of the 21st international conference companion on + World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608. + http://doi.acm.org/10.1145/2187980.2188150 + """ + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + if Cu != Cv: + return 0 + cnbors = nx.common_neighbors(G, u, v) + return sum(1 / G.degree(w) for w in cnbors if _community(G, w, community) == Cu) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(node_attrs="community") +def within_inter_cluster(G, ebunch=None, delta=0.001, community="community"): + """Compute the ratio of within- and inter-cluster common neighbors + of all node pairs in ebunch. + + For two nodes `u` and `v`, if a common neighbor `w` belongs to the + same community as them, `w` is considered as within-cluster common + neighbor of `u` and `v`. Otherwise, it is considered as + inter-cluster common neighbor of `u` and `v`. The ratio between the + size of the set of within- and inter-cluster common neighbors is + defined as the WIC measure. [1]_ + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The WIC measure will be computed for each pair of nodes given in + the iterable. The pairs must be given as 2-tuples (u, v) where + u and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + delta : float, optional (default = 0.001) + Value to prevent division by zero in case there is no + inter-cluster common neighbor between two nodes. See [1]_ for + details. Default value: 0.001. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their WIC measure. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)]) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 1 + >>> G.nodes[2]["community"] = 0 + >>> G.nodes[3]["community"] = 0 + >>> G.nodes[4]["community"] = 0 + >>> preds = nx.within_inter_cluster(G, [(0, 4)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 4) -> 1.99800200 + >>> preds = nx.within_inter_cluster(G, [(0, 4)], delta=0.5) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 4) -> 1.33333333 + + References + ---------- + .. [1] Jorge Carlos Valverde-Rebaza and Alneu de Andrade Lopes. + Link prediction in complex networks based on cluster information. 
+ In Proceedings of the 21st Brazilian conference on Advances in + Artificial Intelligence (SBIA'12) + https://doi.org/10.1007/978-3-642-34459-6_10 + """ + if delta <= 0: + raise nx.NetworkXAlgorithmError("Delta must be greater than zero") + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + if Cu != Cv: + return 0 + cnbors = set(nx.common_neighbors(G, u, v)) + within = {w for w in cnbors if _community(G, w, community) == Cu} + inter = cnbors - within + return len(within) / (len(inter) + delta) + + return _apply_prediction(G, predict, ebunch) + + +def _community(G, u, community): + """Get the community of the given node.""" + node_u = G.nodes[u] + try: + return node_u[community] + except KeyError as err: + raise nx.NetworkXAlgorithmError("No community information") from err diff --git a/phivenv/Lib/site-packages/networkx/algorithms/lowest_common_ancestors.py b/phivenv/Lib/site-packages/networkx/algorithms/lowest_common_ancestors.py new file mode 100644 index 0000000000000000000000000000000000000000..ca21d73766f2287103689ea6bcf6d86dd3eebced --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/lowest_common_ancestors.py @@ -0,0 +1,268 @@ +"""Algorithms for finding the lowest common ancestor of trees and DAGs.""" +from collections import defaultdict +from collections.abc import Mapping, Set +from itertools import combinations_with_replacement + +import networkx as nx +from networkx.utils import UnionFind, arbitrary_element, not_implemented_for + +__all__ = [ + "all_pairs_lowest_common_ancestor", + "tree_all_pairs_lowest_common_ancestor", + "lowest_common_ancestor", +] + + +@not_implemented_for("undirected") +@nx._dispatch +def all_pairs_lowest_common_ancestor(G, pairs=None): + """Return the lowest common ancestor of all pairs or the provided pairs + + Parameters + ---------- + G : NetworkX directed graph + + pairs : iterable of pairs of nodes, optional (default: all pairs) + The pairs of nodes of interest. + If None, will find the LCA of all pairs of nodes. + + Yields + ------ + ((node1, node2), lca) : 2-tuple + Where lca is least common ancestor of node1 and node2. + Note that for the default case, the order of the node pair is not considered, + e.g. you will not get both ``(a, b)`` and ``(b, a)`` + + Raises + ------ + NetworkXPointlessConcept + If `G` is null. + NetworkXError + If `G` is not a DAG. + + Examples + -------- + The default behavior is to yield the lowest common ancestor for all + possible combinations of nodes in `G`, including self-pairings: + + >>> G = nx.DiGraph([(0, 1), (0, 3), (1, 2)]) + >>> dict(nx.all_pairs_lowest_common_ancestor(G)) + {(0, 0): 0, (0, 1): 0, (0, 3): 0, (0, 2): 0, (1, 1): 1, (1, 3): 0, (1, 2): 1, (3, 3): 3, (3, 2): 0, (2, 2): 2} + + The pairs argument can be used to limit the output to only the + specified node pairings: + + >>> dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (2, 3)])) + {(1, 2): 1, (2, 3): 0} + + Notes + ----- + Only defined on non-null directed acyclic graphs. + + See Also + -------- + lowest_common_ancestor + """ + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("LCA only defined on directed acyclic graphs.") + if len(G) == 0: + raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.") + + if pairs is None: + pairs = combinations_with_replacement(G, 2) + else: + # Convert iterator to iterable, if necessary. Trim duplicates. 
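+        # (dict.fromkeys keeps only the first occurrence of each pair and,
+        # unlike a plain set, preserves the order in which pairs were given.)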
+ pairs = dict.fromkeys(pairs) + # Verify that each of the nodes in the provided pairs is in G + nodeset = set(G) + for pair in pairs: + if set(pair) - nodeset: + raise nx.NodeNotFound( + f"Node(s) {set(pair) - nodeset} from pair {pair} not in G." + ) + + # Once input validation is done, construct the generator + def generate_lca_from_pairs(G, pairs): + ancestor_cache = {} + + for v, w in pairs: + if v not in ancestor_cache: + ancestor_cache[v] = nx.ancestors(G, v) + ancestor_cache[v].add(v) + if w not in ancestor_cache: + ancestor_cache[w] = nx.ancestors(G, w) + ancestor_cache[w].add(w) + + common_ancestors = ancestor_cache[v] & ancestor_cache[w] + + if common_ancestors: + common_ancestor = next(iter(common_ancestors)) + while True: + successor = None + for lower_ancestor in G.successors(common_ancestor): + if lower_ancestor in common_ancestors: + successor = lower_ancestor + break + if successor is None: + break + common_ancestor = successor + yield ((v, w), common_ancestor) + + return generate_lca_from_pairs(G, pairs) + + +@not_implemented_for("undirected") +@nx._dispatch +def lowest_common_ancestor(G, node1, node2, default=None): + """Compute the lowest common ancestor of the given pair of nodes. + + Parameters + ---------- + G : NetworkX directed graph + + node1, node2 : nodes in the graph. + + default : object + Returned if no common ancestor between `node1` and `node2` + + Returns + ------- + The lowest common ancestor of node1 and node2, + or default if they have no common ancestors. + + Examples + -------- + >>> G = nx.DiGraph() + >>> nx.add_path(G, (0, 1, 2, 3)) + >>> nx.add_path(G, (0, 4, 3)) + >>> nx.lowest_common_ancestor(G, 2, 4) + 0 + + See Also + -------- + all_pairs_lowest_common_ancestor""" + + ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)])) + if ans: + assert len(ans) == 1 + return ans[0][1] + return default + + +@not_implemented_for("undirected") +@nx._dispatch +def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None): + r"""Yield the lowest common ancestor for sets of pairs in a tree. + + Parameters + ---------- + G : NetworkX directed graph (must be a tree) + + root : node, optional (default: None) + The root of the subtree to operate on. + If None, assume the entire graph has exactly one source and use that. + + pairs : iterable or iterator of pairs of nodes, optional (default: None) + The pairs of interest. If None, Defaults to all pairs of nodes + under `root` that have a lowest common ancestor. + + Returns + ------- + lcas : generator of tuples `((u, v), lca)` where `u` and `v` are nodes + in `pairs` and `lca` is their lowest common ancestor. + + Examples + -------- + >>> import pprint + >>> G = nx.DiGraph([(1, 3), (2, 4), (1, 2)]) + >>> pprint.pprint(dict(nx.tree_all_pairs_lowest_common_ancestor(G))) + {(1, 1): 1, + (2, 1): 1, + (2, 2): 2, + (3, 1): 1, + (3, 2): 1, + (3, 3): 3, + (3, 4): 1, + (4, 1): 1, + (4, 2): 2, + (4, 4): 4} + + We can also use `pairs` argument to specify the pairs of nodes for which we + want to compute lowest common ancestors. Here is an example: + + >>> dict(nx.tree_all_pairs_lowest_common_ancestor(G, pairs=[(1, 4), (2, 3)])) + {(2, 3): 1, (1, 4): 1} + + Notes + ----- + Only defined on non-null trees represented with directed edges from + parents to children. Uses Tarjan's off-line lowest-common-ancestors + algorithm. 
Runs in time $O(4 \times (V + E + P))$ time, where 4 is the largest + value of the inverse Ackermann function likely to ever come up in actual + use, and $P$ is the number of pairs requested (or $V^2$ if all are needed). + + Tarjan, R. E. (1979), "Applications of path compression on balanced trees", + Journal of the ACM 26 (4): 690-715, doi:10.1145/322154.322161. + + See Also + -------- + all_pairs_lowest_common_ancestor: similar routine for general DAGs + lowest_common_ancestor: just a single pair for general DAGs + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.") + + # Index pairs of interest for efficient lookup from either side. + if pairs is not None: + pair_dict = defaultdict(set) + # See note on all_pairs_lowest_common_ancestor. + if not isinstance(pairs, (Mapping, Set)): + pairs = set(pairs) + for u, v in pairs: + for n in (u, v): + if n not in G: + msg = f"The node {str(n)} is not in the digraph." + raise nx.NodeNotFound(msg) + pair_dict[u].add(v) + pair_dict[v].add(u) + + # If root is not specified, find the exactly one node with in degree 0 and + # use it. Raise an error if none are found, or more than one is. Also check + # for any nodes with in degree larger than 1, which would imply G is not a + # tree. + if root is None: + for n, deg in G.in_degree: + if deg == 0: + if root is not None: + msg = "No root specified and tree has multiple sources." + raise nx.NetworkXError(msg) + root = n + # checking deg>1 is not sufficient for MultiDiGraphs + elif deg > 1 and len(G.pred[n]) > 1: + msg = "Tree LCA only defined on trees; use DAG routine." + raise nx.NetworkXError(msg) + if root is None: + raise nx.NetworkXError("Graph contains a cycle.") + + # Iterative implementation of Tarjan's offline lca algorithm + # as described in CLRS on page 521 (2nd edition)/page 584 (3rd edition) + uf = UnionFind() + ancestors = {} + for node in G: + ancestors[node] = uf[node] + + colors = defaultdict(bool) + for node in nx.dfs_postorder_nodes(G, root): + colors[node] = True + for v in pair_dict[node] if pairs is not None else G: + if colors[v]: + # If the user requested both directions of a pair, give it. + # Otherwise, just give one. + if pairs is not None and (node, v) in pairs: + yield (node, v), ancestors[uf[v]] + if pairs is None or (v, node) in pairs: + yield (v, node), ancestors[uf[v]] + if node != root: + parent = arbitrary_element(G.pred[node]) + uf.union(parent, node) + ancestors[uf[parent]] = parent diff --git a/phivenv/Lib/site-packages/networkx/algorithms/matching.py b/phivenv/Lib/site-packages/networkx/algorithms/matching.py new file mode 100644 index 0000000000000000000000000000000000000000..b20d7f6970cd3a3c3373c8ced4d31ac4dc85c1e2 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/matching.py @@ -0,0 +1,1151 @@ +"""Functions for computing and verifying matchings in a graph.""" +from collections import Counter +from itertools import combinations, repeat + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "is_matching", + "is_maximal_matching", + "is_perfect_matching", + "max_weight_matching", + "min_weight_matching", + "maximal_matching", +] + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch +def maximal_matching(G): + r"""Find a maximal matching in the graph. + + A matching is a subset of edges in which no node occurs more than once. + A maximal matching cannot add more edges and still be a matching. 
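+
+    Note that a maximal matching is not necessarily a *maximum* matching:
+    no further edge of `G` can be added to it, but a different matching
+    of `G` may still contain more edges.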
+ + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + matching : set + A maximal matching of the graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)]) + >>> sorted(nx.maximal_matching(G)) + [(1, 2), (3, 5)] + + Notes + ----- + The algorithm greedily selects a maximal matching M of the graph G + (i.e. no superset of M exists). It runs in $O(|E|)$ time. + """ + matching = set() + nodes = set() + for edge in G.edges(): + # If the edge isn't covered, add it to the matching + # then remove neighborhood of u and v from consideration. + u, v = edge + if u not in nodes and v not in nodes and u != v: + matching.add(edge) + nodes.update(edge) + return matching + + +def matching_dict_to_set(matching): + """Converts matching dict format to matching set format + + Converts a dictionary representing a matching (as returned by + :func:`max_weight_matching`) to a set representing a matching (as + returned by :func:`maximal_matching`). + + In the definition of maximal matching adopted by NetworkX, + self-loops are not allowed, so the provided dictionary is expected + to never have any mapping from a key to itself. However, the + dictionary is expected to have mirrored key/value pairs, for + example, key ``u`` with value ``v`` and key ``v`` with value ``u``. + + """ + edges = set() + for edge in matching.items(): + u, v = edge + if (v, u) in edges or edge in edges: + continue + if u == v: + raise nx.NetworkXError(f"Selfloops cannot appear in matchings {edge}") + edges.add(edge) + return edges + + +@nx._dispatch +def is_matching(G, matching): + """Return True if ``matching`` is a valid matching of ``G`` + + A *matching* in a graph is a set of edges in which no two distinct + edges share a common endpoint. Each node is incident to at most one + edge in the matching. The edges are said to be independent. + + Parameters + ---------- + G : NetworkX graph + + matching : dict or set + A dictionary or set representing a matching. If a dictionary, it + must have ``matching[u] == v`` and ``matching[v] == u`` for each + edge ``(u, v)`` in the matching. If a set, it must have elements + of the form ``(u, v)``, where ``(u, v)`` is an edge in the + matching. + + Returns + ------- + bool + Whether the given set or dictionary represents a valid matching + in the graph. + + Raises + ------ + NetworkXError + If the proposed matching has an edge to a node not in G. + Or if the matching is not a collection of 2-tuple edges. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)]) + >>> nx.is_maximal_matching(G, {1: 3, 2: 4}) # using dict to represent matching + True + + >>> nx.is_matching(G, {(1, 3), (2, 4)}) # using set to represent matching + True + + """ + if isinstance(matching, dict): + matching = matching_dict_to_set(matching) + + nodes = set() + for edge in matching: + if len(edge) != 2: + raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") + u, v = edge + if u not in G or v not in G: + raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") + if u == v: + return False + if not G.has_edge(u, v): + return False + if u in nodes or v in nodes: + return False + nodes.update(edge) + return True + + +@nx._dispatch +def is_maximal_matching(G, matching): + """Return True if ``matching`` is a maximal matching of ``G`` + + A *maximal matching* in a graph is a matching in which adding any + edge would cause the set to no longer be a valid matching. 
+ + Parameters + ---------- + G : NetworkX graph + + matching : dict or set + A dictionary or set representing a matching. If a dictionary, it + must have ``matching[u] == v`` and ``matching[v] == u`` for each + edge ``(u, v)`` in the matching. If a set, it must have elements + of the form ``(u, v)``, where ``(u, v)`` is an edge in the + matching. + + Returns + ------- + bool + Whether the given set or dictionary represents a valid maximal + matching in the graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]) + >>> nx.is_maximal_matching(G, {(1, 2), (3, 4)}) + True + + """ + if isinstance(matching, dict): + matching = matching_dict_to_set(matching) + # If the given set is not a matching, then it is not a maximal matching. + edges = set() + nodes = set() + for edge in matching: + if len(edge) != 2: + raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") + u, v = edge + if u not in G or v not in G: + raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") + if u == v: + return False + if not G.has_edge(u, v): + return False + if u in nodes or v in nodes: + return False + nodes.update(edge) + edges.add(edge) + edges.add((v, u)) + # A matching is maximal if adding any new edge from G to it + # causes the resulting set to match some node twice. + # Be careful to check for adding selfloops + for u, v in G.edges: + if (u, v) not in edges: + # could add edge (u, v) to edges and have a bigger matching + if u not in nodes and v not in nodes and u != v: + return False + return True + + +@nx._dispatch +def is_perfect_matching(G, matching): + """Return True if ``matching`` is a perfect matching for ``G`` + + A *perfect matching* in a graph is a matching in which exactly one edge + is incident upon each vertex. + + Parameters + ---------- + G : NetworkX graph + + matching : dict or set + A dictionary or set representing a matching. If a dictionary, it + must have ``matching[u] == v`` and ``matching[v] == u`` for each + edge ``(u, v)`` in the matching. If a set, it must have elements + of the form ``(u, v)``, where ``(u, v)`` is an edge in the + matching. + + Returns + ------- + bool + Whether the given set or dictionary represents a valid perfect + matching in the graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5), (4, 6)]) + >>> my_match = {1: 2, 3: 5, 4: 6} + >>> nx.is_perfect_matching(G, my_match) + True + + """ + if isinstance(matching, dict): + matching = matching_dict_to_set(matching) + + nodes = set() + for edge in matching: + if len(edge) != 2: + raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") + u, v = edge + if u not in G or v not in G: + raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") + if u == v: + return False + if not G.has_edge(u, v): + return False + if u in nodes or v in nodes: + return False + nodes.update(edge) + return len(nodes) == len(G) + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def min_weight_matching(G, weight="weight"): + """Computing a minimum-weight maximal matching of G. + + Use the maximum-weight algorithm with edge weights subtracted + from the maximum weight of all edges. + + A matching is a subset of edges in which no node occurs more than once. + The weight of a matching is the sum of the weights of its edges. + A maximal matching cannot add more edges and still be a matching. + The cardinality of a matching is the number of matched edges. 
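+
+    Because the search is restricted to matchings of maximum cardinality
+    (see below), the result has minimum weight among all matchings that
+    use the largest possible number of edges.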
+ + This method replaces the edge weights with 1 plus the maximum edge weight + minus the original edge weight. + + new_weight = (max_weight + 1) - edge_weight + + then runs :func:`max_weight_matching` with the new weights. + The max weight matching with these new weights corresponds + to the min weight matching using the original weights. + Adding 1 to the max edge weight keeps all edge weights positive + and as integers if they started as integers. + + You might worry that adding 1 to each weight would make the algorithm + favor matchings with more edges. But we use the parameter + `maxcardinality=True` in `max_weight_matching` to ensure that the + number of edges in the competing matchings are the same and thus + the optimum does not change due to changes in the number of edges. + + Read the documentation of `max_weight_matching` for more information. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + If key not found, uses 1 as weight. + + Returns + ------- + matching : set + A minimal weight matching of the graph. + + See Also + -------- + max_weight_matching + """ + if len(G.edges) == 0: + return max_weight_matching(G, maxcardinality=True, weight=weight) + G_edges = G.edges(data=weight, default=1) + max_weight = 1 + max(w for _, _, w in G_edges) + InvG = nx.Graph() + edges = ((u, v, max_weight - w) for u, v, w in G_edges) + InvG.add_weighted_edges_from(edges, weight=weight) + return max_weight_matching(InvG, maxcardinality=True, weight=weight) + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def max_weight_matching(G, maxcardinality=False, weight="weight"): + """Compute a maximum-weighted matching of G. + + A matching is a subset of edges in which no node occurs more than once. + The weight of a matching is the sum of the weights of its edges. + A maximal matching cannot add more edges and still be a matching. + The cardinality of a matching is the number of matched edges. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + maxcardinality: bool, optional (default=False) + If maxcardinality is True, compute the maximum-cardinality matching + with maximum weight among all maximum-cardinality matchings. + + weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + If key not found, uses 1 as weight. + + + Returns + ------- + matching : set + A maximal matching of the graph. + + Examples + -------- + >>> G = nx.Graph() + >>> edges = [(1, 2, 6), (1, 3, 2), (2, 3, 1), (2, 4, 7), (3, 5, 9), (4, 5, 3)] + >>> G.add_weighted_edges_from(edges) + >>> sorted(nx.max_weight_matching(G)) + [(2, 4), (5, 3)] + + Notes + ----- + If G has edges with weight attributes the edge data are used as + weight values else the weights are assumed to be 1. + + This function takes time O(number_of_nodes ** 3). + + If all edge weights are integers, the algorithm uses only integer + computations. If floating point weights are used, the algorithm + could return a slightly suboptimal matching due to numeric + precision errors. + + This method is based on the "blossom" method for finding augmenting + paths and the "primal-dual" method for finding a matching of maximum + weight, both methods invented by Jack Edmonds [1]_. + + Bipartite graphs can also be matched using the functions present in + :mod:`networkx.algorithms.bipartite.matching`. + + References + ---------- + .. 
[1] "Efficient Algorithms for Finding Maximum Matching in Graphs", + Zvi Galil, ACM Computing Surveys, 1986. + """ + # + # The algorithm is taken from "Efficient Algorithms for Finding Maximum + # Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986. + # It is based on the "blossom" method for finding augmenting paths and + # the "primal-dual" method for finding a matching of maximum weight, both + # methods invented by Jack Edmonds. + # + # A C program for maximum weight matching by Ed Rothberg was used + # extensively to validate this new code. + # + # Many terms used in the code comments are explained in the paper + # by Galil. You will probably need the paper to make sense of this code. + # + + class NoNode: + """Dummy value which is different from any node.""" + + class Blossom: + """Representation of a non-trivial blossom or sub-blossom.""" + + __slots__ = ["childs", "edges", "mybestedges"] + + # b.childs is an ordered list of b's sub-blossoms, starting with + # the base and going round the blossom. + + # b.edges is the list of b's connecting edges, such that + # b.edges[i] = (v, w) where v is a vertex in b.childs[i] + # and w is a vertex in b.childs[wrap(i+1)]. + + # If b is a top-level S-blossom, + # b.mybestedges is a list of least-slack edges to neighbouring + # S-blossoms, or None if no such list has been computed yet. + # This is used for efficient computation of delta3. + + # Generate the blossom's leaf vertices. + def leaves(self): + stack = [*self.childs] + while stack: + t = stack.pop() + if isinstance(t, Blossom): + stack.extend(t.childs) + else: + yield t + + # Get a list of vertices. + gnodes = list(G) + if not gnodes: + return set() # don't bother with empty graphs + + # Find the maximum edge weight. + maxweight = 0 + allinteger = True + for i, j, d in G.edges(data=True): + wt = d.get(weight, 1) + if i != j and wt > maxweight: + maxweight = wt + allinteger = allinteger and (str(type(wt)).split("'")[1] in ("int", "long")) + + # If v is a matched vertex, mate[v] is its partner vertex. + # If v is a single vertex, v does not occur as a key in mate. + # Initially all vertices are single; updated during augmentation. + mate = {} + + # If b is a top-level blossom, + # label.get(b) is None if b is unlabeled (free), + # 1 if b is an S-blossom, + # 2 if b is a T-blossom. + # The label of a vertex is found by looking at the label of its top-level + # containing blossom. + # If v is a vertex inside a T-blossom, label[v] is 2 iff v is reachable + # from an S-vertex outside the blossom. + # Labels are assigned during a stage and reset after each augmentation. + label = {} + + # If b is a labeled top-level blossom, + # labeledge[b] = (v, w) is the edge through which b obtained its label + # such that w is a vertex in b, or None if b's base vertex is single. + # If w is a vertex inside a T-blossom and label[w] == 2, + # labeledge[w] = (v, w) is an edge through which w is reachable from + # outside the blossom. + labeledge = {} + + # If v is a vertex, inblossom[v] is the top-level blossom to which v + # belongs. + # If v is a top-level vertex, inblossom[v] == v since v is itself + # a (trivial) top-level blossom. + # Initially all vertices are top-level trivial blossoms. + inblossom = dict(zip(gnodes, gnodes)) + + # If b is a sub-blossom, + # blossomparent[b] is its immediate parent (sub-)blossom. + # If b is a top-level blossom, blossomparent[b] is None. 
+ blossomparent = dict(zip(gnodes, repeat(None))) + + # If b is a (sub-)blossom, + # blossombase[b] is its base VERTEX (i.e. recursive sub-blossom). + blossombase = dict(zip(gnodes, gnodes)) + + # If w is a free vertex (or an unreached vertex inside a T-blossom), + # bestedge[w] = (v, w) is the least-slack edge from an S-vertex, + # or None if there is no such edge. + # If b is a (possibly trivial) top-level S-blossom, + # bestedge[b] = (v, w) is the least-slack edge to a different S-blossom + # (v inside b), or None if there is no such edge. + # This is used for efficient computation of delta2 and delta3. + bestedge = {} + + # If v is a vertex, + # dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual + # optimization problem (if all edge weights are integers, multiplication + # by two ensures that all values remain integers throughout the algorithm). + # Initially, u(v) = maxweight / 2. + dualvar = dict(zip(gnodes, repeat(maxweight))) + + # If b is a non-trivial blossom, + # blossomdual[b] = z(b) where z(b) is b's variable in the dual + # optimization problem. + blossomdual = {} + + # If (v, w) in allowedge or (w, v) in allowedg, then the edge + # (v, w) is known to have zero slack in the optimization problem; + # otherwise the edge may or may not have zero slack. + allowedge = {} + + # Queue of newly discovered S-vertices. + queue = [] + + # Return 2 * slack of edge (v, w) (does not work inside blossoms). + def slack(v, w): + return dualvar[v] + dualvar[w] - 2 * G[v][w].get(weight, 1) + + # Assign label t to the top-level blossom containing vertex w, + # coming through an edge from vertex v. + def assignLabel(w, t, v): + b = inblossom[w] + assert label.get(w) is None and label.get(b) is None + label[w] = label[b] = t + if v is not None: + labeledge[w] = labeledge[b] = (v, w) + else: + labeledge[w] = labeledge[b] = None + bestedge[w] = bestedge[b] = None + if t == 1: + # b became an S-vertex/blossom; add it(s vertices) to the queue. + if isinstance(b, Blossom): + queue.extend(b.leaves()) + else: + queue.append(b) + elif t == 2: + # b became a T-vertex/blossom; assign label S to its mate. + # (If b is a non-trivial blossom, its base is the only vertex + # with an external mate.) + base = blossombase[b] + assignLabel(mate[base], 1, base) + + # Trace back from vertices v and w to discover either a new blossom + # or an augmenting path. Return the base vertex of the new blossom, + # or NoNode if an augmenting path was found. + def scanBlossom(v, w): + # Trace back from v and w, placing breadcrumbs as we go. + path = [] + base = NoNode + while v is not NoNode: + # Look for a breadcrumb in v's blossom or put a new breadcrumb. + b = inblossom[v] + if label[b] & 4: + base = blossombase[b] + break + assert label[b] == 1 + path.append(b) + label[b] = 5 + # Trace one step back. + if labeledge[b] is None: + # The base of blossom b is single; stop tracing this path. + assert blossombase[b] not in mate + v = NoNode + else: + assert labeledge[b][0] == mate[blossombase[b]] + v = labeledge[b][0] + b = inblossom[v] + assert label[b] == 2 + # b is a T-blossom; trace one more step back. + v = labeledge[b][0] + # Swap v and w so that we alternate between both paths. + if w is not NoNode: + v, w = w, v + # Remove breadcrumbs. + for b in path: + label[b] = 1 + # Return base vertex, if we found one. + return base + + # Construct a new blossom with given base, through S-vertices v and w. 
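+    # (`base` is the common ancestor returned by scanBlossom; the blossom
+    # is the odd cycle formed by the edge (v, w) together with the two
+    # alternating paths leading from v and from w back to `base`.)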
+ # Label the new blossom as S; set its dual variable to zero; + # relabel its T-vertices to S and add them to the queue. + def addBlossom(base, v, w): + bb = inblossom[base] + bv = inblossom[v] + bw = inblossom[w] + # Create blossom. + b = Blossom() + blossombase[b] = base + blossomparent[b] = None + blossomparent[bb] = b + # Make list of sub-blossoms and their interconnecting edge endpoints. + b.childs = path = [] + b.edges = edgs = [(v, w)] + # Trace back from v to base. + while bv != bb: + # Add bv to the new blossom. + blossomparent[bv] = b + path.append(bv) + edgs.append(labeledge[bv]) + assert label[bv] == 2 or ( + label[bv] == 1 and labeledge[bv][0] == mate[blossombase[bv]] + ) + # Trace one step back. + v = labeledge[bv][0] + bv = inblossom[v] + # Add base sub-blossom; reverse lists. + path.append(bb) + path.reverse() + edgs.reverse() + # Trace back from w to base. + while bw != bb: + # Add bw to the new blossom. + blossomparent[bw] = b + path.append(bw) + edgs.append((labeledge[bw][1], labeledge[bw][0])) + assert label[bw] == 2 or ( + label[bw] == 1 and labeledge[bw][0] == mate[blossombase[bw]] + ) + # Trace one step back. + w = labeledge[bw][0] + bw = inblossom[w] + # Set label to S. + assert label[bb] == 1 + label[b] = 1 + labeledge[b] = labeledge[bb] + # Set dual variable to zero. + blossomdual[b] = 0 + # Relabel vertices. + for v in b.leaves(): + if label[inblossom[v]] == 2: + # This T-vertex now turns into an S-vertex because it becomes + # part of an S-blossom; add it to the queue. + queue.append(v) + inblossom[v] = b + # Compute b.mybestedges. + bestedgeto = {} + for bv in path: + if isinstance(bv, Blossom): + if bv.mybestedges is not None: + # Walk this subblossom's least-slack edges. + nblist = bv.mybestedges + # The sub-blossom won't need this data again. + bv.mybestedges = None + else: + # This subblossom does not have a list of least-slack + # edges; get the information from the vertices. + nblist = [ + (v, w) for v in bv.leaves() for w in G.neighbors(v) if v != w + ] + else: + nblist = [(bv, w) for w in G.neighbors(bv) if bv != w] + for k in nblist: + (i, j) = k + if inblossom[j] == b: + i, j = j, i + bj = inblossom[j] + if ( + bj != b + and label.get(bj) == 1 + and ((bj not in bestedgeto) or slack(i, j) < slack(*bestedgeto[bj])) + ): + bestedgeto[bj] = k + # Forget about least-slack edge of the subblossom. + bestedge[bv] = None + b.mybestedges = list(bestedgeto.values()) + # Select bestedge[b]. + mybestedge = None + bestedge[b] = None + for k in b.mybestedges: + kslack = slack(*k) + if mybestedge is None or kslack < mybestslack: + mybestedge = k + mybestslack = kslack + bestedge[b] = mybestedge + + # Expand the given top-level blossom. + def expandBlossom(b, endstage): + # This is an obnoxiously complicated recursive function for the sake of + # a stack-transformation. So, we hack around the complexity by using + # a trampoline pattern. By yielding the arguments to each recursive + # call, we keep the actual callstack flat. + + def _recurse(b, endstage): + # Convert sub-blossoms into top-level blossoms. + for s in b.childs: + blossomparent[s] = None + if isinstance(s, Blossom): + if endstage and blossomdual[s] == 0: + # Recursively expand this sub-blossom. + yield s + else: + for v in s.leaves(): + inblossom[v] = s + else: + inblossom[s] = s + # If we expand a T-blossom during a stage, its sub-blossoms must be + # relabeled. 
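+            # (Only sub-blossoms on the even-length path from the entry
+            # point back to the base get new labels; the remaining ones
+            # become unlabeled unless they are reachable from an S-vertex
+            # outside the expanding blossom.)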
+            if (not endstage) and label.get(b) == 2:
+                # Start at the sub-blossom through which the expanding
+                # blossom obtained its label, and relabel sub-blossoms until
+                # we reach the base.
+                # Figure out through which sub-blossom the expanding blossom
+                # obtained its label initially.
+                entrychild = inblossom[labeledge[b][1]]
+                # Decide in which direction we will go round the blossom.
+                j = b.childs.index(entrychild)
+                if j & 1:
+                    # Start index is odd; go forward and wrap.
+                    j -= len(b.childs)
+                    jstep = 1
+                else:
+                    # Start index is even; go backward.
+                    jstep = -1
+                # Move along the blossom until we get to the base.
+                v, w = labeledge[b]
+                while j != 0:
+                    # Relabel the T-sub-blossom.
+                    if jstep == 1:
+                        p, q = b.edges[j]
+                    else:
+                        q, p = b.edges[j - 1]
+                    label[w] = None
+                    label[q] = None
+                    assignLabel(w, 2, v)
+                    # Step to the next S-sub-blossom and note its forward edge.
+                    allowedge[(p, q)] = allowedge[(q, p)] = True
+                    j += jstep
+                    if jstep == 1:
+                        v, w = b.edges[j]
+                    else:
+                        w, v = b.edges[j - 1]
+                    # Step to the next T-sub-blossom.
+                    allowedge[(v, w)] = allowedge[(w, v)] = True
+                    j += jstep
+                # Relabel the base T-sub-blossom WITHOUT stepping through to
+                # its mate (so don't call assignLabel).
+                bw = b.childs[j]
+                label[w] = label[bw] = 2
+                labeledge[w] = labeledge[bw] = (v, w)
+                bestedge[bw] = None
+                # Continue along the blossom until we get back to entrychild.
+                j += jstep
+                while b.childs[j] != entrychild:
+                    # Examine the vertices of the sub-blossom to see whether
+                    # it is reachable from a neighbouring S-vertex outside the
+                    # expanding blossom.
+                    bv = b.childs[j]
+                    if label.get(bv) == 1:
+                        # This sub-blossom just got label S through one of its
+                        # neighbours; leave it be.
+                        j += jstep
+                        continue
+                    if isinstance(bv, Blossom):
+                        for v in bv.leaves():
+                            if label.get(v):
+                                break
+                    else:
+                        v = bv
+                    # If the sub-blossom contains a reachable vertex, assign
+                    # label T to the sub-blossom.
+                    if label.get(v):
+                        assert label[v] == 2
+                        assert inblossom[v] == bv
+                        label[v] = None
+                        label[mate[blossombase[bv]]] = None
+                        assignLabel(v, 2, labeledge[v][0])
+                    j += jstep
+            # Remove the expanded blossom entirely.
+            label.pop(b, None)
+            labeledge.pop(b, None)
+            bestedge.pop(b, None)
+            del blossomparent[b]
+            del blossombase[b]
+            del blossomdual[b]
+
+        # Now, we apply the trampoline pattern. We simulate a recursive
+        # callstack by maintaining a stack of generators, each yielding a
+        # sequence of function arguments. We grow the stack by appending a call
+        # to _recurse on each argument tuple, and shrink the stack whenever a
+        # generator is exhausted.
+        stack = [_recurse(b, endstage)]
+        while stack:
+            top = stack[-1]
+            for s in top:
+                stack.append(_recurse(s, endstage))
+                break
+            else:
+                stack.pop()
+
+    # Swap matched/unmatched edges over an alternating path through blossom b
+    # between vertex v and the base vertex. Keep blossom bookkeeping
+    # consistent.
+    def augmentBlossom(b, v):
+        # This is an obnoxiously complicated recursive function for the sake of
+        # a stack-transformation. So, we hack around the complexity by using
+        # a trampoline pattern. By yielding the arguments to each recursive
+        # call, we keep the actual callstack flat.
+
+        def _recurse(b, v):
+            # Bubble up through the blossom tree from vertex v to an immediate
+            # sub-blossom of b.
+            t = v
+            while blossomparent[t] != b:
+                t = blossomparent[t]
+            # Recursively deal with the first sub-blossom.
+            if isinstance(t, Blossom):
+                yield (t, v)
+            # Decide in which direction we will go round the blossom.
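+            # (The direction is chosen so that an even number of blossom
+            # edges is traversed on the way to the base, keeping matched
+            # and unmatched edges properly alternating.)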
+ i = j = b.childs.index(t) + if i & 1: + # Start index is odd; go forward and wrap. + j -= len(b.childs) + jstep = 1 + else: + # Start index is even; go backward. + jstep = -1 + # Move along the blossom until we get to the base. + while j != 0: + # Step to the next sub-blossom and augment it recursively. + j += jstep + t = b.childs[j] + if jstep == 1: + w, x = b.edges[j] + else: + x, w = b.edges[j - 1] + if isinstance(t, Blossom): + yield (t, w) + # Step to the next sub-blossom and augment it recursively. + j += jstep + t = b.childs[j] + if isinstance(t, Blossom): + yield (t, x) + # Match the edge connecting those sub-blossoms. + mate[w] = x + mate[x] = w + # Rotate the list of sub-blossoms to put the new base at the front. + b.childs = b.childs[i:] + b.childs[:i] + b.edges = b.edges[i:] + b.edges[:i] + blossombase[b] = blossombase[b.childs[0]] + assert blossombase[b] == v + + # Now, we apply the trampoline pattern. We simulate a recursive + # callstack by maintaining a stack of generators, each yielding a + # sequence of function arguments. We grow the stack by appending a call + # to _recurse on each argument tuple, and shrink the stack whenever a + # generator is exhausted. + stack = [_recurse(b, v)] + while stack: + top = stack[-1] + for args in top: + stack.append(_recurse(*args)) + break + else: + stack.pop() + + # Swap matched/unmatched edges over an alternating path between two + # single vertices. The augmenting path runs through S-vertices v and w. + def augmentMatching(v, w): + for s, j in ((v, w), (w, v)): + # Match vertex s to vertex j. Then trace back from s + # until we find a single vertex, swapping matched and unmatched + # edges as we go. + while 1: + bs = inblossom[s] + assert label[bs] == 1 + assert (labeledge[bs] is None and blossombase[bs] not in mate) or ( + labeledge[bs][0] == mate[blossombase[bs]] + ) + # Augment through the S-blossom from s to base. + if isinstance(bs, Blossom): + augmentBlossom(bs, s) + # Update mate[s] + mate[s] = j + # Trace one step back. + if labeledge[bs] is None: + # Reached single vertex; stop. + break + t = labeledge[bs][0] + bt = inblossom[t] + assert label[bt] == 2 + # Trace one more step back. + s, j = labeledge[bt] + # Augment through the T-blossom from j to base. + assert blossombase[bt] == t + if isinstance(bt, Blossom): + augmentBlossom(bt, j) + # Update mate[j] + mate[j] = s + + # Verify that the optimum solution has been reached. + def verifyOptimum(): + if maxcardinality: + # Vertices may have negative dual; + # find a constant non-negative number to add to all vertex duals. + vdualoffset = max(0, -min(dualvar.values())) + else: + vdualoffset = 0 + # 0. all dual variables are non-negative + assert min(dualvar.values()) + vdualoffset >= 0 + assert len(blossomdual) == 0 or min(blossomdual.values()) >= 0 + # 0. all edges have non-negative slack and + # 1. all matched edges have zero slack; + for i, j, d in G.edges(data=True): + wt = d.get(weight, 1) + if i == j: + continue # ignore self-loops + s = dualvar[i] + dualvar[j] - 2 * wt + iblossoms = [i] + jblossoms = [j] + while blossomparent[iblossoms[-1]] is not None: + iblossoms.append(blossomparent[iblossoms[-1]]) + while blossomparent[jblossoms[-1]] is not None: + jblossoms.append(blossomparent[jblossoms[-1]]) + iblossoms.reverse() + jblossoms.reverse() + for bi, bj in zip(iblossoms, jblossoms): + if bi != bj: + break + s += 2 * blossomdual[bi] + assert s >= 0 + if mate.get(i) == j or mate.get(j) == i: + assert mate[i] == j and mate[j] == i + assert s == 0 + # 2. 
all single vertices have zero dual value; + for v in gnodes: + assert (v in mate) or dualvar[v] + vdualoffset == 0 + # 3. all blossoms with positive dual value are full. + for b in blossomdual: + if blossomdual[b] > 0: + assert len(b.edges) % 2 == 1 + for i, j in b.edges[1::2]: + assert mate[i] == j and mate[j] == i + # Ok. + + # Main loop: continue until no further improvement is possible. + while 1: + # Each iteration of this loop is a "stage". + # A stage finds an augmenting path and uses that to improve + # the matching. + + # Remove labels from top-level blossoms/vertices. + label.clear() + labeledge.clear() + + # Forget all about least-slack edges. + bestedge.clear() + for b in blossomdual: + b.mybestedges = None + + # Loss of labeling means that we can not be sure that currently + # allowable edges remain allowable throughout this stage. + allowedge.clear() + + # Make queue empty. + queue[:] = [] + + # Label single blossoms/vertices with S and put them in the queue. + for v in gnodes: + if (v not in mate) and label.get(inblossom[v]) is None: + assignLabel(v, 1, None) + + # Loop until we succeed in augmenting the matching. + augmented = 0 + while 1: + # Each iteration of this loop is a "substage". + # A substage tries to find an augmenting path; + # if found, the path is used to improve the matching and + # the stage ends. If there is no augmenting path, the + # primal-dual method is used to pump some slack out of + # the dual variables. + + # Continue labeling until all vertices which are reachable + # through an alternating path have got a label. + while queue and not augmented: + # Take an S vertex from the queue. + v = queue.pop() + assert label[inblossom[v]] == 1 + + # Scan its neighbours: + for w in G.neighbors(v): + if w == v: + continue # ignore self-loops + # w is a neighbour to v + bv = inblossom[v] + bw = inblossom[w] + if bv == bw: + # this edge is internal to a blossom; ignore it + continue + if (v, w) not in allowedge: + kslack = slack(v, w) + if kslack <= 0: + # edge k has zero slack => it is allowable + allowedge[(v, w)] = allowedge[(w, v)] = True + if (v, w) in allowedge: + if label.get(bw) is None: + # (C1) w is a free vertex; + # label w with T and label its mate with S (R12). + assignLabel(w, 2, v) + elif label.get(bw) == 1: + # (C2) w is an S-vertex (not in the same blossom); + # follow back-links to discover either an + # augmenting path or a new blossom. + base = scanBlossom(v, w) + if base is not NoNode: + # Found a new blossom; add it to the blossom + # bookkeeping and turn it into an S-blossom. + addBlossom(base, v, w) + else: + # Found an augmenting path; augment the + # matching and end this stage. + augmentMatching(v, w) + augmented = 1 + break + elif label.get(w) is None: + # w is inside a T-blossom, but w itself has not + # yet been reached from outside the blossom; + # mark it as reached (we need this to relabel + # during T-blossom expansion). + assert label[bw] == 2 + label[w] = 2 + labeledge[w] = (v, w) + elif label.get(bw) == 1: + # keep track of the least-slack non-allowable edge to + # a different S-blossom. + if bestedge.get(bv) is None or kslack < slack(*bestedge[bv]): + bestedge[bv] = (v, w) + elif label.get(w) is None: + # w is a free vertex (or an unreached vertex inside + # a T-blossom) but we can not reach it yet; + # keep track of the least-slack edge that reaches w. 
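+                        # (The best-edge records collected here and above feed
+                        # the delta2/delta3 computations below once the queue
+                        # empties without finding an augmenting path.)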
+                        if bestedge.get(w) is None or kslack < slack(*bestedge[w]):
+                            bestedge[w] = (v, w)
+
+            if augmented:
+                break
+
+            # There is no augmenting path under these constraints;
+            # compute delta and reduce slack in the optimization problem.
+            # (Note that our vertex dual variables, edge slacks and delta's
+            # are pre-multiplied by two.)
+            deltatype = -1
+            delta = deltaedge = deltablossom = None
+
+            # Compute delta1: the minimum value of any vertex dual.
+            if not maxcardinality:
+                deltatype = 1
+                delta = min(dualvar.values())
+
+            # Compute delta2: the minimum slack on any edge between
+            # an S-vertex and a free vertex.
+            for v in G.nodes():
+                if label.get(inblossom[v]) is None and bestedge.get(v) is not None:
+                    d = slack(*bestedge[v])
+                    if deltatype == -1 or d < delta:
+                        delta = d
+                        deltatype = 2
+                        deltaedge = bestedge[v]
+
+            # Compute delta3: half the minimum slack on any edge between
+            # a pair of S-blossoms.
+            for b in blossomparent:
+                if (
+                    blossomparent[b] is None
+                    and label.get(b) == 1
+                    and bestedge.get(b) is not None
+                ):
+                    kslack = slack(*bestedge[b])
+                    if allinteger:
+                        assert (kslack % 2) == 0
+                        d = kslack // 2
+                    else:
+                        d = kslack / 2.0
+                    if deltatype == -1 or d < delta:
+                        delta = d
+                        deltatype = 3
+                        deltaedge = bestedge[b]
+
+            # Compute delta4: minimum z variable of any T-blossom.
+            for b in blossomdual:
+                if (
+                    blossomparent[b] is None
+                    and label.get(b) == 2
+                    and (deltatype == -1 or blossomdual[b] < delta)
+                ):
+                    delta = blossomdual[b]
+                    deltatype = 4
+                    deltablossom = b
+
+            if deltatype == -1:
+                # No further improvement possible; max-cardinality optimum
+                # reached. Do a final delta update to make the optimum
+                # verifiable.
+                assert maxcardinality
+                deltatype = 1
+                delta = max(0, min(dualvar.values()))
+
+            # Update dual variables according to delta.
+            for v in gnodes:
+                if label.get(inblossom[v]) == 1:
+                    # S-vertex: 2*u = 2*u - 2*delta
+                    dualvar[v] -= delta
+                elif label.get(inblossom[v]) == 2:
+                    # T-vertex: 2*u = 2*u + 2*delta
+                    dualvar[v] += delta
+            for b in blossomdual:
+                if blossomparent[b] is None:
+                    if label.get(b) == 1:
+                        # top-level S-blossom: z = z + 2*delta
+                        blossomdual[b] += delta
+                    elif label.get(b) == 2:
+                        # top-level T-blossom: z = z - 2*delta
+                        blossomdual[b] -= delta
+
+            # Take action at the point where minimum delta occurred.
+            if deltatype == 1:
+                # No further improvement possible; optimum reached.
+                break
+            elif deltatype == 2:
+                # Use the least-slack edge to continue the search.
+                (v, w) = deltaedge
+                assert label[inblossom[v]] == 1
+                allowedge[(v, w)] = allowedge[(w, v)] = True
+                queue.append(v)
+            elif deltatype == 3:
+                # Use the least-slack edge to continue the search.
+                (v, w) = deltaedge
+                allowedge[(v, w)] = allowedge[(w, v)] = True
+                assert label[inblossom[v]] == 1
+                queue.append(v)
+            elif deltatype == 4:
+                # Expand the least-z blossom.
+                expandBlossom(deltablossom, False)
+
+            # End of this substage.
+
+        # Paranoia check that the matching is symmetric.
+        for v in mate:
+            assert mate[mate[v]] == v
+
+        # Stop when no more augmenting path can be found.
+        if not augmented:
+            break
+
+        # End of a stage; expand all S-blossoms which have zero dual.
+        for b in list(blossomdual.keys()):
+            if b not in blossomdual:
+                continue  # already expanded
+            if blossomparent[b] is None and label.get(b) == 1 and blossomdual[b] == 0:
+                expandBlossom(b, True)
+
+    # Verify that we reached the optimum solution (only for integer weights).
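+    # (verifyOptimum asserts the optimality conditions with exact equality,
+    # which is only reliable for integer weights; with floating point weights,
+    # rounding error in the duals could trigger spurious assertion failures.)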
+ if allinteger: + verifyOptimum() + + return matching_dict_to_set(mate) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/mis.py b/phivenv/Lib/site-packages/networkx/algorithms/mis.py new file mode 100644 index 0000000000000000000000000000000000000000..00d101c52887b01f35e43043431ab749b01f6867 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/mis.py @@ -0,0 +1,77 @@ +""" +Algorithm to find a maximal (not maximum) independent set. + +""" +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["maximal_independent_set"] + + +@not_implemented_for("directed") +@py_random_state(2) +@nx._dispatch +def maximal_independent_set(G, nodes=None, seed=None): + """Returns a random maximal independent set guaranteed to contain + a given set of nodes. + + An independent set is a set of nodes such that the subgraph + of G induced by these nodes contains no edges. A maximal + independent set is an independent set such that it is not possible + to add a new node and still get an independent set. + + Parameters + ---------- + G : NetworkX graph + + nodes : list or iterable + Nodes that must be part of the independent set. This set of nodes + must be independent. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + indep_nodes : list + List of nodes that are part of a maximal independent set. + + Raises + ------ + NetworkXUnfeasible + If the nodes in the provided list are not part of the graph or + do not form an independent set, an exception is raised. + + NetworkXNotImplemented + If `G` is directed. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.maximal_independent_set(G) # doctest: +SKIP + [4, 0, 2] + >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP + [1, 3] + + Notes + ----- + This algorithm does not solve the maximum independent set problem. + + """ + if not nodes: + nodes = {seed.choice(list(G))} + else: + nodes = set(nodes) + if not nodes.issubset(G): + raise nx.NetworkXUnfeasible(f"{nodes} is not a subset of the nodes of G") + neighbors = set.union(*[set(G.adj[v]) for v in nodes]) + if set.intersection(neighbors, nodes): + raise nx.NetworkXUnfeasible(f"{nodes} is not an independent set of G") + indep_nodes = list(nodes) + available_nodes = set(G.nodes()).difference(neighbors.union(nodes)) + while available_nodes: + node = seed.choice(list(available_nodes)) + indep_nodes.append(node) + available_nodes.difference_update(list(G.adj[node]) + [node]) + return indep_nodes diff --git a/phivenv/Lib/site-packages/networkx/algorithms/moral.py b/phivenv/Lib/site-packages/networkx/algorithms/moral.py new file mode 100644 index 0000000000000000000000000000000000000000..af187259251111f8ea4332412af36e06380a8df8 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/moral.py @@ -0,0 +1,59 @@ +r"""Function for computing the moral graph of a directed graph.""" + +import itertools + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["moral_graph"] + + +@not_implemented_for("undirected") +@nx._dispatch +def moral_graph(G): + r"""Return the Moral Graph + + Returns the moralized graph of a given directed graph. + + Parameters + ---------- + G : NetworkX graph + Directed graph + + Returns + ------- + H : NetworkX graph + The undirected moralized graph of G + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. 
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 2), (2, 3), (2, 5), (3, 4), (4, 3)])
+    >>> G_moral = nx.moral_graph(G)
+    >>> G_moral.edges()
+    EdgeView([(1, 2), (2, 3), (2, 5), (2, 4), (3, 4)])
+
+    Notes
+    -----
+    A moral graph is an undirected graph H = (V, E) generated from a
+    directed graph, where if a node has more than one parent node, edges
+    between these parent nodes are inserted and all directed edges become
+    undirected.
+
+    https://en.wikipedia.org/wiki/Moral_graph
+
+    References
+    ----------
+    .. [1] Wray L. Buntine. 1995. Chain graphs for learning.
+           In Proceedings of the Eleventh conference on Uncertainty
+           in artificial intelligence (UAI'95)
+    """
+    H = G.to_undirected()
+    for preds in G.pred.values():
+        predecessors_combinations = itertools.combinations(preds, r=2)
+        H.add_edges_from(predecessors_combinations)
+    return H
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/node_classification.py b/phivenv/Lib/site-packages/networkx/algorithms/node_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1b46545abbfcc2b0b99b91c56f00c415b70f53b
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/node_classification.py
@@ -0,0 +1,218 @@
+"""This module provides functions for the node classification problem.
+
+The functions in this module are not imported
+into the top level `networkx` namespace.
+You can access these functions by importing
+the `networkx.algorithms.node_classification` module,
+then accessing the functions as attributes of `node_classification`.
+For example:
+
+  >>> from networkx.algorithms import node_classification
+  >>> G = nx.path_graph(4)
+  >>> G.edges()
+  EdgeView([(0, 1), (1, 2), (2, 3)])
+  >>> G.nodes[0]["label"] = "A"
+  >>> G.nodes[3]["label"] = "B"
+  >>> node_classification.harmonic_function(G)
+  ['A', 'A', 'B', 'B']
+
+References
+----------
+Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August).
+Semi-supervised learning using gaussian fields and harmonic functions.
+In ICML (Vol. 3, pp. 912-919).
+"""
+import networkx as nx
+
+__all__ = ["harmonic_function", "local_and_global_consistency"]
+
+
+@nx.utils.not_implemented_for("directed")
+@nx._dispatch(node_attrs="label_name")
+def harmonic_function(G, max_iter=30, label_name="label"):
+    """Node classification by Harmonic function
+
+    Function for computing Harmonic function algorithm by Zhu et al.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+    max_iter : int
+        maximum number of iterations allowed
+    label_name : string
+        name of target labels to predict
+
+    Returns
+    -------
+    predicted : list
+        List of length ``len(G)`` with the predicted labels for each node.
+
+    Raises
+    ------
+    NetworkXError
+        If no nodes in `G` have attribute `label_name`.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import node_classification
+    >>> G = nx.path_graph(4)
+    >>> G.nodes[0]["label"] = "A"
+    >>> G.nodes[3]["label"] = "B"
+    >>> G.nodes(data=True)
+    NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}})
+    >>> G.edges()
+    EdgeView([(0, 1), (1, 2), (2, 3)])
+    >>> predicted = node_classification.harmonic_function(G)
+    >>> predicted
+    ['A', 'A', 'B', 'B']
+
+    References
+    ----------
+    Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August).
+    Semi-supervised learning using gaussian fields and harmonic functions.
+    In ICML (Vol. 3, pp. 912-919).
+ """ + import numpy as np + import scipy as sp + + X = nx.to_scipy_sparse_array(G) # adjacency matrix + labels, label_dict = _get_label_info(G, label_name) + + if labels.shape[0] == 0: + raise nx.NetworkXError( + f"No node on the input graph is labeled by '{label_name}'." + ) + + n_samples = X.shape[0] + n_classes = label_dict.shape[0] + F = np.zeros((n_samples, n_classes)) + + # Build propagation matrix + degrees = X.sum(axis=0) + degrees[degrees == 0] = 1 # Avoid division by 0 + # TODO: csr_array + D = sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0)) + P = (D @ X).tolil() + P[labels[:, 0]] = 0 # labels[:, 0] indicates IDs of labeled nodes + # Build base matrix + B = np.zeros((n_samples, n_classes)) + B[labels[:, 0], labels[:, 1]] = 1 + + for _ in range(max_iter): + F = (P @ F) + B + + return label_dict[np.argmax(F, axis=1)].tolist() + + +@nx.utils.not_implemented_for("directed") +@nx._dispatch(node_attrs="label_name") +def local_and_global_consistency(G, alpha=0.99, max_iter=30, label_name="label"): + """Node classification by Local and Global Consistency + + Function for computing Local and global consistency algorithm by Zhou et al. + + Parameters + ---------- + G : NetworkX Graph + alpha : float + Clamping factor + max_iter : int + Maximum number of iterations allowed + label_name : string + Name of target labels to predict + + Returns + ------- + predicted : list + List of length ``len(G)`` with the predicted labels for each node. + + Raises + ------ + NetworkXError + If no nodes in `G` have attribute `label_name`. + + Examples + -------- + >>> from networkx.algorithms import node_classification + >>> G = nx.path_graph(4) + >>> G.nodes[0]["label"] = "A" + >>> G.nodes[3]["label"] = "B" + >>> G.nodes(data=True) + NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}}) + >>> G.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + >>> predicted = node_classification.local_and_global_consistency(G) + >>> predicted + ['A', 'A', 'B', 'B'] + + References + ---------- + Zhou, D., Bousquet, O., Lal, T. N., Weston, J., & Schölkopf, B. (2004). + Learning with local and global consistency. + Advances in neural information processing systems, 16(16), 321-328. + """ + import numpy as np + import scipy as sp + + X = nx.to_scipy_sparse_array(G) # adjacency matrix + labels, label_dict = _get_label_info(G, label_name) + + if labels.shape[0] == 0: + raise nx.NetworkXError( + f"No node on the input graph is labeled by '{label_name}'." 
+        )
+
+    n_samples = X.shape[0]
+    n_classes = label_dict.shape[0]
+    F = np.zeros((n_samples, n_classes))
+
+    # Build propagation matrix
+    degrees = X.sum(axis=0)
+    degrees[degrees == 0] = 1  # Avoid division by 0
+    # TODO: csr_array
+    D2 = np.sqrt(sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0)))
+    P = alpha * ((D2 @ X) @ D2)
+    # Build base matrix
+    B = np.zeros((n_samples, n_classes))
+    B[labels[:, 0], labels[:, 1]] = 1 - alpha
+
+    for _ in range(max_iter):
+        F = (P @ F) + B
+
+    return label_dict[np.argmax(F, axis=1)].tolist()
+
+
+def _get_label_info(G, label_name):
+    """Get and return information of labels from the input graph
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    label_name : string
+        Name of the target label
+
+    Returns
+    -------
+    labels : numpy array, shape = [n_labeled_samples, 2]
+        Array of pairs of labeled node ID and label ID
+    label_dict : numpy array, shape = [n_classes]
+        Array of labels
+        i-th element contains the label corresponding to label ID `i`
+    """
+    import numpy as np
+
+    labels = []
+    label_to_id = {}
+    lid = 0
+    for i, n in enumerate(G.nodes(data=True)):
+        if label_name in n[1]:
+            label = n[1][label_name]
+            if label not in label_to_id:
+                label_to_id[label] = lid
+                lid += 1
+            labels.append([i, label_to_id[label]])
+    labels = np.array(labels)
+    label_dict = np.array(
+        [label for label, _ in sorted(label_to_id.items(), key=lambda x: x[1])]
+    )
+    return (labels, label_dict)
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/non_randomness.py b/phivenv/Lib/site-packages/networkx/algorithms/non_randomness.py
new file mode 100644
index 0000000000000000000000000000000000000000..777cecbc96948b2336cb06393345de2c9a4a0dad
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/non_randomness.py
@@ -0,0 +1,96 @@
+r"""Computation of graph non-randomness
+"""
+
+import math
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["non_randomness"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch(edge_attrs="weight")
+def non_randomness(G, k=None, weight="weight"):
+    """Compute the non-randomness of graph G.
+
+    The first returned value nr is the sum of non-randomness values of all
+    edges within the graph (where the non-randomness of an edge tends to be
+    small when the two nodes linked by that edge are from two different
+    communities).
+
+    The second computed value nr_rd is a relative measure that indicates
+    to what extent graph G is different from random graphs in terms
+    of probability. When it is close to 0, the graph tends to be more
+    likely generated by an Erdos Renyi model.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Graph must be symmetric, connected, and without self-loops.
+
+    k : int
+        The number of communities in G.
+        If k is not set, the function will use a default community
+        detection algorithm to set it.
+
+    weight : string or None, optional (default="weight")
+        The name of an edge attribute that holds the numerical value used
+        as a weight. If None, then each edge has weight 1, i.e., the graph is
+        binary.
+
+    Returns
+    -------
+    non-randomness : (float, float) tuple
+        Non-randomness, Relative non-randomness w.r.t.
+        Erdos Renyi random graphs.
+
+    Raises
+    ------
+    NetworkXException
+        if the input graph is not connected.
+    NetworkXError
+        if the input graph contains self-loops.
+ + Examples + -------- + >>> G = nx.karate_club_graph() + >>> nr, nr_rd = nx.non_randomness(G, 2) + >>> nr, nr_rd = nx.non_randomness(G, 2, 'weight') + + Notes + ----- + This computes Eq. (4.4) and (4.5) in Ref. [1]_. + + If a weight field is passed, this algorithm will use the eigenvalues + of the weighted adjacency matrix to compute Eq. (4.4) and (4.5). + + References + ---------- + .. [1] Xiaowei Ying and Xintao Wu, + On Randomness Measures for Social Networks, + SIAM International Conference on Data Mining. 2009 + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXException("Non connected graph.") + if len(list(nx.selfloop_edges(G))) > 0: + raise nx.NetworkXError("Graph must not contain self-loops") + + if k is None: + k = len(tuple(nx.community.label_propagation_communities(G))) + + # eq. 4.4 + eigenvalues = np.linalg.eigvals(nx.to_numpy_array(G, weight=weight)) + nr = np.real(np.sum(eigenvalues[:k])) + + n = G.number_of_nodes() + m = G.number_of_edges() + p = (2 * k * m) / (n * (n - k)) + + # eq. 4.5 + nr_rd = (nr - ((n - 2 * k) * p + k)) / math.sqrt(2 * k * p * (1 - p)) + + return nr, nr_rd diff --git a/phivenv/Lib/site-packages/networkx/algorithms/planar_drawing.py b/phivenv/Lib/site-packages/networkx/algorithms/planar_drawing.py new file mode 100644 index 0000000000000000000000000000000000000000..47f94f172154bdb63753832b29b48d6be78f406a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/planar_drawing.py @@ -0,0 +1,464 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["combinatorial_embedding_to_pos"] + + +def combinatorial_embedding_to_pos(embedding, fully_triangulate=False): + """Assigns every node a (x, y) position based on the given embedding + + The algorithm iteratively inserts nodes of the input graph in a certain + order and rearranges previously inserted nodes so that the planar drawing + stays valid. This is done efficiently by only maintaining relative + positions during the node placements and calculating the absolute positions + at the end. For more information see [1]_. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + This defines the order of the edges + + fully_triangulate : bool + If set to True the algorithm adds edges to a copy of the input + embedding and makes it chordal. + + Returns + ------- + pos : dict + Maps each node to a tuple that defines the (x, y) position + + References + ---------- + .. [1] M. Chrobak and T.H. Payne: + A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677 + + """ + if len(embedding.nodes()) < 4: + # Position the node in any triangle + default_positions = [(0, 0), (2, 0), (1, 1)] + pos = {} + for i, v in enumerate(embedding.nodes()): + pos[v] = default_positions[i] + return pos + + embedding, outer_face = triangulate_embedding(embedding, fully_triangulate) + + # The following dicts map a node to another node + # If a node is not in the key set it means that the node is not yet in G_k + # If a node maps to None then the corresponding subtree does not exist + left_t_child = {} + right_t_child = {} + + # The following dicts map a node to an integer + delta_x = {} + y_coordinate = {} + + node_list = get_canonical_ordering(embedding, outer_face) + + # 1. 
Phase: Compute relative positions + + # Initialization + v1, v2, v3 = node_list[0][0], node_list[1][0], node_list[2][0] + + delta_x[v1] = 0 + y_coordinate[v1] = 0 + right_t_child[v1] = v3 + left_t_child[v1] = None + + delta_x[v2] = 1 + y_coordinate[v2] = 0 + right_t_child[v2] = None + left_t_child[v2] = None + + delta_x[v3] = 1 + y_coordinate[v3] = 1 + right_t_child[v3] = v2 + left_t_child[v3] = None + + for k in range(3, len(node_list)): + vk, contour_neighbors = node_list[k] + wp = contour_neighbors[0] + wp1 = contour_neighbors[1] + wq = contour_neighbors[-1] + wq1 = contour_neighbors[-2] + adds_mult_tri = len(contour_neighbors) > 2 + + # Stretch gaps: + delta_x[wp1] += 1 + delta_x[wq] += 1 + + delta_x_wp_wq = sum(delta_x[x] for x in contour_neighbors[1:]) + + # Adjust offsets + delta_x[vk] = (-y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2 + y_coordinate[vk] = (y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2 + delta_x[wq] = delta_x_wp_wq - delta_x[vk] + if adds_mult_tri: + delta_x[wp1] -= delta_x[vk] + + # Install v_k: + right_t_child[wp] = vk + right_t_child[vk] = wq + if adds_mult_tri: + left_t_child[vk] = wp1 + right_t_child[wq1] = None + else: + left_t_child[vk] = None + + # 2. Phase: Set absolute positions + pos = {} + pos[v1] = (0, y_coordinate[v1]) + remaining_nodes = [v1] + while remaining_nodes: + parent_node = remaining_nodes.pop() + + # Calculate position for left child + set_position( + parent_node, left_t_child, remaining_nodes, delta_x, y_coordinate, pos + ) + # Calculate position for right child + set_position( + parent_node, right_t_child, remaining_nodes, delta_x, y_coordinate, pos + ) + return pos + + +def set_position(parent, tree, remaining_nodes, delta_x, y_coordinate, pos): + """Helper method to calculate the absolute position of nodes.""" + child = tree[parent] + parent_node_x = pos[parent][0] + if child is not None: + # Calculate pos of child + child_x = parent_node_x + delta_x[child] + pos[child] = (child_x, y_coordinate[child]) + # Remember to calculate pos of its children + remaining_nodes.append(child) + + +def get_canonical_ordering(embedding, outer_face): + """Returns a canonical ordering of the nodes + + The canonical ordering of nodes (v1, ..., vn) must fulfill the following + conditions: + (See Lemma 1 in [2]_) + + - For the subgraph G_k of the input graph induced by v1, ..., vk it holds: + - 2-connected + - internally triangulated + - the edge (v1, v2) is part of the outer face + - For a node v(k+1) the following holds: + - The node v(k+1) is part of the outer face of G_k + - It has at least two neighbors in G_k + - All neighbors of v(k+1) in G_k lie consecutively on the outer face of + G_k (excluding the edge (v1, v2)). + + The algorithm used here starts with G_n (containing all nodes). It first + selects the nodes v1 and v2. And then tries to find the order of the other + nodes by checking which node can be removed in order to fulfill the + conditions mentioned above. This is done by calculating the number of + chords of nodes on the outer face. For more information see [1]_. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + The embedding must be triangulated + outer_face : list + The nodes on the outer face of the graph + + Returns + ------- + ordering : list + A list of tuples `(vk, wp_wq)`. Here `vk` is the node at this position + in the canonical ordering. The element `wp_wq` is a list of nodes that + make up the outer face of G_k. + + References + ---------- + .. [1] Steven Chaplick. 
+ Canonical Orders of Planar Graphs and (some of) Their Applications 2015 + https://wuecampus2.uni-wuerzburg.de/moodle/pluginfile.php/545727/mod_resource/content/0/vg-ss15-vl03-canonical-orders-druckversion.pdf + .. [2] M. Chrobak and T.H. Payne: + A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677 + + """ + v1 = outer_face[0] + v2 = outer_face[1] + chords = defaultdict(int) # Maps nodes to the number of their chords + marked_nodes = set() + ready_to_pick = set(outer_face) + + # Initialize outer_face_ccw_nbr (do not include v1 -> v2) + outer_face_ccw_nbr = {} + prev_nbr = v2 + for idx in range(2, len(outer_face)): + outer_face_ccw_nbr[prev_nbr] = outer_face[idx] + prev_nbr = outer_face[idx] + outer_face_ccw_nbr[prev_nbr] = v1 + + # Initialize outer_face_cw_nbr (do not include v2 -> v1) + outer_face_cw_nbr = {} + prev_nbr = v1 + for idx in range(len(outer_face) - 1, 0, -1): + outer_face_cw_nbr[prev_nbr] = outer_face[idx] + prev_nbr = outer_face[idx] + + def is_outer_face_nbr(x, y): + if x not in outer_face_ccw_nbr: + return outer_face_cw_nbr[x] == y + if x not in outer_face_cw_nbr: + return outer_face_ccw_nbr[x] == y + return outer_face_ccw_nbr[x] == y or outer_face_cw_nbr[x] == y + + def is_on_outer_face(x): + return x not in marked_nodes and (x in outer_face_ccw_nbr or x == v1) + + # Initialize number of chords + for v in outer_face: + for nbr in embedding.neighbors_cw_order(v): + if is_on_outer_face(nbr) and not is_outer_face_nbr(v, nbr): + chords[v] += 1 + ready_to_pick.discard(v) + + # Initialize canonical_ordering + canonical_ordering = [None] * len(embedding.nodes()) + canonical_ordering[0] = (v1, []) + canonical_ordering[1] = (v2, []) + ready_to_pick.discard(v1) + ready_to_pick.discard(v2) + + for k in range(len(embedding.nodes()) - 1, 1, -1): + # 1. 
Pick v from ready_to_pick
+        v = ready_to_pick.pop()
+        marked_nodes.add(v)
+
+        # v has exactly two neighbors on the outer face (wp and wq)
+        wp = None
+        wq = None
+        # Iterate over neighbors of v to find wp and wq
+        nbr_iterator = iter(embedding.neighbors_cw_order(v))
+        while True:
+            nbr = next(nbr_iterator)
+            if nbr in marked_nodes:
+                # Only consider nodes that are not yet removed
+                continue
+            if is_on_outer_face(nbr):
+                # nbr is either wp or wq
+                if nbr == v1:
+                    wp = v1
+                elif nbr == v2:
+                    wq = v2
+                else:
+                    if outer_face_cw_nbr[nbr] == v:
+                        # nbr is wp
+                        wp = nbr
+                    else:
+                        # nbr is wq
+                        wq = nbr
+            if wp is not None and wq is not None:
+                # We don't need to iterate any further
+                break
+
+        # Obtain new nodes on outer face (neighbors of v from wp to wq)
+        wp_wq = [wp]
+        nbr = wp
+        while nbr != wq:
+            # Get next neighbor (clockwise on the outer face)
+            next_nbr = embedding[v][nbr]["ccw"]
+            wp_wq.append(next_nbr)
+            # Update outer face
+            outer_face_cw_nbr[nbr] = next_nbr
+            outer_face_ccw_nbr[next_nbr] = nbr
+            # Move to next neighbor of v
+            nbr = next_nbr
+
+        if len(wp_wq) == 2:
+            # There was a chord between wp and wq, decrease number of chords
+            chords[wp] -= 1
+            if chords[wp] == 0:
+                ready_to_pick.add(wp)
+            chords[wq] -= 1
+            if chords[wq] == 0:
+                ready_to_pick.add(wq)
+        else:
+            # Update all chords involving w_(p+1) to w_(q-1)
+            new_face_nodes = set(wp_wq[1:-1])
+            for w in new_face_nodes:
+                # If we do not find a chord for w later we can pick it next
+                ready_to_pick.add(w)
+                for nbr in embedding.neighbors_cw_order(w):
+                    if is_on_outer_face(nbr) and not is_outer_face_nbr(w, nbr):
+                        # There is a chord involving w
+                        chords[w] += 1
+                        ready_to_pick.discard(w)
+                        if nbr not in new_face_nodes:
+                            # Also increase chord for the neighbor
+                            # We only iterate over new_face_nodes
+                            chords[nbr] += 1
+                            ready_to_pick.discard(nbr)
+        # Set the canonical ordering node and the list of contour neighbors
+        canonical_ordering[k] = (v, wp_wq)
+
+    return canonical_ordering
+
+
+def triangulate_face(embedding, v1, v2):
+    """Triangulates the face given by half edge (v1, v2)
+
+    Parameters
+    ----------
+    embedding : nx.PlanarEmbedding
+    v1 : node
+        The half-edge (v1, v2) belongs to the face that gets triangulated
+    v2 : node
+    """
+    _, v3 = embedding.next_face_half_edge(v1, v2)
+    _, v4 = embedding.next_face_half_edge(v2, v3)
+    if v1 in (v2, v3):
+        # The component has less than 3 nodes
+        return
+    while v1 != v4:
+        # Add edge if not already present on other side
+        if embedding.has_edge(v1, v3):
+            # Cannot triangulate at this position
+            v1, v2, v3 = v2, v3, v4
+        else:
+            # Add edge for triangulation
+            embedding.add_half_edge_cw(v1, v3, v2)
+            embedding.add_half_edge_ccw(v3, v1, v2)
+            v1, v2, v3 = v1, v3, v4
+        # Get next node
+        _, v4 = embedding.next_face_half_edge(v2, v3)
+
+
+def triangulate_embedding(embedding, fully_triangulate=True):
+    """Triangulates the embedding.
+
+    Traverses faces of the embedding and adds edges to a copy of the
+    embedding to triangulate it.
+    The method also ensures that the resulting graph is 2-connected by adding
+    edges if the same vertex is contained twice on a path around a face.
+
+    Parameters
+    ----------
+    embedding : nx.PlanarEmbedding
+        The input graph must contain at least 3 nodes.
+
+    fully_triangulate : bool
+        If set to False the face with the most nodes is chosen as the outer
+        face. This outer face does not get triangulated.
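+        (The untriangulated outer face becomes the unbounded face of the
+        final drawing.)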
+ + Returns + ------- + (embedding, outer_face) : (nx.PlanarEmbedding, list) tuple + The element `embedding` is a new embedding containing all edges from + the input embedding and the additional edges to triangulate the graph. + The element `outer_face` is a list of nodes that lie on the outer face. + If the graph is fully triangulated these are three arbitrary connected + nodes. + + """ + if len(embedding.nodes) <= 1: + return embedding, list(embedding.nodes) + embedding = nx.PlanarEmbedding(embedding) + + # Get a list with a node for each connected component + component_nodes = [next(iter(x)) for x in nx.connected_components(embedding)] + + # 1. Make graph a single component (add edge between components) + for i in range(len(component_nodes) - 1): + v1 = component_nodes[i] + v2 = component_nodes[i + 1] + embedding.connect_components(v1, v2) + + # 2. Calculate faces, ensure 2-connectedness and determine outer face + outer_face = [] # A face with the most number of nodes + face_list = [] + edges_visited = set() # Used to keep track of already visited faces + for v in embedding.nodes(): + for w in embedding.neighbors_cw_order(v): + new_face = make_bi_connected(embedding, v, w, edges_visited) + if new_face: + # Found a new face + face_list.append(new_face) + if len(new_face) > len(outer_face): + # The face is a candidate to be the outer face + outer_face = new_face + + # 3. Triangulate (internal) faces + for face in face_list: + if face is not outer_face or fully_triangulate: + # Triangulate this face + triangulate_face(embedding, face[0], face[1]) + + if fully_triangulate: + v1 = outer_face[0] + v2 = outer_face[1] + v3 = embedding[v2][v1]["ccw"] + outer_face = [v1, v2, v3] + + return embedding, outer_face + + +def make_bi_connected(embedding, starting_node, outgoing_node, edges_counted): + """Triangulate a face and make it 2-connected + + This method also adds all edges on the face to `edges_counted`. 
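+    The collected half-edges let callers skip faces that have already been
+    traversed.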
+
+    Parameters
+    ----------
+    embedding: nx.PlanarEmbedding
+        The embedding that defines the faces
+    starting_node : node
+        A node on the face
+    outgoing_node : node
+        A node such that the half edge (starting_node, outgoing_node) belongs
+        to the face
+    edges_counted: set
+        Set of all half-edges that belong to a face that have been visited
+
+    Returns
+    -------
+    face_nodes: list
+        A list of all nodes at the border of this face
+    """
+
+    # Check if the face has already been calculated
+    if (starting_node, outgoing_node) in edges_counted:
+        # This face was already counted
+        return []
+    edges_counted.add((starting_node, outgoing_node))
+
+    # Add all edges to edges_counted which have this face to their left
+    v1 = starting_node
+    v2 = outgoing_node
+    face_list = [starting_node]  # List of nodes around the face
+    face_set = set(face_list)  # Set for faster queries
+    _, v3 = embedding.next_face_half_edge(v1, v2)
+
+    # Move the nodes v1, v2, v3 around the face:
+    while v2 != starting_node or v3 != outgoing_node:
+        if v1 == v2:
+            raise nx.NetworkXException("Invalid half-edge")
+        # cycle is not completed yet
+        if v2 in face_set:
+            # v2 encountered twice: Add edge to ensure 2-connectedness
+            embedding.add_half_edge_cw(v1, v3, v2)
+            embedding.add_half_edge_ccw(v3, v1, v2)
+            edges_counted.add((v2, v3))
+            edges_counted.add((v3, v1))
+            v2 = v1
+        else:
+            face_set.add(v2)
+            face_list.append(v2)
+
+        # set next edge
+        v1 = v2
+        v2, v3 = embedding.next_face_half_edge(v2, v3)
+
+        # remember that this edge has been counted
+        edges_counted.add((v1, v2))
+
+    return face_list
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/planarity.py b/phivenv/Lib/site-packages/networkx/algorithms/planarity.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad46f4739e5162082f7df05a1b696846d97fc85b
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/planarity.py
@@ -0,0 +1,1179 @@
+from collections import defaultdict
+
+import networkx as nx
+
+__all__ = ["check_planarity", "is_planar", "PlanarEmbedding"]
+
+
+@nx._dispatch
+def is_planar(G):
+    """Returns True if and only if `G` is planar.
+
+    A graph is *planar* iff it can be drawn in a plane without
+    any edge intersections.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    bool
+        Whether the graph is planar.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2)])
+    >>> nx.is_planar(G)
+    True
+    >>> nx.is_planar(nx.complete_graph(5))
+    False
+
+    See Also
+    --------
+    check_planarity :
+        Check if graph is planar *and* return a `PlanarEmbedding` instance if True.
+    """
+
+    return check_planarity(G, counterexample=False)[0]
+
+
+@nx._dispatch
+def check_planarity(G, counterexample=False):
+    """Check if a graph is planar and return a counterexample or an embedding.
+
+    A graph is planar iff it can be drawn in a plane without
+    any edge intersections.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    counterexample : bool
+        A Kuratowski subgraph (to prove non-planarity) is only returned if
+        set to True.
+
+    Returns
+    -------
+    (is_planar, certificate) : (bool, NetworkX graph) tuple
+        is_planar is true if the graph is planar.
+        If the graph is planar `certificate` is a PlanarEmbedding
+        otherwise it is a Kuratowski subgraph.
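+        (A Kuratowski subgraph is a subdivision of K5 or K3,3, whose presence
+        proves non-planarity by Kuratowski's theorem.)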
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> is_planar, P = nx.check_planarity(G) + >>> print(is_planar) + True + + When `G` is planar, a `PlanarEmbedding` instance is returned: + + >>> P.get_data() + {0: [1, 2], 1: [0], 2: [0]} + + Notes + ----- + A (combinatorial) embedding consists of cyclic orderings of the incident + edges at each vertex. Given such an embedding there are multiple approaches + discussed in literature to drawing the graph (subject to various + constraints, e.g. integer coordinates), see e.g. [2]. + + The planarity check algorithm and extraction of the combinatorial embedding + is based on the Left-Right Planarity Test [1]. + + A counterexample is only generated if the corresponding parameter is set, + because the complexity of the counterexample generation is higher. + + See also + -------- + is_planar : + Check for planarity without creating a `PlanarEmbedding` or counterexample. + + References + ---------- + .. [1] Ulrik Brandes: + The Left-Right Planarity Test + 2009 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208 + .. [2] Takao Nishizeki, Md Saidur Rahman: + Planar graph drawing + Lecture Notes Series on Computing: Volume 12 + 2004 + """ + + planarity_state = LRPlanarity(G) + embedding = planarity_state.lr_planarity() + if embedding is None: + # graph is not planar + if counterexample: + return False, get_counterexample(G) + else: + return False, None + else: + # graph is planar + return True, embedding + + +@nx._dispatch +def check_planarity_recursive(G, counterexample=False): + """Recursive version of :meth:`check_planarity`.""" + planarity_state = LRPlanarity(G) + embedding = planarity_state.lr_planarity_recursive() + if embedding is None: + # graph is not planar + if counterexample: + return False, get_counterexample_recursive(G) + else: + return False, None + else: + # graph is planar + return True, embedding + + +@nx._dispatch +def get_counterexample(G): + """Obtains a Kuratowski subgraph. + + Raises nx.NetworkXException if G is planar. + + The function removes edges such that the graph is still not planar. + At some point the removal of any edge would make the graph planar. + This subgraph must be a Kuratowski subgraph. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + subgraph : NetworkX graph + A Kuratowski subgraph that proves that G is not planar. + + """ + # copy graph + G = nx.Graph(G) + + if check_planarity(G)[0]: + raise nx.NetworkXException("G is planar - no counter example.") + + # find Kuratowski subgraph + subgraph = nx.Graph() + for u in G: + nbrs = list(G[u]) + for v in nbrs: + G.remove_edge(u, v) + if check_planarity(G)[0]: + G.add_edge(u, v) + subgraph.add_edge(u, v) + + return subgraph + + +@nx._dispatch +def get_counterexample_recursive(G): + """Recursive version of :meth:`get_counterexample`.""" + + # copy graph + G = nx.Graph(G) + + if check_planarity_recursive(G)[0]: + raise nx.NetworkXException("G is planar - no counter example.") + + # find Kuratowski subgraph + subgraph = nx.Graph() + for u in G: + nbrs = list(G[u]) + for v in nbrs: + G.remove_edge(u, v) + if check_planarity_recursive(G)[0]: + G.add_edge(u, v) + subgraph.add_edge(u, v) + + return subgraph + + +class Interval: + """Represents a set of return edges. + + All return edges in an interval induce a same constraint on the contained + edges, which means that all edges must either have a left orientation or + all edges must have a right orientation. 
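+
+    An interval stores its lowest and highest return edge; intervals are
+    grouped into :class:`ConflictPair` objects on the conflict-pair stack
+    ``S`` maintained by :class:`LRPlanarity`.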
+ """ + + def __init__(self, low=None, high=None): + self.low = low + self.high = high + + def empty(self): + """Check if the interval is empty""" + return self.low is None and self.high is None + + def copy(self): + """Returns a copy of this interval""" + return Interval(self.low, self.high) + + def conflicting(self, b, planarity_state): + """Returns True if interval I conflicts with edge b""" + return ( + not self.empty() + and planarity_state.lowpt[self.high] > planarity_state.lowpt[b] + ) + + +class ConflictPair: + """Represents a different constraint between two intervals. + + The edges in the left interval must have a different orientation than + the one in the right interval. + """ + + def __init__(self, left=Interval(), right=Interval()): + self.left = left + self.right = right + + def swap(self): + """Swap left and right intervals""" + temp = self.left + self.left = self.right + self.right = temp + + def lowest(self, planarity_state): + """Returns the lowest lowpoint of a conflict pair""" + if self.left.empty(): + return planarity_state.lowpt[self.right.low] + if self.right.empty(): + return planarity_state.lowpt[self.left.low] + return min( + planarity_state.lowpt[self.left.low], planarity_state.lowpt[self.right.low] + ) + + +def top_of_stack(l): + """Returns the element on top of the stack.""" + if not l: + return None + return l[-1] + + +class LRPlanarity: + """A class to maintain the state during planarity check.""" + + __slots__ = [ + "G", + "roots", + "height", + "lowpt", + "lowpt2", + "nesting_depth", + "parent_edge", + "DG", + "adjs", + "ordered_adjs", + "ref", + "side", + "S", + "stack_bottom", + "lowpt_edge", + "left_ref", + "right_ref", + "embedding", + ] + + def __init__(self, G): + # copy G without adding self-loops + self.G = nx.Graph() + self.G.add_nodes_from(G.nodes) + for e in G.edges: + if e[0] != e[1]: + self.G.add_edge(e[0], e[1]) + + self.roots = [] + + # distance from tree root + self.height = defaultdict(lambda: None) + + self.lowpt = {} # height of lowest return point of an edge + self.lowpt2 = {} # height of second lowest return point + self.nesting_depth = {} # for nesting order + + # None -> missing edge + self.parent_edge = defaultdict(lambda: None) + + # oriented DFS graph + self.DG = nx.DiGraph() + self.DG.add_nodes_from(G.nodes) + + self.adjs = {} + self.ordered_adjs = {} + + self.ref = defaultdict(lambda: None) + self.side = defaultdict(lambda: 1) + + # stack of conflict pairs + self.S = [] + self.stack_bottom = {} + self.lowpt_edge = {} + + self.left_ref = {} + self.right_ref = {} + + self.embedding = PlanarEmbedding() + + def lr_planarity(self): + """Execute the LR planarity test. + + Returns + ------- + embedding : dict + If the graph is planar an embedding is returned. Otherwise None. 
+ """ + if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: + # graph is not planar + return None + + # make adjacency lists for dfs + for v in self.G: + self.adjs[v] = list(self.G[v]) + + # orientation of the graph by depth first search traversal + for v in self.G: + if self.height[v] is None: + self.height[v] = 0 + self.roots.append(v) + self.dfs_orientation(v) + + # Free no longer used variables + self.G = None + self.lowpt2 = None + self.adjs = None + + # testing + for v in self.DG: # sort the adjacency lists by nesting depth + # note: this sorting leads to non linear time + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + for v in self.roots: + if not self.dfs_testing(v): + return None + + # Free no longer used variables + self.height = None + self.lowpt = None + self.S = None + self.stack_bottom = None + self.lowpt_edge = None + + for e in self.DG.edges: + self.nesting_depth[e] = self.sign(e) * self.nesting_depth[e] + + self.embedding.add_nodes_from(self.DG.nodes) + for v in self.DG: + # sort the adjacency lists again + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + # initialize the embedding + previous_node = None + for w in self.ordered_adjs[v]: + self.embedding.add_half_edge_cw(v, w, previous_node) + previous_node = w + + # Free no longer used variables + self.DG = None + self.nesting_depth = None + self.ref = None + + # compute the complete embedding + for v in self.roots: + self.dfs_embedding(v) + + # Free no longer used variables + self.roots = None + self.parent_edge = None + self.ordered_adjs = None + self.left_ref = None + self.right_ref = None + self.side = None + + return self.embedding + + def lr_planarity_recursive(self): + """Recursive version of :meth:`lr_planarity`.""" + if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: + # graph is not planar + return None + + # orientation of the graph by depth first search traversal + for v in self.G: + if self.height[v] is None: + self.height[v] = 0 + self.roots.append(v) + self.dfs_orientation_recursive(v) + + # Free no longer used variable + self.G = None + + # testing + for v in self.DG: # sort the adjacency lists by nesting depth + # note: this sorting leads to non linear time + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + for v in self.roots: + if not self.dfs_testing_recursive(v): + return None + + for e in self.DG.edges: + self.nesting_depth[e] = self.sign_recursive(e) * self.nesting_depth[e] + + self.embedding.add_nodes_from(self.DG.nodes) + for v in self.DG: + # sort the adjacency lists again + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + # initialize the embedding + previous_node = None + for w in self.ordered_adjs[v]: + self.embedding.add_half_edge_cw(v, w, previous_node) + previous_node = w + + # compute the complete embedding + for v in self.roots: + self.dfs_embedding_recursive(v) + + return self.embedding + + def dfs_orientation(self, v): + """Orient the graph by DFS, compute lowpoints and nesting order.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + # boolean to indicate whether to skip the initial work for an edge + skip_init = defaultdict(lambda: False) + + while dfs_stack: + v = dfs_stack.pop() + e = self.parent_edge[v] + + for w in self.adjs[v][ind[v] :]: + vw = (v, w) + + if not skip_init[vw]: + if (v, w) in 
self.DG.edges or (w, v) in self.DG.edges: + ind[v] += 1 + continue # the edge was already oriented + + self.DG.add_edge(v, w) # orient the edge + + self.lowpt[vw] = self.height[v] + self.lowpt2[vw] = self.height[v] + if self.height[w] is None: # (v, w) is a tree edge + self.parent_edge[w] = vw + self.height[w] = self.height[v] + 1 + + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + skip_init[vw] = True # don't redo this block + break # handle next node in dfs_stack (i.e. w) + else: # (v, w) is a back edge + self.lowpt[vw] = self.height[w] + + # determine nesting graph + self.nesting_depth[vw] = 2 * self.lowpt[vw] + if self.lowpt2[vw] < self.height[v]: # chordal + self.nesting_depth[vw] += 1 + + # update lowpoints of parent edge e + if e is not None: + if self.lowpt[vw] < self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) + self.lowpt[e] = self.lowpt[vw] + elif self.lowpt[vw] > self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) + else: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) + + ind[v] += 1 + + def dfs_orientation_recursive(self, v): + """Recursive version of :meth:`dfs_orientation`.""" + e = self.parent_edge[v] + for w in self.G[v]: + if (v, w) in self.DG.edges or (w, v) in self.DG.edges: + continue # the edge was already oriented + vw = (v, w) + self.DG.add_edge(v, w) # orient the edge + + self.lowpt[vw] = self.height[v] + self.lowpt2[vw] = self.height[v] + if self.height[w] is None: # (v, w) is a tree edge + self.parent_edge[w] = vw + self.height[w] = self.height[v] + 1 + self.dfs_orientation_recursive(w) + else: # (v, w) is a back edge + self.lowpt[vw] = self.height[w] + + # determine nesting graph + self.nesting_depth[vw] = 2 * self.lowpt[vw] + if self.lowpt2[vw] < self.height[v]: # chordal + self.nesting_depth[vw] += 1 + + # update lowpoints of parent edge e + if e is not None: + if self.lowpt[vw] < self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) + self.lowpt[e] = self.lowpt[vw] + elif self.lowpt[vw] > self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) + else: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) + + def dfs_testing(self, v): + """Test for LR partition.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + # boolean to indicate whether to skip the initial work for an edge + skip_init = defaultdict(lambda: False) + + while dfs_stack: + v = dfs_stack.pop() + e = self.parent_edge[v] + # to indicate whether to skip the final block after the for loop + skip_final = False + + for w in self.ordered_adjs[v][ind[v] :]: + ei = (v, w) + + if not skip_init[ei]: + self.stack_bottom[ei] = top_of_stack(self.S) + + if ei == self.parent_edge[w]: # tree edge + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + skip_init[ei] = True # don't redo this block + skip_final = True # skip final work after breaking + break # handle next node in dfs_stack (i.e. 
w) + else: # back edge + self.lowpt_edge[ei] = ei + self.S.append(ConflictPair(right=Interval(ei, ei))) + + # integrate new return edges + if self.lowpt[ei] < self.height[v]: + if w == self.ordered_adjs[v][0]: # e_i has return edge + self.lowpt_edge[e] = self.lowpt_edge[ei] + else: # add constraints of e_i + if not self.add_constraints(ei, e): + # graph is not planar + return False + + ind[v] += 1 + + if not skip_final: + # remove back edges returning to parent + if e is not None: # v isn't root + self.remove_back_edges(e) + + return True + + def dfs_testing_recursive(self, v): + """Recursive version of :meth:`dfs_testing`.""" + e = self.parent_edge[v] + for w in self.ordered_adjs[v]: + ei = (v, w) + self.stack_bottom[ei] = top_of_stack(self.S) + if ei == self.parent_edge[w]: # tree edge + if not self.dfs_testing_recursive(w): + return False + else: # back edge + self.lowpt_edge[ei] = ei + self.S.append(ConflictPair(right=Interval(ei, ei))) + + # integrate new return edges + if self.lowpt[ei] < self.height[v]: + if w == self.ordered_adjs[v][0]: # e_i has return edge + self.lowpt_edge[e] = self.lowpt_edge[ei] + else: # add constraints of e_i + if not self.add_constraints(ei, e): + # graph is not planar + return False + + # remove back edges returning to parent + if e is not None: # v isn't root + self.remove_back_edges(e) + return True + + def add_constraints(self, ei, e): + P = ConflictPair() + # merge return edges of e_i into P.right + while True: + Q = self.S.pop() + if not Q.left.empty(): + Q.swap() + if not Q.left.empty(): # not planar + return False + if self.lowpt[Q.right.low] > self.lowpt[e]: + # merge intervals + if P.right.empty(): # topmost interval + P.right = Q.right.copy() + else: + self.ref[P.right.low] = Q.right.high + P.right.low = Q.right.low + else: # align + self.ref[Q.right.low] = self.lowpt_edge[e] + if top_of_stack(self.S) == self.stack_bottom[ei]: + break + # merge conflicting return edges of e_1,...,e_i-1 into P.L + while top_of_stack(self.S).left.conflicting(ei, self) or top_of_stack( + self.S + ).right.conflicting(ei, self): + Q = self.S.pop() + if Q.right.conflicting(ei, self): + Q.swap() + if Q.right.conflicting(ei, self): # not planar + return False + # merge interval below lowpt(e_i) into P.R + self.ref[P.right.low] = Q.right.high + if Q.right.low is not None: + P.right.low = Q.right.low + + if P.left.empty(): # topmost interval + P.left = Q.left.copy() + else: + self.ref[P.left.low] = Q.left.high + P.left.low = Q.left.low + + if not (P.left.empty() and P.right.empty()): + self.S.append(P) + return True + + def remove_back_edges(self, e): + u = e[0] + # trim back edges ending at parent u + # drop entire conflict pairs + while self.S and top_of_stack(self.S).lowest(self) == self.height[u]: + P = self.S.pop() + if P.left.low is not None: + self.side[P.left.low] = -1 + + if self.S: # one more conflict pair to consider + P = self.S.pop() + # trim left interval + while P.left.high is not None and P.left.high[1] == u: + P.left.high = self.ref[P.left.high] + if P.left.high is None and P.left.low is not None: + # just emptied + self.ref[P.left.low] = P.right.low + self.side[P.left.low] = -1 + P.left.low = None + # trim right interval + while P.right.high is not None and P.right.high[1] == u: + P.right.high = self.ref[P.right.high] + if P.right.high is None and P.right.low is not None: + # just emptied + self.ref[P.right.low] = P.left.low + self.side[P.right.low] = -1 + P.right.low = None + self.S.append(P) + + # side of e is side of a highest return edge + if 
self.lowpt[e] < self.height[u]:  # e has return edge
+            hl = top_of_stack(self.S).left.high
+            hr = top_of_stack(self.S).right.high
+
+            if hl is not None and (hr is None or self.lowpt[hl] > self.lowpt[hr]):
+                self.ref[e] = hl
+            else:
+                self.ref[e] = hr
+
+    def dfs_embedding(self, v):
+        """Completes the embedding."""
+        # the recursion stack
+        dfs_stack = [v]
+        # index of next edge to handle in adjacency list of each node
+        ind = defaultdict(lambda: 0)
+
+        while dfs_stack:
+            v = dfs_stack.pop()
+
+            for w in self.ordered_adjs[v][ind[v] :]:
+                ind[v] += 1
+                ei = (v, w)
+
+                if ei == self.parent_edge[w]:  # tree edge
+                    self.embedding.add_half_edge_first(w, v)
+                    self.left_ref[v] = w
+                    self.right_ref[v] = w
+
+                    dfs_stack.append(v)  # revisit v after finishing w
+                    dfs_stack.append(w)  # visit w next
+                    break  # handle next node in dfs_stack (i.e. w)
+                else:  # back edge
+                    if self.side[ei] == 1:
+                        self.embedding.add_half_edge_cw(w, v, self.right_ref[w])
+                    else:
+                        self.embedding.add_half_edge_ccw(w, v, self.left_ref[w])
+                        self.left_ref[w] = v
+
+    def dfs_embedding_recursive(self, v):
+        """Recursive version of :meth:`dfs_embedding`."""
+        for w in self.ordered_adjs[v]:
+            ei = (v, w)
+            if ei == self.parent_edge[w]:  # tree edge
+                self.embedding.add_half_edge_first(w, v)
+                self.left_ref[v] = w
+                self.right_ref[v] = w
+                self.dfs_embedding_recursive(w)
+            else:  # back edge
+                if self.side[ei] == 1:
+                    # place v directly after right_ref[w] in embed. list of w
+                    self.embedding.add_half_edge_cw(w, v, self.right_ref[w])
+                else:
+                    # place v directly before left_ref[w] in embed. list of w
+                    self.embedding.add_half_edge_ccw(w, v, self.left_ref[w])
+                    self.left_ref[w] = v
+
+    def sign(self, e):
+        """Resolve the relative side of an edge to the absolute side."""
+        # the recursion stack
+        dfs_stack = [e]
+        # dict to remember reference edges
+        old_ref = defaultdict(lambda: None)
+
+        while dfs_stack:
+            e = dfs_stack.pop()
+
+            if self.ref[e] is not None:
+                dfs_stack.append(e)  # revisit e after finishing self.ref[e]
+                dfs_stack.append(self.ref[e])  # visit self.ref[e] next
+                old_ref[e] = self.ref[e]  # remember value of self.ref[e]
+                self.ref[e] = None
+            else:
+                self.side[e] *= self.side[old_ref[e]]
+
+        return self.side[e]
+
+    def sign_recursive(self, e):
+        """Recursive version of :meth:`sign`."""
+        if self.ref[e] is not None:
+            self.side[e] = self.side[e] * self.sign_recursive(self.ref[e])
+            self.ref[e] = None
+        return self.side[e]
+
+
+class PlanarEmbedding(nx.DiGraph):
+    """Represents a planar graph with its planar embedding.
+
+    The planar embedding is given by a `combinatorial embedding
+    <https://en.wikipedia.org/wiki/Graph_embedding#Combinatorial_embedding>`_.
+
+    .. note:: `check_planarity` is the preferred way to check if a graph is planar.
+
+    **Neighbor ordering:**
+
+    In comparison to a usual graph structure, the embedding also stores the
+    order of all neighbors for every vertex.
+    The order of the neighbors can be given in clockwise (cw) direction or
+    counterclockwise (ccw) direction. This order is stored as edge attributes
+    in the underlying directed graph. For the edge (u, v) the edge attribute
+    'cw' is set to the neighbor of u that follows immediately after v in
+    clockwise direction.
+
+    In order for a PlanarEmbedding to be valid it must fulfill multiple
+    conditions. It is possible to check if these conditions are fulfilled with
+    the method :meth:`check_structure`.
+    The conditions are:
+
+    * Edges must go in both directions (because the edge attributes differ)
+    * Every edge must have a 'cw' and 'ccw' attribute which corresponds to a
+      correct planar embedding.
+    * A node with non-zero degree must have a node attribute 'first_nbr'.
+
+    As long as a PlanarEmbedding is invalid, only the following methods should
+    be called:
+
+    * :meth:`add_half_edge_ccw`
+    * :meth:`add_half_edge_cw`
+    * :meth:`connect_components`
+    * :meth:`add_half_edge_first`
+
+    Even though the graph is a subclass of nx.DiGraph, it can still be used
+    for algorithms that require undirected graphs, because the method
+    :meth:`is_directed` is overridden. This is possible, because a valid
+    PlanarEmbedding must have edges in both directions.
+
+    **Half edges:**
+
+    In methods like `add_half_edge_ccw` the term "half-edge" is used, a term
+    that comes from `doubly connected edge lists
+    <https://en.wikipedia.org/wiki/Doubly_connected_edge_list>`_. It is used
+    to emphasize that the edge is only in one direction and there exists
+    another half-edge in the opposite direction.
+    While conventional edges always have two faces (including outer face) next
+    to them, it is possible to assign each half-edge *exactly one* face.
+    For a half-edge (u, v) that is oriented such that u is below v, the
+    face that belongs to (u, v) is to the right of this half-edge.
+
+    See Also
+    --------
+    is_planar :
+        Preferred way to check if an existing graph is planar.
+
+    check_planarity :
+        A convenient way to create a `PlanarEmbedding`. If not planar,
+        it returns a subgraph that shows this.
+
+    Examples
+    --------
+
+    Create an embedding of a star graph (compare `nx.star_graph(3)`):
+
+    >>> G = nx.PlanarEmbedding()
+    >>> G.add_half_edge_cw(0, 1, None)
+    >>> G.add_half_edge_cw(0, 2, 1)
+    >>> G.add_half_edge_cw(0, 3, 2)
+    >>> G.add_half_edge_cw(1, 0, None)
+    >>> G.add_half_edge_cw(2, 0, None)
+    >>> G.add_half_edge_cw(3, 0, None)
+
+    Alternatively the same embedding can also be defined in counterclockwise
+    orientation. The following results in exactly the same PlanarEmbedding:
+
+    >>> G = nx.PlanarEmbedding()
+    >>> G.add_half_edge_ccw(0, 1, None)
+    >>> G.add_half_edge_ccw(0, 3, 1)
+    >>> G.add_half_edge_ccw(0, 2, 3)
+    >>> G.add_half_edge_ccw(1, 0, None)
+    >>> G.add_half_edge_ccw(2, 0, None)
+    >>> G.add_half_edge_ccw(3, 0, None)
+
+    After creating a graph, it is possible to validate that the PlanarEmbedding
+    object is correct:
+
+    >>> G.check_structure()
+
+    """
+
+    def get_data(self):
+        """Converts the adjacency structure into a more readable structure.
+
+        Returns
+        -------
+        embedding : dict
+            A dict mapping all nodes to a list of neighbors sorted in
+            clockwise order.
+
+        See Also
+        --------
+        set_data
+
+        """
+        embedding = {}
+        for v in self:
+            embedding[v] = list(self.neighbors_cw_order(v))
+        return embedding
+
+    def set_data(self, data):
+        """Inserts edges according to given sorted neighbor list.
+
+        The input format is the same as the output format of get_data().
+
+        Parameters
+        ----------
+        data : dict
+            A dict mapping all nodes to a list of neighbors sorted in
+            clockwise order.
+
+        See Also
+        --------
+        get_data
+
+        """
+        for v in data:
+            for w in reversed(data[v]):
+                self.add_half_edge_first(v, w)
+
+    def neighbors_cw_order(self, v):
+        """Generator for the neighbors of v in clockwise order.
+
+        Parameters
+        ----------
+        v : node
+
+        Yields
+        ------
+        node
+
+        """
+        if len(self[v]) == 0:
+            # v has no neighbors
+            return
+        start_node = self.nodes[v]["first_nbr"]
+        yield start_node
+        current_node = self[v][start_node]["cw"]
+        while start_node != current_node:
+            yield current_node
+            current_node = self[v][current_node]["cw"]
+
+    def check_structure(self):
+        """Runs without exceptions if this object is valid.
+ + Checks that the following properties are fulfilled: + + * Edges go in both directions (because the edge attributes differ). + * Every edge has a 'cw' and 'ccw' attribute which corresponds to a + correct planar embedding. + * A node with a degree larger than 0 has a node attribute 'first_nbr'. + + Running this method verifies that the underlying Graph must be planar. + + Raises + ------ + NetworkXException + This exception is raised with a short explanation if the + PlanarEmbedding is invalid. + """ + # Check fundamental structure + for v in self: + try: + sorted_nbrs = set(self.neighbors_cw_order(v)) + except KeyError as err: + msg = f"Bad embedding. Missing orientation for a neighbor of {v}" + raise nx.NetworkXException(msg) from err + + unsorted_nbrs = set(self[v]) + if sorted_nbrs != unsorted_nbrs: + msg = "Bad embedding. Edge orientations not set correctly." + raise nx.NetworkXException(msg) + for w in self[v]: + # Check if opposite half-edge exists + if not self.has_edge(w, v): + msg = "Bad embedding. Opposite half-edge is missing." + raise nx.NetworkXException(msg) + + # Check planarity + counted_half_edges = set() + for component in nx.connected_components(self): + if len(component) == 1: + # Don't need to check single node component + continue + num_nodes = len(component) + num_half_edges = 0 + num_faces = 0 + for v in component: + for w in self.neighbors_cw_order(v): + num_half_edges += 1 + if (v, w) not in counted_half_edges: + # We encountered a new face + num_faces += 1 + # Mark all half-edges belonging to this face + self.traverse_face(v, w, counted_half_edges) + num_edges = num_half_edges // 2 # num_half_edges is even + if num_nodes - num_edges + num_faces != 2: + # The result does not match Euler's formula + msg = "Bad embedding. The graph does not match Euler's formula" + raise nx.NetworkXException(msg) + + def add_half_edge_ccw(self, start_node, end_node, reference_neighbor): + """Adds a half-edge from start_node to end_node. + + The half-edge is added counter clockwise next to the existing half-edge + (start_node, reference_neighbor). + + Parameters + ---------- + start_node : node + Start node of inserted edge. + end_node : node + End node of inserted edge. + reference_neighbor: node + End node of reference edge. + + Raises + ------ + NetworkXException + If the reference_neighbor does not exist. + + See Also + -------- + add_half_edge_cw + connect_components + add_half_edge_first + + """ + if reference_neighbor is None: + # The start node has no neighbors + self.add_edge(start_node, end_node) # Add edge to graph + self[start_node][end_node]["cw"] = end_node + self[start_node][end_node]["ccw"] = end_node + self.nodes[start_node]["first_nbr"] = end_node + else: + ccw_reference = self[start_node][reference_neighbor]["ccw"] + self.add_half_edge_cw(start_node, end_node, ccw_reference) + + if reference_neighbor == self.nodes[start_node].get("first_nbr", None): + # Update first neighbor + self.nodes[start_node]["first_nbr"] = end_node + + def add_half_edge_cw(self, start_node, end_node, reference_neighbor): + """Adds a half-edge from start_node to end_node. + + The half-edge is added clockwise next to the existing half-edge + (start_node, reference_neighbor). + + Parameters + ---------- + start_node : node + Start node of inserted edge. + end_node : node + End node of inserted edge. + reference_neighbor: node + End node of reference edge. + + Raises + ------ + NetworkXException + If the reference_neighbor does not exist. 
+ + See Also + -------- + add_half_edge_ccw + connect_components + add_half_edge_first + """ + self.add_edge(start_node, end_node) # Add edge to graph + + if reference_neighbor is None: + # The start node has no neighbors + self[start_node][end_node]["cw"] = end_node + self[start_node][end_node]["ccw"] = end_node + self.nodes[start_node]["first_nbr"] = end_node + return + + if reference_neighbor not in self[start_node]: + raise nx.NetworkXException( + "Cannot add edge. Reference neighbor does not exist" + ) + + # Get half-edge at the other side + cw_reference = self[start_node][reference_neighbor]["cw"] + # Alter half-edge data structures + self[start_node][reference_neighbor]["cw"] = end_node + self[start_node][end_node]["cw"] = cw_reference + self[start_node][cw_reference]["ccw"] = end_node + self[start_node][end_node]["ccw"] = reference_neighbor + + def connect_components(self, v, w): + """Adds half-edges for (v, w) and (w, v) at some position. + + This method should only be called if v and w are in different + components, or it might break the embedding. + This especially means that if `connect_components(v, w)` + is called it is not allowed to call `connect_components(w, v)` + afterwards. The neighbor orientations in both directions are + all set correctly after the first call. + + Parameters + ---------- + v : node + w : node + + See Also + -------- + add_half_edge_ccw + add_half_edge_cw + add_half_edge_first + """ + self.add_half_edge_first(v, w) + self.add_half_edge_first(w, v) + + def add_half_edge_first(self, start_node, end_node): + """The added half-edge is inserted at the first position in the order. + + Parameters + ---------- + start_node : node + end_node : node + + See Also + -------- + add_half_edge_ccw + add_half_edge_cw + connect_components + """ + if start_node in self and "first_nbr" in self.nodes[start_node]: + reference = self.nodes[start_node]["first_nbr"] + else: + reference = None + self.add_half_edge_ccw(start_node, end_node, reference) + + def next_face_half_edge(self, v, w): + """Returns the following half-edge left of a face. + + Parameters + ---------- + v : node + w : node + + Returns + ------- + half-edge : tuple + """ + new_node = self[w][v]["ccw"] + return w, new_node + + def traverse_face(self, v, w, mark_half_edges=None): + """Returns nodes on the face that belong to the half-edge (v, w). + + The face that is traversed lies to the right of the half-edge (in an + orientation where v is below w). + + Optionally it is possible to pass a set to which all encountered half + edges are added. Before calling this method, this set must not include + any half-edges that belong to the face. + + Parameters + ---------- + v : node + Start node of half-edge. + w : node + End node of half-edge. + mark_half_edges: set, optional + Set to which all encountered half-edges are added. + + Returns + ------- + face : list + A list of nodes that lie on this face. + """ + if mark_half_edges is None: + mark_half_edges = set() + + face_nodes = [v] + mark_half_edges.add((v, w)) + prev_node = v + cur_node = w + # Last half-edge is (incoming_node, v) + incoming_node = self[v][w]["cw"] + + while cur_node != v or prev_node != incoming_node: + face_nodes.append(cur_node) + prev_node, cur_node = self.next_face_half_edge(prev_node, cur_node) + if (prev_node, cur_node) in mark_half_edges: + raise nx.NetworkXException("Bad planar embedding. 
Impossible face.")
+            mark_half_edges.add((prev_node, cur_node))
+
+        return face_nodes
+
+    def is_directed(self):
+        """A valid PlanarEmbedding is undirected.
+
+        All reverse edges are contained, i.e. for every existing
+        half-edge (v, w) the half-edge in the opposite direction (w, v) is also
+        contained.
+        """
+        return False
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/polynomials.py b/phivenv/Lib/site-packages/networkx/algorithms/polynomials.py
new file mode 100644
index 0000000000000000000000000000000000000000..57ecf0d09a976df4cc909d1ac9f0fe8780f66f32
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/polynomials.py
@@ -0,0 +1,305 @@
+"""Provides algorithms supporting the computation of graph polynomials.
+
+Graph polynomials are polynomial-valued graph invariants that encode a wide
+variety of structural information. Examples include the Tutte polynomial,
+chromatic polynomial, characteristic polynomial, and matching polynomial. An
+extensive treatment is provided in [1]_.
+
+For a simple example, the `~sympy.matrices.matrices.MatrixDeterminant.charpoly`
+method can be used to compute the characteristic polynomial from the adjacency
+matrix of a graph. Consider the complete graph ``K_4``:
+
+>>> import sympy
+>>> x = sympy.Symbol("x")
+>>> G = nx.complete_graph(4)
+>>> A = nx.adjacency_matrix(G)
+>>> M = sympy.SparseMatrix(A.todense())
+>>> M.charpoly(x).as_expr()
+x**4 - 6*x**2 - 8*x - 3
+
+
+.. [1] Y. Shi, M. Dehmer, X. Li, I. Gutman,
+   "Graph Polynomials"
+"""
+from collections import deque
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["tutte_polynomial", "chromatic_polynomial"]
+
+
+@not_implemented_for("directed")
+@nx._dispatch
+def tutte_polynomial(G):
+    r"""Returns the Tutte polynomial of `G`
+
+    This function computes the Tutte polynomial via an iterative version of
+    the deletion-contraction algorithm.
+
+    The Tutte polynomial `T_G(x, y)` is a fundamental graph polynomial invariant in
+    two variables. It encodes a wide array of information related to the
+    edge-connectivity of a graph; "Many problems about graphs can be reduced to
+    problems of finding and evaluating the Tutte polynomial at certain values" [1]_.
+    In fact, every deletion-contraction-expressible feature of a graph is a
+    specialization of the Tutte polynomial [2]_ (see Notes for examples).
+
+    There are several equivalent definitions; here are three:
+
+    Def 1 (rank-nullity expansion): For `G` an undirected graph, `n(G)` the
+    number of vertices of `G`, `E` the edge set of `G`, `V` the vertex set of
+    `G`, and `c(A)` the number of connected components of the graph with vertex
+    set `V` and edge set `A` [3]_:
+
+    .. math::
+
+        T_G(x, y) = \sum_{A \subseteq E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)}
+
+    Def 2 (spanning tree expansion): Let `G` be an undirected graph, `T` a spanning
+    tree of `G`, and `E` the edge set of `G`. Let `E` have an arbitrary strict
+    linear order `L`. For an edge `e` of `T`, let `B_e` be the unique minimal
+    nonempty edge cut of $(E \setminus T) \cup \{e\}$. An edge `e` is internally
+    active with respect to `T` and `L` if `e` is the least edge in `B_e` according
+    to the linear order `L`. The internal activity of `T` (denoted `i(T)`) is the
+    number of edges in `T` that are internally active with respect to `T` and `L`.
+    For an edge `e` not in `T`, let `P_e` be the unique path in $T \cup \{e\}$
+    whose source and target vertex are the same.
An edge `e` is externally active with respect to `T` and `L`
+    if `e` is the least edge in `P_e` according to the linear order `L`. The
+    external activity of `T` (denoted `e(T)`) is the number of edges in
+    $E \setminus T$ that are externally active with respect to `T` and `L`.
+    Then [4]_ [5]_:
+
+    .. math::
+
+        T_G(x, y) = \sum_{T \text{ a spanning tree of } G} x^{i(T)} y^{e(T)}
+
+    Def 3 (deletion-contraction recurrence): For `G` an undirected graph, `G-e`
+    the graph obtained from `G` by deleting edge `e`, `G/e` the graph obtained
+    from `G` by contracting edge `e`, `k(G)` the number of cut-edges of `G`,
+    and `l(G)` the number of self-loops of `G`:
+
+    .. math::
+        T_G(x, y) = \begin{cases}
+            x^{k(G)} y^{l(G)}, & \text{if all edges are cut-edges or self-loops} \\
+            T_{G-e}(x, y) + T_{G/e}(x, y), & \text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop}
+        \end{cases}
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    instance of `sympy.core.add.Add`
+        A Sympy expression representing the Tutte polynomial for `G`.
+
+    Examples
+    --------
+    >>> C = nx.cycle_graph(5)
+    >>> nx.tutte_polynomial(C)
+    x**4 + x**3 + x**2 + x + y
+
+    >>> D = nx.diamond_graph()
+    >>> nx.tutte_polynomial(D)
+    x**3 + 2*x**2 + 2*x*y + x + y**2 + y
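+
+    The returned value is a sympy expression, so the specializations listed in
+    the Notes below can be evaluated directly; for instance, `T_G(1, 1)` counts
+    the spanning trees of `G` (the cycle graph `C_5` has five):
+
+    >>> import sympy
+    >>> T = nx.tutte_polynomial(nx.cycle_graph(5))
+    >>> T.subs({sympy.Symbol("x"): 1, sympy.Symbol("y"): 1})
+    5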
+
+    Notes
+    -----
+    Some specializations of the Tutte polynomial:
+
+    - `T_G(1, 1)` counts the number of spanning trees of `G`
+    - `T_G(1, 2)` counts the number of connected spanning subgraphs of `G`
+    - `T_G(2, 1)` counts the number of spanning forests in `G`
+    - `T_G(0, 2)` counts the number of strong orientations of `G`
+    - `T_G(2, 0)` counts the number of acyclic orientations of `G`
+
+    Edge contraction is defined and deletion-contraction is introduced in [6]_.
+    The combinatorial meaning of the coefficients is introduced in [7]_.
+    Universality, properties, and applications are discussed in [8]_.
+
+    Practically, up-front computation of the Tutte polynomial may be useful when
+    users wish to repeatedly calculate edge-connectivity-related information
+    about one or more graphs.
+
+    References
+    ----------
+    .. [1] M. Brandt,
+       "The Tutte Polynomial."
+       Talking About Combinatorial Objects Seminar, 2015
+       https://math.berkeley.edu/~brandtm/talks/tutte.pdf
+    .. [2] A. Björklund, T. Husfeldt, P. Kaski, M. Koivisto,
+       "Computing the Tutte polynomial in vertex-exponential time"
+       49th Annual IEEE Symposium on Foundations of Computer Science, 2008
+       https://ieeexplore.ieee.org/abstract/document/4691000
+    .. [3] Y. Shi, M. Dehmer, X. Li, I. Gutman,
+       "Graph Polynomials," p. 14
+    .. [4] Y. Shi, M. Dehmer, X. Li, I. Gutman,
+       "Graph Polynomials," p. 46
+    .. [5] A. Nešetřil, J. Goodall,
+       "Graph invariants, homomorphisms, and the Tutte polynomial"
+       https://iuuk.mff.cuni.cz/~andrew/Tutte.pdf
+    .. [6] D. B. West,
+       "Introduction to Graph Theory," p. 84
+    .. [7] G. Coutinho,
+       "A brief introduction to the Tutte polynomial"
+       Structural Analysis of Complex Networks, 2011
+       https://homepages.dcc.ufmg.br/~gabriel/seminars/coutinho_tuttepolynomial_seminar.pdf
+    .. [8] J. A. Ellis-Monaghan, C. Merino,
+       "Graph polynomials and their applications I: The Tutte polynomial"
+       Structural Analysis of Complex Networks, 2011
+       https://arxiv.org/pdf/0803.3079.pdf
+    """
+    import sympy
+
+    x = sympy.Symbol("x")
+    y = sympy.Symbol("y")
+    stack = deque()
+    stack.append(nx.MultiGraph(G))
+
+    polynomial = 0
+    while stack:
+        G = stack.pop()
+        bridges = set(nx.bridges(G))
+
+        e = None
+        for i in G.edges:
+            if (i[0], i[1]) not in bridges and i[0] != i[1]:
+                e = i
+                break
+        if not e:
+            loops = list(nx.selfloop_edges(G, keys=True))
+            polynomial += x ** len(bridges) * y ** len(loops)
+        else:
+            # deletion-contraction
+            C = nx.contracted_edge(G, e, self_loops=True)
+            C.remove_edge(e[0], e[0])
+            G.remove_edge(*e)
+            stack.append(G)
+            stack.append(C)
+    return sympy.simplify(polynomial)
+
+
+@not_implemented_for("directed")
+@nx._dispatch
+def chromatic_polynomial(G):
+    r"""Returns the chromatic polynomial of `G`
+
+    This function computes the chromatic polynomial via an iterative version of
+    the deletion-contraction algorithm.
+
+    The chromatic polynomial `X_G(x)` is a fundamental graph polynomial
+    invariant in one variable. Evaluating `X_G(k)` for a natural number `k`
+    counts the proper k-colorings of `G`.
+
+    There are several equivalent definitions; here are three:
+
+    Def 1 (explicit formula):
+    For `G` an undirected graph, `c(G)` the number of connected components of
+    `G`, `E` the edge set of `G`, and `G(S)` the spanning subgraph of `G` with
+    edge set `S` [1]_:
+
+    .. math::
+
+        X_G(x) = \sum_{S \subseteq E} (-1)^{|S|} x^{c(G(S))}
+
+    Def 2 (interpolating polynomial):
+    For `G` an undirected graph, `n(G)` the number of vertices of `G`, `k_0 = 0`,
+    and `k_i` the number of distinct ways to color the vertices of `G` with `i`
+    unique colors (for `i` a natural number at most `n(G)`), `X_G(x)` is the
+    unique Lagrange interpolating polynomial of degree `n(G)` through the points
+    `(0, k_0), (1, k_1), \dots, (n(G), k_{n(G)})` [2]_.
+
+    Def 3 (chromatic recurrence):
+    For `G` an undirected graph, `G-e` the graph obtained from `G` by deleting
+    edge `e`, `G/e` the graph obtained from `G` by contracting edge `e`, `n(G)`
+    the number of vertices of `G`, and `e(G)` the number of edges of `G` [3]_:
+
+    .. math::
+        X_G(x) = \begin{cases}
+            x^{n(G)}, & \text{if $e(G)=0$} \\
+            X_{G-e}(x) - X_{G/e}(x), & \text{otherwise, for an arbitrary edge $e$}
+        \end{cases}
+
+    This formulation is also known as the Fundamental Reduction Theorem [4]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    instance of `sympy.core.add.Add`
+        A Sympy expression representing the chromatic polynomial for `G`.
+
+    Examples
+    --------
+    >>> C = nx.cycle_graph(5)
+    >>> nx.chromatic_polynomial(C)
+    x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x
+
+    >>> G = nx.complete_graph(4)
+    >>> nx.chromatic_polynomial(G)
+    x**4 - 6*x**3 + 11*x**2 - 6*x
+
+    Notes
+    -----
+    Interpretation of the coefficients is discussed in [5]_. Several special
+    cases are listed in [2]_.
+
+    The chromatic polynomial is a specialization of the Tutte polynomial; in
+    particular, ``X_G(x) = (-1)^{n(G) - c(G)} x^{c(G)} T_G(1 - x, 0)`` [6]_.
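+
+    For example, for the cycle graph `C_5` (`n(G) = 5`, `c(G) = 1`) this
+    relation can be checked directly on the sympy expressions:
+
+    >>> import sympy
+    >>> x, y = sympy.symbols("x y")
+    >>> T = nx.tutte_polynomial(nx.cycle_graph(5))
+    >>> sympy.expand((-1) ** (5 - 1) * x * T.subs({x: 1 - x, y: 0}))
+    x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x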
+
+    The chromatic polynomial may take negative arguments, though evaluations
+    may not have chromatic interpretations. For instance, ``(-1)^{n(G)} X_G(-1)``
+    counts the acyclic orientations of `G` [7]_.
+
+    References
+    ----------
+    .. [1] D. B. West,
+       "Introduction to Graph Theory," p. 222
+    .. [2] E. W. Weisstein,
+       "Chromatic Polynomial"
+       MathWorld--A Wolfram Web Resource
+       https://mathworld.wolfram.com/ChromaticPolynomial.html
+    .. [3] D. B. West,
+       "Introduction to Graph Theory," p. 221
+    .. [4] J. Zhang, J. Goodall,
+       "An Introduction to Chromatic Polynomials"
+       https://math.mit.edu/~apost/courses/18.204_2018/Julie_Zhang_paper.pdf
+    .. [5] R. C. Read,
+       "An Introduction to Chromatic Polynomials"
+       Journal of Combinatorial Theory, 1968
+       https://math.berkeley.edu/~mrklug/ReadChromatic.pdf
+    .. [6] W. T. Tutte,
+       "Graph-polynomials"
+       Advances in Applied Mathematics, 2004
+       https://www.sciencedirect.com/science/article/pii/S0196885803000411
+    .. [7] R. P. Stanley,
+       "Acyclic orientations of graphs"
+       Discrete Mathematics, 2006
+       https://math.mit.edu/~rstan/pubs/pubfiles/18.pdf
+    """
+    import sympy
+
+    x = sympy.Symbol("x")
+    stack = deque()
+    stack.append(nx.MultiGraph(G, contraction_idx=0))
+
+    polynomial = 0
+    while stack:
+        G = stack.pop()
+        edges = list(G.edges)
+        if not edges:
+            polynomial += (-1) ** G.graph["contraction_idx"] * x ** len(G)
+        else:
+            e = edges[0]
+            C = nx.contracted_edge(G, e, self_loops=True)
+            C.graph["contraction_idx"] = G.graph["contraction_idx"] + 1
+            C.remove_edge(e[0], e[0])
+            G.remove_edge(*e)
+            stack.append(G)
+            stack.append(C)
+    return polynomial
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/reciprocity.py b/phivenv/Lib/site-packages/networkx/algorithms/reciprocity.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb36ae9d55127ba69f7cc9e163c69004620d8722
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/reciprocity.py
@@ -0,0 +1,97 @@
+"""Algorithms to calculate reciprocity in a directed graph."""
+import networkx as nx
+from networkx import NetworkXError
+
+from ..utils import not_implemented_for
+
+__all__ = ["reciprocity", "overall_reciprocity"]
+
+
+@not_implemented_for("undirected", "multigraph")
+@nx._dispatch
+def reciprocity(G, nodes=None):
+    r"""Compute the reciprocity in a directed graph.
+
+    The reciprocity of a directed graph is defined as the ratio
+    of the number of edges pointing in both directions to the total
+    number of edges in the graph.
+    Formally, $r = |\{(u, v) \in G \mid (v, u) \in G\}| / |\{(u, v) \in G\}|$.
+
+    The reciprocity of a single node u is defined similarly: it is the
+    ratio of the number of edges in both directions to the total number
+    of edges attached to node u.
+
+    Parameters
+    ----------
+    G : graph
+       A networkx directed graph
+    nodes : container of nodes, optional (default=whole graph)
+       Compute reciprocity for nodes in this container.
+
+    Returns
+    -------
+    out : dictionary
+       Reciprocity keyed by node label.
+
+    Notes
+    -----
+    The reciprocity is not defined for isolated nodes.
+    In such cases this function will return None.
+
+    """
+    # If `nodes` is not specified, calculate the reciprocity of the graph.
+    if nodes is None:
+        return overall_reciprocity(G)
+
+    # If `nodes` represents a single node in the graph, return only its
+    # reciprocity.
+    if nodes in G:
+        reciprocity = next(_reciprocity_iter(G, nodes))[1]
+        if reciprocity is None:
+            raise NetworkXError("Not defined for isolated nodes.")
+        else:
+            return reciprocity
+
+    # Otherwise, `nodes` represents an iterable of nodes, so return a
+    # dictionary mapping node to its reciprocity.
+    return dict(_reciprocity_iter(G, nodes))
+
+
+def _reciprocity_iter(G, nodes):
+    """Return an iterator of (node, reciprocity)."""
+    n = G.nbunch_iter(nodes)
+    for node in n:
+        pred = set(G.predecessors(node))
+        succ = set(G.successors(node))
+        overlap = pred & succ
+        n_total = len(pred) + len(succ)
+
+        # Reciprocity is not defined for isolated nodes.
+        # Return None.
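+        # A reciprocated pair appears in both `pred` and `succ`, so it
+        # contributes twice to `n_total` but only once to `overlap`; the
+        # factor of 2 below compensates for that.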
+ if n_total == 0: + yield (node, None) + else: + reciprocity = 2 * len(overlap) / n_total + yield (node, reciprocity) + + +@not_implemented_for("undirected", "multigraph") +@nx._dispatch +def overall_reciprocity(G): + """Compute the reciprocity for the whole graph. + + See the doc of reciprocity for the definition. + + Parameters + ---------- + G : graph + A networkx graph + + """ + n_all_edge = G.number_of_edges() + n_overlap_edge = (n_all_edge - G.to_undirected().number_of_edges()) * 2 + + if n_all_edge == 0: + raise NetworkXError("Not defined for empty graphs") + + return n_overlap_edge / n_all_edge diff --git a/phivenv/Lib/site-packages/networkx/algorithms/regular.py b/phivenv/Lib/site-packages/networkx/algorithms/regular.py new file mode 100644 index 0000000000000000000000000000000000000000..f9397fab2fd97c347204f11f158ecc7fd7f4be63 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/regular.py @@ -0,0 +1,212 @@ +"""Functions for computing and verifying regular graphs.""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["is_regular", "is_k_regular", "k_factor"] + + +@nx._dispatch +def is_regular(G): + """Determines whether the graph ``G`` is a regular graph. + + A regular graph is a graph where each vertex has the same degree. A + regular digraph is a graph where the indegree and outdegree of each + vertex are equal. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + bool + Whether the given graph or digraph is regular. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 1)]) + >>> nx.is_regular(G) + True + + """ + n1 = nx.utils.arbitrary_element(G) + if not G.is_directed(): + d1 = G.degree(n1) + return all(d1 == d for _, d in G.degree) + else: + d_in = G.in_degree(n1) + in_regular = all(d_in == d for _, d in G.in_degree) + d_out = G.out_degree(n1) + out_regular = all(d_out == d for _, d in G.out_degree) + return in_regular and out_regular + + +@not_implemented_for("directed") +@nx._dispatch +def is_k_regular(G, k): + """Determines whether the graph ``G`` is a k-regular graph. + + A k-regular graph is a graph where each vertex has degree k. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + bool + Whether the given graph is k-regular. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (2, 3), (3, 4), (4, 1)]) + >>> nx.is_k_regular(G, k=3) + False + + """ + return all(d == k for n, d in G.degree) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="matching_weight") +def k_factor(G, k, matching_weight="weight"): + """Compute a k-factor of G + + A k-factor of a graph is a spanning k-regular subgraph. + A spanning k-regular subgraph of G is a subgraph that contains + each vertex of G and a subset of the edges of G such that each + vertex has degree k. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + matching_weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + Used for finding the max-weighted perfect matching. + If key not found, uses 1 as weight. + + Returns + ------- + G2 : NetworkX graph + A k-factor of G + + Examples + -------- + >>> G = nx.Graph([(1, 2), (2, 3), (3, 4), (4, 1)]) + >>> G2 = nx.k_factor(G, k=1) + >>> G2.edges() + EdgeView([(1, 2), (3, 4)]) + + References + ---------- + .. [1] "An algorithm for computing simple k-factors.", + Meijer, Henk, Yurai Núñez-Rodríguez, and David Rappaport, + Information processing letters, 2009. 
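+
+    The implementation below follows the reduction of [1]_: every vertex is
+    replaced by a gadget, a maximum-cardinality matching is computed on the
+    transformed graph, and the gadgets are then restored. If that matching is
+    not perfect, no k-factor exists and ``NetworkXUnfeasible`` is raised.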
+ """ + + from networkx.algorithms.matching import is_perfect_matching, max_weight_matching + + class LargeKGadget: + def __init__(self, k, degree, node, g): + self.original = node + self.g = g + self.k = k + self.degree = degree + + self.outer_vertices = [(node, x) for x in range(degree)] + self.core_vertices = [(node, x + degree) for x in range(degree - k)] + + def replace_node(self): + adj_view = self.g[self.original] + neighbors = list(adj_view.keys()) + edge_attrs = list(adj_view.values()) + for outer, neighbor, edge_attrs in zip( + self.outer_vertices, neighbors, edge_attrs + ): + self.g.add_edge(outer, neighbor, **edge_attrs) + for core in self.core_vertices: + for outer in self.outer_vertices: + self.g.add_edge(core, outer) + self.g.remove_node(self.original) + + def restore_node(self): + self.g.add_node(self.original) + for outer in self.outer_vertices: + adj_view = self.g[outer] + for neighbor, edge_attrs in list(adj_view.items()): + if neighbor not in self.core_vertices: + self.g.add_edge(self.original, neighbor, **edge_attrs) + break + g.remove_nodes_from(self.outer_vertices) + g.remove_nodes_from(self.core_vertices) + + class SmallKGadget: + def __init__(self, k, degree, node, g): + self.original = node + self.k = k + self.degree = degree + self.g = g + + self.outer_vertices = [(node, x) for x in range(degree)] + self.inner_vertices = [(node, x + degree) for x in range(degree)] + self.core_vertices = [(node, x + 2 * degree) for x in range(k)] + + def replace_node(self): + adj_view = self.g[self.original] + for outer, inner, (neighbor, edge_attrs) in zip( + self.outer_vertices, self.inner_vertices, list(adj_view.items()) + ): + self.g.add_edge(outer, inner) + self.g.add_edge(outer, neighbor, **edge_attrs) + for core in self.core_vertices: + for inner in self.inner_vertices: + self.g.add_edge(core, inner) + self.g.remove_node(self.original) + + def restore_node(self): + self.g.add_node(self.original) + for outer in self.outer_vertices: + adj_view = self.g[outer] + for neighbor, edge_attrs in adj_view.items(): + if neighbor not in self.core_vertices: + self.g.add_edge(self.original, neighbor, **edge_attrs) + break + self.g.remove_nodes_from(self.outer_vertices) + self.g.remove_nodes_from(self.inner_vertices) + self.g.remove_nodes_from(self.core_vertices) + + # Step 1 + if any(d < k for _, d in G.degree): + raise nx.NetworkXUnfeasible("Graph contains a vertex with degree less than k") + g = G.copy() + + # Step 2 + gadgets = [] + for node, degree in list(g.degree): + if k < degree / 2.0: + gadget = SmallKGadget(k, degree, node, g) + else: + gadget = LargeKGadget(k, degree, node, g) + gadget.replace_node() + gadgets.append(gadget) + + # Step 3 + matching = max_weight_matching(g, maxcardinality=True, weight=matching_weight) + + # Step 4 + if not is_perfect_matching(g, matching): + raise nx.NetworkXUnfeasible( + "Cannot find k-factor because no perfect matching exists" + ) + + for edge in g.edges(): + if edge not in matching and (edge[1], edge[0]) not in matching: + g.remove_edge(edge[0], edge[1]) + + for gadget in gadgets: + gadget.restore_node() + + return g diff --git a/phivenv/Lib/site-packages/networkx/algorithms/richclub.py b/phivenv/Lib/site-packages/networkx/algorithms/richclub.py new file mode 100644 index 0000000000000000000000000000000000000000..e9980f0347b144d4d3b3386e15ab89305aa3ea5c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/richclub.py @@ -0,0 +1,121 @@ +"""Functions for computing rich-club coefficients.""" + +from itertools import accumulate + 
+import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["rich_club_coefficient"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def rich_club_coefficient(G, normalized=True, Q=100, seed=None): + r"""Returns the rich-club coefficient of the graph `G`. + + For each degree *k*, the *rich-club coefficient* is the ratio of the + number of actual to the number of potential edges for nodes with + degree greater than *k*: + + .. math:: + + \phi(k) = \frac{2 E_k}{N_k (N_k - 1)} + + where `N_k` is the number of nodes with degree larger than *k*, and + `E_k` is the number of edges among those nodes. + + Parameters + ---------- + G : NetworkX graph + Undirected graph with neither parallel edges nor self-loops. + normalized : bool (optional) + Normalize using randomized network as in [1]_ + Q : float (optional, default=100) + If `normalized` is True, perform `Q * m` double-edge + swaps, where `m` is the number of edges in `G`, to use as a + null-model for normalization. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + rc : dictionary + A dictionary, keyed by degree, with rich-club coefficient values. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + >>> rc = nx.rich_club_coefficient(G, normalized=False, seed=42) + >>> rc[0] + 0.4 + + Notes + ----- + The rich club definition and algorithm are found in [1]_. This + algorithm ignores any edge weights and is not defined for directed + graphs or graphs with parallel edges or self loops. + + Estimates for appropriate values of `Q` are found in [2]_. + + References + ---------- + .. [1] Julian J. McAuley, Luciano da Fontoura Costa, + and Tibério S. Caetano, + "The rich-club phenomenon across complex network hierarchies", + Applied Physics Letters Vol 91 Issue 8, August 2007. + https://arxiv.org/abs/physics/0701290 + .. [2] R. Milo, N. Kashtan, S. Itzkovitz, M. E. J. Newman, U. Alon, + "Uniform generation of random graphs with arbitrary degree + sequences", 2006. https://arxiv.org/abs/cond-mat/0312028 + """ + if nx.number_of_selfloops(G) > 0: + raise Exception( + "rich_club_coefficient is not implemented for " "graphs with self loops." + ) + rc = _compute_rc(G) + if normalized: + # make R a copy of G, randomize with Q*|E| double edge swaps + # and use rich_club coefficient of R to normalize + R = G.copy() + E = R.number_of_edges() + nx.double_edge_swap(R, Q * E, max_tries=Q * E * 10, seed=seed) + rcran = _compute_rc(R) + rc = {k: v / rcran[k] for k, v in rc.items()} + return rc + + +def _compute_rc(G): + """Returns the rich-club coefficient for each degree in the graph + `G`. + + `G` is an undirected graph without multiedges. + + Returns a dictionary mapping degree to rich-club coefficient for + that degree. + + """ + deghist = nx.degree_histogram(G) + total = sum(deghist) + # Compute the number of nodes with degree greater than `k`, for each + # degree `k` (omitting the last entry, which is zero). + nks = (total - cs for cs in accumulate(deghist) if total - cs > 1) + # Create a sorted list of pairs of edge endpoint degrees. + # + # The list is sorted in reverse order so that we can pop from the + # right side of the list later, instead of popping from the left + # side of the list, which would have a linear time cost. 
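+    #
+    # Each entry is the (smaller, larger) degree pair of one edge's
+    # endpoints; an edge joins two nodes of degree greater than `k`
+    # exactly when its smaller endpoint degree exceeds `k`.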
+ edge_degrees = sorted((sorted(map(G.degree, e)) for e in G.edges()), reverse=True) + ek = G.number_of_edges() + k1, k2 = edge_degrees.pop() + rc = {} + for d, nk in enumerate(nks): + while k1 <= d: + if len(edge_degrees) == 0: + ek = 0 + break + k1, k2 = edge_degrees.pop() + ek -= 1 + rc[d] = 2 * ek / (nk * (nk - 1)) + return rc diff --git a/phivenv/Lib/site-packages/networkx/algorithms/similarity.py b/phivenv/Lib/site-packages/networkx/algorithms/similarity.py new file mode 100644 index 0000000000000000000000000000000000000000..3d943c20d7e2a1a5e01b9f825802cb6d6a99047e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/similarity.py @@ -0,0 +1,1710 @@ +""" Functions measuring similarity using graph edit distance. + +The graph edit distance is the number of edge/node changes needed +to make two graphs isomorphic. + +The default algorithm/implementation is sub-optimal for some graphs. +The problem of finding the exact Graph Edit Distance (GED) is NP-hard +so it is often slow. If the simple interface `graph_edit_distance` +takes too long for your graph, try `optimize_graph_edit_distance` +and/or `optimize_edit_paths`. + +At the same time, I encourage capable people to investigate +alternative GED algorithms, in order to improve the choices available. +""" + +import math +import time +import warnings +from dataclasses import dataclass +from itertools import product + +import networkx as nx + +__all__ = [ + "graph_edit_distance", + "optimal_edit_paths", + "optimize_graph_edit_distance", + "optimize_edit_paths", + "simrank_similarity", + "panther_similarity", + "generate_random_paths", +] + + +def debug_print(*args, **kwargs): + print(*args, **kwargs) + + +@nx._dispatch( + graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True +) +def graph_edit_distance( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + roots=None, + upper_bound=None, + timeout=None, +): + """Returns GED (graph edit distance) between graphs G1 and G2. + + Graph edit distance is a graph similarity measure analogous to + Levenshtein distance for strings. It is defined as minimum cost + of edit path (sequence of node and edge edit operations) + transforming graph G1 to graph isomorphic to G2. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. 
+ + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + roots : 2-tuple + Tuple where first element is a node in G1 and the second + is a node in G2. + These nodes are forced to be matched in the comparison to + allow comparison between rooted graphs. + + upper_bound : numeric + Maximum edit distance to consider. Return None if no edit + distance under or equal to upper_bound exists. + + timeout : numeric + Maximum number of seconds to execute. + After timeout is met, the current best GED is returned. + + Examples + -------- + >>> G1 = nx.cycle_graph(6) + >>> G2 = nx.wheel_graph(7) + >>> nx.graph_edit_distance(G1, G2) + 7.0 + + >>> G1 = nx.star_graph(5) + >>> G2 = nx.star_graph(5) + >>> nx.graph_edit_distance(G1, G2, roots=(0, 0)) + 0.0 + >>> nx.graph_edit_distance(G1, G2, roots=(1, 0)) + 8.0 + + See Also + -------- + optimal_edit_paths, optimize_graph_edit_distance, + + is_isomorphic: test for graph edit distance of 0 + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. 
+ https://hal.archives-ouvertes.fr/hal-01168816 + + """ + bestcost = None + for _, _, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + True, + roots, + timeout, + ): + # assert bestcost is None or cost < bestcost + bestcost = cost + return bestcost + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}) +def optimal_edit_paths( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, +): + """Returns all minimum-cost edit paths transforming G1 to G2. + + Graph edit path is a sequence of node and edge edit operations + transforming graph G1 to graph isomorphic to G2. Edit operations + include substitutions, deletions, and insertions. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. 
+ If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + Returns + ------- + edit_paths : list of tuples (node_edit_path, edge_edit_path) + node_edit_path : list of tuples (u, v) + edge_edit_path : list of tuples ((u1, v1), (u2, v2)) + + cost : numeric + Optimal edit path cost (graph edit distance). + + Examples + -------- + >>> G1 = nx.cycle_graph(4) + >>> G2 = nx.wheel_graph(5) + >>> paths, cost = nx.optimal_edit_paths(G1, G2) + >>> len(paths) + 40 + >>> cost + 5.0 + + See Also + -------- + graph_edit_distance, optimize_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + paths = [] + bestcost = None + for vertex_path, edge_path, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + False, + ): + # assert bestcost is None or cost <= bestcost + if bestcost is not None and cost < bestcost: + paths = [] + paths.append((vertex_path, edge_path)) + bestcost = cost + return paths, bestcost + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}) +def optimize_graph_edit_distance( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, +): + """Returns consecutive approximations of GED (graph edit distance) + between graphs G1 and G2. + + Graph edit distance is a graph similarity measure analogous to + Levenshtein distance for strings. It is defined as minimum cost + of edit path (sequence of node and edge edit operations) + transforming graph G1 to graph isomorphic to G2. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. 
+ + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + Returns + ------- + Generator of consecutive approximations of graph edit distance. + + Examples + -------- + >>> G1 = nx.cycle_graph(6) + >>> G2 = nx.wheel_graph(7) + >>> for v in nx.optimize_graph_edit_distance(G1, G2): + ... minv = v + >>> minv + 7.0 + + See Also + -------- + graph_edit_distance, optimize_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + """ + for _, _, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + True, + ): + yield cost + + +@nx._dispatch( + graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True +) +def optimize_edit_paths( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, + strictly_decreasing=True, + roots=None, + timeout=None, +): + """GED (graph edit distance) calculation: advanced interface. + + Graph edit path is a sequence of node and edge edit operations + transforming graph G1 to graph isomorphic to G2. Edit operations + include substitutions, deletions, and insertions. + + Graph edit distance is defined as minimum cost of edit path. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. 
+ + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + strictly_decreasing : bool + If True, return consecutive approximations of strictly + decreasing cost. Otherwise, return all edit paths of cost + less than or equal to the previous minimum cost. + + roots : 2-tuple + Tuple where first element is a node in G1 and the second + is a node in G2. + These nodes are forced to be matched in the comparison to + allow comparison between rooted graphs. + + timeout : numeric + Maximum number of seconds to execute. + After timeout is met, the current best GED is returned. 
+ + Returns + ------- + Generator of tuples (node_edit_path, edge_edit_path, cost) + node_edit_path : list of tuples (u, v) + edge_edit_path : list of tuples ((u1, v1), (u2, v2)) + cost : numeric + + See Also + -------- + graph_edit_distance, optimize_graph_edit_distance, optimal_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + # TODO: support DiGraph + + import numpy as np + import scipy as sp + + @dataclass + class CostMatrix: + C: ... + lsa_row_ind: ... + lsa_col_ind: ... + ls: ... + + def make_CostMatrix(C, m, n): + # assert(C.shape == (m + n, m + n)) + lsa_row_ind, lsa_col_ind = sp.optimize.linear_sum_assignment(C) + + # Fixup dummy assignments: + # each substitution i<->j should have dummy assignment m+j<->n+i + # NOTE: fast reduce of Cv relies on it + # assert len(lsa_row_ind) == len(lsa_col_ind) + indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind) + subst_ind = [k for k, i, j in indexes if i < m and j < n] + indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind) + dummy_ind = [k for k, i, j in indexes if i >= m and j >= n] + # assert len(subst_ind) == len(dummy_ind) + lsa_row_ind[dummy_ind] = lsa_col_ind[subst_ind] + m + lsa_col_ind[dummy_ind] = lsa_row_ind[subst_ind] + n + + return CostMatrix( + C, lsa_row_ind, lsa_col_ind, C[lsa_row_ind, lsa_col_ind].sum() + ) + + def extract_C(C, i, j, m, n): + # assert(C.shape == (m + n, m + n)) + row_ind = [k in i or k - m in j for k in range(m + n)] + col_ind = [k in j or k - n in i for k in range(m + n)] + return C[row_ind, :][:, col_ind] + + def reduce_C(C, i, j, m, n): + # assert(C.shape == (m + n, m + n)) + row_ind = [k not in i and k - m not in j for k in range(m + n)] + col_ind = [k not in j and k - n not in i for k in range(m + n)] + return C[row_ind, :][:, col_ind] + + def reduce_ind(ind, i): + # assert set(ind) == set(range(len(ind))) + rind = ind[[k not in i for k in ind]] + for k in set(i): + rind[rind >= k] -= 1 + return rind + + def match_edges(u, v, pending_g, pending_h, Ce, matched_uv=None): + """ + Parameters: + u, v: matched vertices, u=None or v=None for + deletion/insertion + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_uv: partial vertex edit path + list of tuples (u, v) of previously matched vertex + mappings u<->v, u=None or v=None for + deletion/insertion + + Returns: + list of (i, j): indices of edge mappings g<->h + localCe: local CostMatrix of edge mappings + (basically submatrix of Ce at cross of rows i, cols j) + """ + M = len(pending_g) + N = len(pending_h) + # assert Ce.C.shape == (M + N, M + N) + + # only attempt to match edges after one node match has been made + # this will stop self-edges on the first node being automatically deleted + # even when a substitution is the better option + if matched_uv is None or len(matched_uv) == 0: + g_ind = [] + h_ind = [] + else: + g_ind = [ + i + for i in range(M) + if pending_g[i][:2] == (u, u) + or any( + pending_g[i][:2] in ((p, u), (u, p), (p, p)) for p, q in matched_uv + ) + ] + h_ind = [ + j + for j in range(N) + if pending_h[j][:2] == (v, v) + or any( + pending_h[j][:2] in ((q, v), (v, q), (q, q)) for p, q in matched_uv + ) + ] + 
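+        # g_ind and h_ind now hold the indices of pending edges that are
+        # incident to the freshly matched pair (u, v) or to a previously
+        # matched vertex; only those edges can be resolved at this point.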
+ m = len(g_ind) + n = len(h_ind) + + if m or n: + C = extract_C(Ce.C, g_ind, h_ind, M, N) + # assert C.shape == (m + n, m + n) + + # Forbid structurally invalid matches + # NOTE: inf remembered from Ce construction + for k, i in enumerate(g_ind): + g = pending_g[i][:2] + for l, j in enumerate(h_ind): + h = pending_h[j][:2] + if nx.is_directed(G1) or nx.is_directed(G2): + if any( + g == (p, u) and h == (q, v) or g == (u, p) and h == (v, q) + for p, q in matched_uv + ): + continue + else: + if any( + g in ((p, u), (u, p)) and h in ((q, v), (v, q)) + for p, q in matched_uv + ): + continue + if g == (u, u) or any(g == (p, p) for p, q in matched_uv): + continue + if h == (v, v) or any(h == (q, q) for p, q in matched_uv): + continue + C[k, l] = inf + + localCe = make_CostMatrix(C, m, n) + ij = [ + ( + g_ind[k] if k < m else M + h_ind[l], + h_ind[l] if l < n else N + g_ind[k], + ) + for k, l in zip(localCe.lsa_row_ind, localCe.lsa_col_ind) + if k < m or l < n + ] + + else: + ij = [] + localCe = CostMatrix(np.empty((0, 0)), [], [], 0) + + return ij, localCe + + def reduce_Ce(Ce, ij, m, n): + if len(ij): + i, j = zip(*ij) + m_i = m - sum(1 for t in i if t < m) + n_j = n - sum(1 for t in j if t < n) + return make_CostMatrix(reduce_C(Ce.C, i, j, m, n), m_i, n_j) + return Ce + + def get_edit_ops( + matched_uv, pending_u, pending_v, Cv, pending_g, pending_h, Ce, matched_cost + ): + """ + Parameters: + matched_uv: partial vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + pending_u, pending_v: lists of vertices not yet mapped + Cv: CostMatrix of pending vertex mappings + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_cost: cost of partial edit path + + Returns: + sequence of + (i, j): indices of vertex mapping u<->v + Cv_ij: reduced CostMatrix of pending vertex mappings + (basically Cv with row i, col j removed) + list of (x, y): indices of edge mappings g<->h + Ce_xy: reduced CostMatrix of pending edge mappings + (basically Ce with rows x, cols y removed) + cost: total cost of edit operation + NOTE: most promising ops first + """ + m = len(pending_u) + n = len(pending_v) + # assert Cv.C.shape == (m + n, m + n) + + # 1) a vertex mapping from optimal linear sum assignment + i, j = min( + (k, l) for k, l in zip(Cv.lsa_row_ind, Cv.lsa_col_ind) if k < m or l < n + ) + xy, localCe = match_edges( + pending_u[i] if i < m else None, + pending_v[j] if j < n else None, + pending_g, + pending_h, + Ce, + matched_uv, + ) + Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h)) + # assert Ce.ls <= localCe.ls + Ce_xy.ls + if prune(matched_cost + Cv.ls + localCe.ls + Ce_xy.ls): + pass + else: + # get reduced Cv efficiently + Cv_ij = CostMatrix( + reduce_C(Cv.C, (i,), (j,), m, n), + reduce_ind(Cv.lsa_row_ind, (i, m + j)), + reduce_ind(Cv.lsa_col_ind, (j, n + i)), + Cv.ls - Cv.C[i, j], + ) + yield (i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls + + # 2) other candidates, sorted by lower-bound cost estimate + other = [] + fixed_i, fixed_j = i, j + if m <= n: + candidates = ( + (t, fixed_j) + for t in range(m + n) + if t != fixed_i and (t < m or t == m + fixed_j) + ) + else: + candidates = ( + (fixed_i, t) + for t in range(m + n) + if t != fixed_j and (t < n or t == n + fixed_i) + ) + for i, j in candidates: + if prune(matched_cost + Cv.C[i, j] + Ce.ls): + continue + Cv_ij = make_CostMatrix( + reduce_C(Cv.C, (i,), (j,), m, n), + m - 1 if i < m else m, + n - 1 if j < n else n, + ) + # assert Cv.ls <= 
Cv.C[i, j] + Cv_ij.ls + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + Ce.ls): + continue + xy, localCe = match_edges( + pending_u[i] if i < m else None, + pending_v[j] if j < n else None, + pending_g, + pending_h, + Ce, + matched_uv, + ) + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls): + continue + Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h)) + # assert Ce.ls <= localCe.ls + Ce_xy.ls + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls + Ce_xy.ls): + continue + other.append(((i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls)) + + yield from sorted(other, key=lambda t: t[4] + t[1].ls + t[3].ls) + + def get_edit_paths( + matched_uv, + pending_u, + pending_v, + Cv, + matched_gh, + pending_g, + pending_h, + Ce, + matched_cost, + ): + """ + Parameters: + matched_uv: partial vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + pending_u, pending_v: lists of vertices not yet mapped + Cv: CostMatrix of pending vertex mappings + matched_gh: partial edge edit path + list of tuples (g, h) of edge mappings g<->h, + g=None or h=None for deletion/insertion + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_cost: cost of partial edit path + + Returns: + sequence of (vertex_path, edge_path, cost) + vertex_path: complete vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + edge_path: complete edge edit path + list of tuples (g, h) of edge mappings g<->h, + g=None or h=None for deletion/insertion + cost: total cost of edit path + NOTE: path costs are non-increasing + """ + # debug_print('matched-uv:', matched_uv) + # debug_print('matched-gh:', matched_gh) + # debug_print('matched-cost:', matched_cost) + # debug_print('pending-u:', pending_u) + # debug_print('pending-v:', pending_v) + # debug_print(Cv.C) + # assert list(sorted(G1.nodes)) == list(sorted(list(u for u, v in matched_uv if u is not None) + pending_u)) + # assert list(sorted(G2.nodes)) == list(sorted(list(v for u, v in matched_uv if v is not None) + pending_v)) + # debug_print('pending-g:', pending_g) + # debug_print('pending-h:', pending_h) + # debug_print(Ce.C) + # assert list(sorted(G1.edges)) == list(sorted(list(g for g, h in matched_gh if g is not None) + pending_g)) + # assert list(sorted(G2.edges)) == list(sorted(list(h for g, h in matched_gh if h is not None) + pending_h)) + # debug_print() + + if prune(matched_cost + Cv.ls + Ce.ls): + return + + if not max(len(pending_u), len(pending_v)): + # assert not len(pending_g) + # assert not len(pending_h) + # path completed! 
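+            # every node of G1 and G2 is now matched, so this is a complete
+            # edit path; tighten the global upper bound so prune() can cut
+            # off more expensive branches earlier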
+ # assert matched_cost <= maxcost_value + nonlocal maxcost_value + maxcost_value = min(maxcost_value, matched_cost) + yield matched_uv, matched_gh, matched_cost + + else: + edit_ops = get_edit_ops( + matched_uv, + pending_u, + pending_v, + Cv, + pending_g, + pending_h, + Ce, + matched_cost, + ) + for ij, Cv_ij, xy, Ce_xy, edit_cost in edit_ops: + i, j = ij + # assert Cv.C[i, j] + sum(Ce.C[t] for t in xy) == edit_cost + if prune(matched_cost + edit_cost + Cv_ij.ls + Ce_xy.ls): + continue + + # dive deeper + u = pending_u.pop(i) if i < len(pending_u) else None + v = pending_v.pop(j) if j < len(pending_v) else None + matched_uv.append((u, v)) + for x, y in xy: + len_g = len(pending_g) + len_h = len(pending_h) + matched_gh.append( + ( + pending_g[x] if x < len_g else None, + pending_h[y] if y < len_h else None, + ) + ) + sortedx = sorted(x for x, y in xy) + sortedy = sorted(y for x, y in xy) + G = [ + (pending_g.pop(x) if x < len(pending_g) else None) + for x in reversed(sortedx) + ] + H = [ + (pending_h.pop(y) if y < len(pending_h) else None) + for y in reversed(sortedy) + ] + + yield from get_edit_paths( + matched_uv, + pending_u, + pending_v, + Cv_ij, + matched_gh, + pending_g, + pending_h, + Ce_xy, + matched_cost + edit_cost, + ) + + # backtrack + if u is not None: + pending_u.insert(i, u) + if v is not None: + pending_v.insert(j, v) + matched_uv.pop() + for x, g in zip(sortedx, reversed(G)): + if g is not None: + pending_g.insert(x, g) + for y, h in zip(sortedy, reversed(H)): + if h is not None: + pending_h.insert(y, h) + for _ in xy: + matched_gh.pop() + + # Initialization + + pending_u = list(G1.nodes) + pending_v = list(G2.nodes) + + initial_cost = 0 + if roots: + root_u, root_v = roots + if root_u not in pending_u or root_v not in pending_v: + raise nx.NodeNotFound("Root node not in graph.") + + # remove roots from pending + pending_u.remove(root_u) + pending_v.remove(root_v) + + # cost matrix of vertex mappings + m = len(pending_u) + n = len(pending_v) + C = np.zeros((m + n, m + n)) + if node_subst_cost: + C[0:m, 0:n] = np.array( + [ + node_subst_cost(G1.nodes[u], G2.nodes[v]) + for u in pending_u + for v in pending_v + ] + ).reshape(m, n) + if roots: + initial_cost = node_subst_cost(G1.nodes[root_u], G2.nodes[root_v]) + elif node_match: + C[0:m, 0:n] = np.array( + [ + 1 - int(node_match(G1.nodes[u], G2.nodes[v])) + for u in pending_u + for v in pending_v + ] + ).reshape(m, n) + if roots: + initial_cost = 1 - node_match(G1.nodes[root_u], G2.nodes[root_v]) + else: + # all zeroes + pass + # assert not min(m, n) or C[0:m, 0:n].min() >= 0 + if node_del_cost: + del_costs = [node_del_cost(G1.nodes[u]) for u in pending_u] + else: + del_costs = [1] * len(pending_u) + # assert not m or min(del_costs) >= 0 + if node_ins_cost: + ins_costs = [node_ins_cost(G2.nodes[v]) for v in pending_v] + else: + ins_costs = [1] * len(pending_v) + # assert not n or min(ins_costs) >= 0 + inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1 + C[0:m, n : n + m] = np.array( + [del_costs[i] if i == j else inf for i in range(m) for j in range(m)] + ).reshape(m, m) + C[m : m + n, 0:n] = np.array( + [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)] + ).reshape(n, n) + Cv = make_CostMatrix(C, m, n) + # debug_print(f"Cv: {m} x {n}") + # debug_print(Cv.C) + + pending_g = list(G1.edges) + pending_h = list(G2.edges) + + # cost matrix of edge mappings + m = len(pending_g) + n = len(pending_h) + C = np.zeros((m + n, m + n)) + if edge_subst_cost: + C[0:m, 0:n] = np.array( + [ + 
edge_subst_cost(G1.edges[g], G2.edges[h]) + for g in pending_g + for h in pending_h + ] + ).reshape(m, n) + elif edge_match: + C[0:m, 0:n] = np.array( + [ + 1 - int(edge_match(G1.edges[g], G2.edges[h])) + for g in pending_g + for h in pending_h + ] + ).reshape(m, n) + else: + # all zeroes + pass + # assert not min(m, n) or C[0:m, 0:n].min() >= 0 + if edge_del_cost: + del_costs = [edge_del_cost(G1.edges[g]) for g in pending_g] + else: + del_costs = [1] * len(pending_g) + # assert not m or min(del_costs) >= 0 + if edge_ins_cost: + ins_costs = [edge_ins_cost(G2.edges[h]) for h in pending_h] + else: + ins_costs = [1] * len(pending_h) + # assert not n or min(ins_costs) >= 0 + inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1 + C[0:m, n : n + m] = np.array( + [del_costs[i] if i == j else inf for i in range(m) for j in range(m)] + ).reshape(m, m) + C[m : m + n, 0:n] = np.array( + [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)] + ).reshape(n, n) + Ce = make_CostMatrix(C, m, n) + # debug_print(f'Ce: {m} x {n}') + # debug_print(Ce.C) + # debug_print() + + maxcost_value = Cv.C.sum() + Ce.C.sum() + 1 + + if timeout is not None: + if timeout <= 0: + raise nx.NetworkXError("Timeout value must be greater than 0") + start = time.perf_counter() + + def prune(cost): + if timeout is not None: + if time.perf_counter() - start > timeout: + return True + if upper_bound is not None: + if cost > upper_bound: + return True + if cost > maxcost_value: + return True + if strictly_decreasing and cost >= maxcost_value: + return True + return False + + # Now go! + + done_uv = [] if roots is None else [roots] + + for vertex_path, edge_path, cost in get_edit_paths( + done_uv, pending_u, pending_v, Cv, [], pending_g, pending_h, Ce, initial_cost + ): + # assert sorted(G1.nodes) == sorted(u for u, v in vertex_path if u is not None) + # assert sorted(G2.nodes) == sorted(v for u, v in vertex_path if v is not None) + # assert sorted(G1.edges) == sorted(g for g, h in edge_path if g is not None) + # assert sorted(G2.edges) == sorted(h for g, h in edge_path if h is not None) + # print(vertex_path, edge_path, cost, file = sys.stderr) + # assert cost == maxcost_value + yield list(vertex_path), list(edge_path), cost + + +@nx._dispatch +def simrank_similarity( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Returns the SimRank similarity of nodes in the graph ``G``. + + SimRank is a similarity metric that says "two objects are considered + to be similar if they are referenced by similar objects." [1]_. + + The pseudo-code definition from the paper is:: + + def simrank(G, u, v): + in_neighbors_u = G.predecessors(u) + in_neighbors_v = G.predecessors(v) + scale = C / (len(in_neighbors_u) * len(in_neighbors_v)) + return scale * sum(simrank(G, w, x) + for w, x in product(in_neighbors_u, + in_neighbors_v)) + + where ``G`` is the graph, ``u`` is the source, ``v`` is the target, + and ``C`` is a float decay or importance factor between 0 and 1. + + The SimRank algorithm for determining node similarity is defined in + [2]_. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + + source : node + If this is specified, the returned dictionary maps each node + ``v`` in the graph to the similarity between ``source`` and + ``v``. + + target : node + If both ``source`` and ``target`` are specified, the similarity + value between ``source`` and ``target`` is returned. 
If
+        ``target`` is specified but ``source`` is not, this argument is
+        ignored.
+
+    importance_factor : float
+        The relative importance of indirect neighbors with respect to
+        direct neighbors.
+
+    max_iterations : integer
+        Maximum number of iterations.
+
+    tolerance : float
+        Error tolerance used to check convergence. When an iteration of
+        the algorithm finds that no similarity value changes more than
+        this amount, the algorithm halts.
+
+    Returns
+    -------
+    similarity : dictionary or float
+        If ``source`` and ``target`` are both ``None``, this returns a
+        dictionary of dictionaries, where keys are node pairs and values
+        are the similarity of the pair of nodes.
+
+        If ``source`` is not ``None`` but ``target`` is, this returns a
+        dictionary mapping node to the similarity of ``source`` and that
+        node.
+
+        If neither ``source`` nor ``target`` is ``None``, this returns
+        the similarity value for the given pair of nodes.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(2)
+    >>> nx.simrank_similarity(G)
+    {0: {0: 1.0, 1: 0.0}, 1: {0: 0.0, 1: 1.0}}
+    >>> nx.simrank_similarity(G, source=0)
+    {0: 1.0, 1: 0.0}
+    >>> nx.simrank_similarity(G, source=0, target=0)
+    1.0
+
+    The result of this function can be converted to a numpy array
+    representing the SimRank matrix by using the node order of the
+    graph to determine which row and column represent each node.
+    Other ordering of nodes is also possible.
+
+    >>> import numpy as np
+    >>> sim = nx.simrank_similarity(G)
+    >>> np.array([[sim[u][v] for v in G] for u in G])
+    array([[1., 0.],
+           [0., 1.]])
+    >>> sim_1d = nx.simrank_similarity(G, source=0)
+    >>> np.array([sim_1d[v] for v in G])
+    array([1., 0.])
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/SimRank
+    .. [2] G. Jeh and J. Widom.
+           "SimRank: a measure of structural-context similarity",
+           In KDD'02: Proceedings of the Eighth ACM SIGKDD
+           International Conference on Knowledge Discovery and Data Mining,
+           pp. 538--543. ACM Press, 2002.
+    """
+    import numpy as np
+
+    nodelist = list(G)
+    s_indx = None if source is None else nodelist.index(source)
+    t_indx = None if target is None else nodelist.index(target)
+
+    x = _simrank_similarity_numpy(
+        G, s_indx, t_indx, importance_factor, max_iterations, tolerance
+    )
+
+    if isinstance(x, np.ndarray):
+        if x.ndim == 1:
+            return dict(zip(G, x))
+        # else x.ndim == 2
+        return {u: dict(zip(G, row)) for u, row in zip(G, x)}
+    return x
+
+
+def _simrank_similarity_python(
+    G,
+    source=None,
+    target=None,
+    importance_factor=0.9,
+    max_iterations=1000,
+    tolerance=1e-4,
+):
+    """Returns the SimRank similarity of nodes in the graph ``G``.
+
+    This pure Python version is provided for pedagogical purposes.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(2)
+    >>> nx.similarity._simrank_similarity_python(G)
+    {0: {0: 1, 1: 0.0}, 1: {0: 0.0, 1: 1}}
+    >>> nx.similarity._simrank_similarity_python(G, source=0)
+    {0: 1, 1: 0.0}
+    >>> nx.similarity._simrank_similarity_python(G, source=0, target=0)
+    1
+    """
+    # build up our similarity adjacency dictionary output
+    newsim = {u: {v: 1 if u == v else 0 for v in G} for u in G}
+
+    # These functions compute the update to the similarity value of the nodes
+    # `u` and `v` with respect to the previous similarity values.
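+    # The helpers below implement the SimRank fixed-point update
+    #
+    #     s(u, v) = C / (|I(u)| * |I(v)|) * sum(s(w, x) for (w, x) in I(u) x I(v))
+    #
+    # where I(u) denotes the (in-)neighbors of u and C is the importance factor.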
+    def avg_sim(s):
+        return sum(newsim[w][x] for (w, x) in s) / len(s) if s else 0.0
+
+    Gadj = G.pred if G.is_directed() else G.adj
+
+    def sim(u, v):
+        return importance_factor * avg_sim(list(product(Gadj[u], Gadj[v])))
+
+    for its in range(max_iterations):
+        oldsim = newsim
+        newsim = {u: {v: sim(u, v) if u != v else 1 for v in G} for u in G}
+        is_close = all(
+            all(
+                abs(newsim[u][v] - old) <= tolerance * (1 + abs(old))
+                for v, old in nbrs.items()
+            )
+            for u, nbrs in oldsim.items()
+        )
+        if is_close:
+            break
+
+    if its + 1 == max_iterations:
+        raise nx.ExceededMaxIterations(
+            f"simrank did not converge after {max_iterations} iterations."
+        )
+
+    if source is not None and target is not None:
+        return newsim[source][target]
+    if source is not None:
+        return newsim[source]
+    return newsim
+
+
+def _simrank_similarity_numpy(
+    G,
+    source=None,
+    target=None,
+    importance_factor=0.9,
+    max_iterations=1000,
+    tolerance=1e-4,
+):
+    """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``.
+
+    The SimRank algorithm for determining node similarity is defined in
+    [1]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A NetworkX graph
+
+    source : node
+        If this is specified, the returned dictionary maps each node
+        ``v`` in the graph to the similarity between ``source`` and
+        ``v``.
+
+    target : node
+        If both ``source`` and ``target`` are specified, the similarity
+        value between ``source`` and ``target`` is returned. If
+        ``target`` is specified but ``source`` is not, this argument is
+        ignored.
+
+    importance_factor : float
+        The relative importance of indirect neighbors with respect to
+        direct neighbors.
+
+    max_iterations : integer
+        Maximum number of iterations.
+
+    tolerance : float
+        Error tolerance used to check convergence. When an iteration of
+        the algorithm finds that no similarity value changes more than
+        this amount, the algorithm halts.
+
+    Returns
+    -------
+    similarity : numpy array or float
+        If ``source`` and ``target`` are both ``None``, this returns a
+        2D array containing SimRank scores of the nodes.
+
+        If ``source`` is not ``None`` but ``target`` is, this returns a
+        1D array containing the SimRank scores between ``source`` and
+        each node.
+
+        If neither ``source`` nor ``target`` is ``None``, this returns
+        the similarity value for the given pair of nodes.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(2)
+    >>> nx.similarity._simrank_similarity_numpy(G)
+    array([[1., 0.],
+           [0., 1.]])
+    >>> nx.similarity._simrank_similarity_numpy(G, source=0)
+    array([1., 0.])
+    >>> nx.similarity._simrank_similarity_numpy(G, source=0, target=0)
+    1.0
+
+    References
+    ----------
+    .. [1] G. Jeh and J. Widom.
+           "SimRank: a measure of structural-context similarity",
+           In KDD'02: Proceedings of the Eighth ACM SIGKDD
+           International Conference on Knowledge Discovery and Data Mining,
+           pp. 538--543. ACM Press, 2002.
+    """
+    # This algorithm follows roughly
+    #
+    #     S = max{C * (A.T * S * A), I}
+    #
+    # where C is the importance factor, A is the column normalized
+    # adjacency matrix, and I is the identity matrix.
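+    # Each iteration thus costs two dense n x n matrix products for a graph
+    # with n nodes; the diagonal is reset to 1 after every update because a
+    # node is always maximally similar to itself.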
+    import numpy as np
+
+    adjacency_matrix = nx.to_numpy_array(G)
+
+    # column-normalize the ``adjacency_matrix``
+    s = np.array(adjacency_matrix.sum(axis=0))
+    s[s == 0] = 1
+    adjacency_matrix /= s  # adjacency_matrix.sum(axis=0)
+
+    newsim = np.eye(len(G), dtype=np.float64)
+    for its in range(max_iterations):
+        prevsim = newsim.copy()
+        newsim = importance_factor * ((adjacency_matrix.T @ prevsim) @ adjacency_matrix)
+        np.fill_diagonal(newsim, 1.0)
+
+        if np.allclose(prevsim, newsim, atol=tolerance):
+            break
+
+    if its + 1 == max_iterations:
+        raise nx.ExceededMaxIterations(
+            f"simrank did not converge after {max_iterations} iterations."
+        )
+
+    if source is not None and target is not None:
+        return newsim[source, target]
+    if source is not None:
+        return newsim[source]
+    return newsim
+
+
+@nx._dispatch(edge_attrs="weight")
+def panther_similarity(
+    G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None, weight="weight"
+):
+    r"""Returns the Panther similarity of nodes in the graph `G` to node `source`.
+
+    Panther is a similarity metric that says "two objects are considered
+    to be similar if they frequently appear on the same paths." [1]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A NetworkX graph
+    source : node
+        Source node for which to find the top `k` similar other nodes
+    k : int (default = 5)
+        The number of most similar nodes to return
+    path_length : int (default = 5)
+        How long the randomly generated paths should be (``T`` in [1]_)
+    c : float (default = 0.5)
+        A universal positive constant used to scale the number
+        of sample random paths to generate.
+    delta : float (default = 0.1)
+        The probability that the similarity $S$ is not an epsilon-approximation
+        to (R, phi), where $R$ is the number of random paths and $\phi$ is the
+        probability that an element sampled from the domain $D$ belongs to a
+        given set $A \subseteq D$.
+    eps : float or None (default = None)
+        The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore,
+        if no value is provided, the recommended computed value will be used.
+    weight : string or None, optional (default="weight")
+        The name of an edge attribute that holds the numerical value
+        used as a weight. If None then each edge has weight 1.
+
+    Returns
+    -------
+    similarity : dictionary
+        Dictionary of nodes to similarity scores (as floats). Note:
+        the self-similarity (i.e., `source`) will not be included in
+        the returned dictionary.
+
+    Examples
+    --------
+    >>> G = nx.star_graph(10)
+    >>> sim = nx.panther_similarity(G, 0)
+
+    References
+    ----------
+    .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.
+           Panther: Fast top-k similarity search on large networks.
+           In Proceedings of the ACM SIGKDD International Conference
+           on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
+           Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
+    """
+    import numpy as np
+
+    num_nodes = G.number_of_nodes()
+    if num_nodes < k:
+        warnings.warn(
+            f"Number of nodes is {num_nodes}, but requested k is {k}. "
+            "Setting k to number of nodes."
+ ) + k = num_nodes + # According to [1], they empirically determined + # a good value for ``eps`` to be sqrt( 1 / |E| ) + if eps is None: + eps = np.sqrt(1.0 / G.number_of_edges()) + + inv_node_map = {name: index for index, name in enumerate(G.nodes)} + node_map = np.array(G) + + # Calculate the sample size ``R`` for how many paths + # to randomly generate + t_choose_2 = math.comb(path_length, 2) + sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta))) + index_map = {} + _ = list( + generate_random_paths( + G, sample_size, path_length=path_length, index_map=index_map, weight=weight + ) + ) + S = np.zeros(num_nodes) + + inv_sample_size = 1 / sample_size + + source_paths = set(index_map[source]) + + # Calculate the path similarities + # between ``source`` (v) and ``node`` (v_j) + # using our inverted index mapping of + # vertices to paths + for node, paths in index_map.items(): + # Only consider paths where both + # ``node`` and ``source`` are present + common_paths = source_paths.intersection(paths) + S[inv_node_map[node]] = len(common_paths) * inv_sample_size + + # Retrieve top ``k`` similar + # Note: the below performed anywhere from 4-10x faster + # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]`` + top_k_unsorted = np.argpartition(S, -k)[-k:] + top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1] + + # Add back the similarity scores + top_k_sorted_names = (node_map[n] for n in top_k_sorted) + top_k_with_val = dict(zip(top_k_sorted_names, S[top_k_sorted])) + + # Remove the self-similarity + top_k_with_val.pop(source, None) + return top_k_with_val + + +@nx._dispatch(edge_attrs="weight") +def generate_random_paths( + G, sample_size, path_length=5, index_map=None, weight="weight" +): + """Randomly generate `sample_size` paths of length `path_length`. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + sample_size : integer + The number of paths to generate. This is ``R`` in [1]_. + path_length : integer (default = 5) + The maximum size of the path to randomly generate. + This is ``T`` in [1]_. According to the paper, ``T >= 5`` is + recommended. + index_map : dictionary, optional + If provided, this will be populated with the inverted + index of nodes mapped to the set of generated random path + indices within ``paths``. + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + + Returns + ------- + paths : generator of lists + Generator of `sample_size` paths each with length `path_length`. + + Examples + -------- + Note that the return value is the list of paths: + + >>> G = nx.star_graph(3) + >>> random_path = nx.generate_random_paths(G, 2) + + By passing a dictionary into `index_map`, it will build an + inverted index mapping of nodes to the paths in which that node is present: + + >>> G = nx.star_graph(3) + >>> index_map = {} + >>> random_path = nx.generate_random_paths(G, 3, index_map=index_map) + >>> paths_containing_node_0 = [random_path[path_idx] for path_idx in index_map.get(0, [])] + + References + ---------- + .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J. + Panther: Fast top-k similarity search on large networks. + In Proceedings of the ACM SIGKDD International Conference + on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454). + Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267. 
+ """ + import numpy as np + + # Calculate transition probabilities between + # every pair of vertices according to Eq. (3) + adj_mat = nx.to_numpy_array(G, weight=weight) + inv_row_sums = np.reciprocal(adj_mat.sum(axis=1)).reshape(-1, 1) + transition_probabilities = adj_mat * inv_row_sums + + node_map = np.array(G) + num_nodes = G.number_of_nodes() + + for path_index in range(sample_size): + # Sample current vertex v = v_i uniformly at random + node_index = np.random.randint(0, high=num_nodes) + node = node_map[node_index] + + # Add v into p_r and add p_r into the path set + # of v, i.e., P_v + path = [node] + + # Build the inverted index (P_v) of vertices to paths + if index_map is not None: + if node in index_map: + index_map[node].add(path_index) + else: + index_map[node] = {path_index} + + starting_index = node_index + for _ in range(path_length): + # Randomly sample a neighbor (v_j) according + # to transition probabilities from ``node`` (v) to its neighbors + neighbor_index = np.random.choice( + num_nodes, p=transition_probabilities[starting_index] + ) + + # Set current vertex (v = v_j) + starting_index = neighbor_index + + # Add v into p_r + neighbor_node = node_map[neighbor_index] + path.append(neighbor_node) + + # Add p_r into P_v + if index_map is not None: + if neighbor_node in index_map: + index_map[neighbor_node].add(path_index) + else: + index_map[neighbor_node] = {path_index} + + yield path diff --git a/phivenv/Lib/site-packages/networkx/algorithms/simple_paths.py b/phivenv/Lib/site-packages/networkx/algorithms/simple_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..cb288a67c3bf2e073236cae78b02c0e9d266baae --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/simple_paths.py @@ -0,0 +1,978 @@ +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.weighted import _weight_function +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "all_simple_paths", + "is_simple_path", + "shortest_simple_paths", + "all_simple_edge_paths", +] + + +@nx._dispatch +def is_simple_path(G, nodes): + """Returns True if and only if `nodes` form a simple path in `G`. + + A *simple path* in a graph is a nonempty sequence of nodes in which + no node appears more than once in the sequence, and each adjacent + pair of nodes in the sequence is adjacent in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph. + nodes : list + A list of one or more nodes in the graph `G`. + + Returns + ------- + bool + Whether the given list of nodes represents a simple path in `G`. + + Notes + ----- + An empty list of nodes is not a path but a list of one node is a + path. Here's an explanation why. + + This function operates on *node paths*. One could also consider + *edge paths*. There is a bijection between node paths and edge + paths. + + The *length of a path* is the number of edges in the path, so a list + of nodes of length *n* corresponds to a path of length *n* - 1. + Thus the smallest edge path would be a list of zero edges, the empty + path. This corresponds to a list of one node. 
+ + To convert between a node path and an edge path, you can use code + like the following:: + + >>> from networkx.utils import pairwise + >>> nodes = [0, 1, 2, 3] + >>> edges = list(pairwise(nodes)) + >>> edges + [(0, 1), (1, 2), (2, 3)] + >>> nodes = [edges[0][0]] + [v for u, v in edges] + >>> nodes + [0, 1, 2, 3] + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> nx.is_simple_path(G, [2, 3, 0]) + True + >>> nx.is_simple_path(G, [0, 2]) + False + + """ + # The empty list is not a valid path. Could also return + # NetworkXPointlessConcept here. + if len(nodes) == 0: + return False + + # If the list is a single node, just check that the node is actually + # in the graph. + if len(nodes) == 1: + return nodes[0] in G + + # check that all nodes in the list are in the graph, if at least one + # is not in the graph, then this is not a simple path + if not all(n in G for n in nodes): + return False + + # If the list contains repeated nodes, then it's not a simple path + if len(set(nodes)) != len(nodes): + return False + + # Test that each adjacent pair of nodes is adjacent. + return all(v in G[u] for u, v in pairwise(nodes)) + + +@nx._dispatch +def all_simple_paths(G, source, target, cutoff=None): + """Generate all simple paths in the graph G from source to target. + + A simple path is a path with no repeated nodes. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : nodes + Single node or iterable of nodes at which to end path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths. If there are no paths + between the source and target within the given cutoff the generator + produces no output. If it is possible to traverse the same sequence of + nodes in multiple ways, namely through parallel edges, then it will be + returned multiple times (once for each viable edge combination). + + Examples + -------- + This iterator generates lists of nodes:: + + >>> G = nx.complete_graph(4) + >>> for path in nx.all_simple_paths(G, source=0, target=3): + ... print(path) + ... + [0, 1, 2, 3] + [0, 1, 3] + [0, 2, 1, 3] + [0, 2, 3] + [0, 3] + + You can generate only those paths that are shorter than a certain + length by using the `cutoff` keyword argument:: + + >>> paths = nx.all_simple_paths(G, source=0, target=3, cutoff=2) + >>> print(list(paths)) + [[0, 1, 3], [0, 2, 3], [0, 3]] + + To get each path as the corresponding list of edges, you can use the + :func:`networkx.utils.pairwise` helper function:: + + >>> paths = nx.all_simple_paths(G, source=0, target=3) + >>> for path in map(nx.utils.pairwise, paths): + ... print(list(path)) + [(0, 1), (1, 2), (2, 3)] + [(0, 1), (1, 3)] + [(0, 2), (2, 1), (1, 3)] + [(0, 2), (2, 3)] + [(0, 3)] + + Pass an iterable of nodes as target to generate all paths ending in any of several nodes:: + + >>> G = nx.complete_graph(4) + >>> for path in nx.all_simple_paths(G, source=0, target=[3, 2]): + ... print(path) + ... 
+ [0, 1, 2] + [0, 1, 2, 3] + [0, 1, 3] + [0, 1, 3, 2] + [0, 2] + [0, 2, 1, 3] + [0, 2, 3] + [0, 3] + [0, 3, 1, 2] + [0, 3, 2] + + Iterate over each path from the root nodes to the leaf nodes in a + directed acyclic graph using a functional programming approach:: + + >>> from itertools import chain + >>> from itertools import product + >>> from itertools import starmap + >>> from functools import partial + >>> + >>> chaini = chain.from_iterable + >>> + >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = (v for v, d in G.out_degree() if d == 0) + >>> all_paths = partial(nx.all_simple_paths, G) + >>> list(chaini(starmap(all_paths, product(roots, leaves)))) + [[0, 1, 2], [0, 3, 2]] + + The same list computed using an iterative approach:: + + >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = (v for v, d in G.out_degree() if d == 0) + >>> all_paths = [] + >>> for root in roots: + ... for leaf in leaves: + ... paths = nx.all_simple_paths(G, root, leaf) + ... all_paths.extend(paths) + >>> all_paths + [[0, 1, 2], [0, 3, 2]] + + Iterate over each path from the root nodes to the leaf nodes in a + directed acyclic graph passing all leaves together to avoid unnecessary + compute:: + + >>> G = nx.DiGraph([(0, 1), (2, 1), (1, 3), (1, 4)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = [v for v, d in G.out_degree() if d == 0] + >>> all_paths = [] + >>> for root in roots: + ... paths = nx.all_simple_paths(G, root, leaves) + ... all_paths.extend(paths) + >>> all_paths + [[0, 1, 3], [0, 1, 4], [2, 1, 3], [2, 1, 4]] + + If parallel edges offer multiple ways to traverse a given sequence of + nodes, this sequence of nodes will be returned multiple times: + + >>> G = nx.MultiDiGraph([(0, 1), (0, 1), (1, 2)]) + >>> list(nx.all_simple_paths(G, 0, 2)) + [[0, 1, 2], [0, 1, 2]] + + Notes + ----- + This algorithm uses a modified depth-first search to generate the + paths [1]_. A single path can be found in $O(V+E)$ time but the + number of simple paths in a graph can be very large, e.g. $O(n!)$ in + the complete graph of order $n$. + + This function does not check that a path exists between `source` and + `target`. For large graphs, this may result in very long runtimes. + Consider using `has_path` to check that a path exists between `source` and + `target` before calling this function on large graphs. + + References + ---------- + .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms", + Addison Wesley Professional, 3rd ed., 2001. 
+ + See Also + -------- + all_shortest_paths, shortest_path, has_path + + """ + if source not in G: + raise nx.NodeNotFound(f"source node {source} not in graph") + if target in G: + targets = {target} + else: + try: + targets = set(target) + except TypeError as err: + raise nx.NodeNotFound(f"target node {target} not in graph") from err + if source in targets: + return _empty_generator() + if cutoff is None: + cutoff = len(G) - 1 + if cutoff < 1: + return _empty_generator() + if G.is_multigraph(): + return _all_simple_paths_multigraph(G, source, targets, cutoff) + else: + return _all_simple_paths_graph(G, source, targets, cutoff) + + +def _empty_generator(): + yield from () + + +def _all_simple_paths_graph(G, source, targets, cutoff): + visited = {source: True} + stack = [iter(G[source])] + while stack: + children = stack[-1] + child = next(children, None) + if child is None: + stack.pop() + visited.popitem() + elif len(visited) < cutoff: + if child in visited: + continue + if child in targets: + yield list(visited) + [child] + visited[child] = True + if targets - set(visited.keys()): # expand stack until find all targets + stack.append(iter(G[child])) + else: + visited.popitem() # maybe other ways to child + else: # len(visited) == cutoff: + for target in (targets & (set(children) | {child})) - set(visited.keys()): + yield list(visited) + [target] + stack.pop() + visited.popitem() + + +def _all_simple_paths_multigraph(G, source, targets, cutoff): + visited = {source: True} + stack = [(v for u, v in G.edges(source))] + while stack: + children = stack[-1] + child = next(children, None) + if child is None: + stack.pop() + visited.popitem() + elif len(visited) < cutoff: + if child in visited: + continue + if child in targets: + yield list(visited) + [child] + visited[child] = True + if targets - set(visited.keys()): + stack.append((v for u, v in G.edges(child))) + else: + visited.popitem() + else: # len(visited) == cutoff: + for target in targets - set(visited.keys()): + count = ([child] + list(children)).count(target) + for i in range(count): + yield list(visited) + [target] + stack.pop() + visited.popitem() + + +@nx._dispatch +def all_simple_edge_paths(G, source, target, cutoff=None): + """Generate lists of edges for all simple paths in G from source to target. + + A simple path is a path with no repeated nodes. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : nodes + Single node or iterable of nodes at which to end path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths. If there are no paths + between the source and target within the given cutoff the generator + produces no output. + For multigraphs, the list of edges have elements of the form `(u,v,k)`. + Where `k` corresponds to the edge key. + + Examples + -------- + + Print the simple path edges of a Graph:: + + >>> g = nx.Graph([(1, 2), (2, 4), (1, 3), (3, 4)]) + >>> for path in sorted(nx.all_simple_edge_paths(g, 1, 4)): + ... print(path) + [(1, 2), (2, 4)] + [(1, 3), (3, 4)] + + Print the simple path edges of a MultiGraph. Returned edges come with + their associated keys:: + + >>> mg = nx.MultiGraph() + >>> mg.add_edge(1, 2, key="k0") + 'k0' + >>> mg.add_edge(1, 2, key="k1") + 'k1' + >>> mg.add_edge(2, 3, key="k0") + 'k0' + >>> for path in sorted(nx.all_simple_edge_paths(mg, 1, 3)): + ... 
print(path) + [(1, 2, 'k0'), (2, 3, 'k0')] + [(1, 2, 'k1'), (2, 3, 'k0')] + + + Notes + ----- + This algorithm uses a modified depth-first search to generate the + paths [1]_. A single path can be found in $O(V+E)$ time but the + number of simple paths in a graph can be very large, e.g. $O(n!)$ in + the complete graph of order $n$. + + References + ---------- + .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms", + Addison Wesley Professional, 3rd ed., 2001. + + See Also + -------- + all_shortest_paths, shortest_path, all_simple_paths + + """ + if source not in G: + raise nx.NodeNotFound("source node %s not in graph" % source) + if target in G: + targets = {target} + else: + try: + targets = set(target) + except TypeError: + raise nx.NodeNotFound("target node %s not in graph" % target) + if source in targets: + return [] + if cutoff is None: + cutoff = len(G) - 1 + if cutoff < 1: + return [] + if G.is_multigraph(): + for simp_path in _all_simple_edge_paths_multigraph(G, source, targets, cutoff): + yield simp_path + else: + for simp_path in _all_simple_paths_graph(G, source, targets, cutoff): + yield list(zip(simp_path[:-1], simp_path[1:])) + + +def _all_simple_edge_paths_multigraph(G, source, targets, cutoff): + if not cutoff or cutoff < 1: + return [] + visited = [source] + stack = [iter(G.edges(source, keys=True))] + + while stack: + children = stack[-1] + child = next(children, None) + if child is None: + stack.pop() + visited.pop() + elif len(visited) < cutoff: + if child[1] in targets: + yield visited[1:] + [child] + elif child[1] not in [v[0] for v in visited[1:]]: + visited.append(child) + stack.append(iter(G.edges(child[1], keys=True))) + else: # len(visited) == cutoff: + for u, v, k in [child] + list(children): + if v in targets: + yield visited[1:] + [(u, v, k)] + stack.pop() + visited.pop() + + +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def shortest_simple_paths(G, source, target, weight=None): + """Generate all simple paths in the graph G from source to target, + starting from shortest ones. + + A simple path is a path with no repeated nodes. + + If a weighted shortest path search is to be used, no negative weights + are allowed. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + weight : string or function + If it is a string, it is the name of the edge attribute to be + used as a weight. + + If it is a function, the weight of an edge is the value returned + by the function. The function must accept exactly three positional + arguments: the two endpoints of an edge and the dictionary of edge + attributes for that edge. The function must return a number. + + If None all edges are considered to have unit weight. Default + value None. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths, in order from + shortest to longest. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + NetworkXError + If source or target nodes are not in the input graph. + + NetworkXNotImplemented + If the input graph is a Multi[Di]Graph. + + Examples + -------- + + >>> G = nx.cycle_graph(7) + >>> paths = list(nx.shortest_simple_paths(G, 0, 3)) + >>> print(paths) + [[0, 1, 2, 3], [0, 6, 5, 4, 3]] + + You can use this function to efficiently compute the k shortest/best + paths between two nodes. + + >>> from itertools import islice + >>> def k_shortest_paths(G, source, target, k, weight=None): + ... 
return list(
+    ...         islice(nx.shortest_simple_paths(G, source, target, weight=weight), k)
+    ...     )
+    >>> for path in k_shortest_paths(G, 0, 3, 2):
+    ...     print(path)
+    [0, 1, 2, 3]
+    [0, 6, 5, 4, 3]
+
+    Notes
+    -----
+    This procedure is based on the algorithm by Jin Y. Yen [1]_. Finding
+    the first $K$ paths requires $O(KN^3)$ operations.
+
+    See Also
+    --------
+    all_shortest_paths
+    shortest_path
+    all_simple_paths
+
+    References
+    ----------
+    .. [1] Jin Y. Yen, "Finding the K Shortest Loopless Paths in a
+       Network", Management Science, Vol. 17, No. 11, Theory Series
+       (Jul., 1971), pp. 712-716.
+
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"source node {source} not in graph")
+
+    if target not in G:
+        raise nx.NodeNotFound(f"target node {target} not in graph")
+
+    if weight is None:
+        length_func = len
+        shortest_path_func = _bidirectional_shortest_path
+    else:
+        wt = _weight_function(G, weight)
+
+        def length_func(path):
+            return sum(
+                wt(u, v, G.get_edge_data(u, v)) for (u, v) in zip(path, path[1:])
+            )
+
+        shortest_path_func = _bidirectional_dijkstra
+
+    listA = []
+    listB = PathBuffer()
+    prev_path = None
+    while True:
+        if not prev_path:
+            length, path = shortest_path_func(G, source, target, weight=weight)
+            listB.push(length, path)
+        else:
+            ignore_nodes = set()
+            ignore_edges = set()
+            for i in range(1, len(prev_path)):
+                root = prev_path[:i]
+                root_length = length_func(root)
+                for path in listA:
+                    if path[:i] == root:
+                        ignore_edges.add((path[i - 1], path[i]))
+                try:
+                    length, spur = shortest_path_func(
+                        G,
+                        root[-1],
+                        target,
+                        ignore_nodes=ignore_nodes,
+                        ignore_edges=ignore_edges,
+                        weight=weight,
+                    )
+                    path = root[:-1] + spur
+                    listB.push(root_length + length, path)
+                except nx.NetworkXNoPath:
+                    pass
+                ignore_nodes.add(root[-1])
+
+        if listB:
+            path = listB.pop()
+            yield path
+            listA.append(path)
+            prev_path = path
+        else:
+            break
+
+
+class PathBuffer:
+    def __init__(self):
+        self.paths = set()
+        self.sortedpaths = []
+        self.counter = count()
+
+    def __len__(self):
+        return len(self.sortedpaths)
+
+    def push(self, cost, path):
+        hashable_path = tuple(path)
+        if hashable_path not in self.paths:
+            heappush(self.sortedpaths, (cost, next(self.counter), path))
+            self.paths.add(hashable_path)
+
+    def pop(self):
+        (cost, num, path) = heappop(self.sortedpaths)
+        hashable_path = tuple(path)
+        self.paths.remove(hashable_path)
+        return path
+
+
+def _bidirectional_shortest_path(
+    G, source, target, ignore_nodes=None, ignore_edges=None, weight=None
+):
+    """Returns the shortest path between source and target ignoring
+    nodes and edges in the containers ignore_nodes and ignore_edges.
+
+    This is a custom modification of the standard bidirectional shortest
+    path implementation at networkx.algorithms.unweighted
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        starting node for path
+
+    target : node
+        ending node for path
+
+    ignore_nodes : container of nodes
+        nodes to ignore, optional
+
+    ignore_edges : container of edges
+        edges to ignore, optional
+
+    weight : None
+        This function accepts a weight argument for convenience of
+        shortest_simple_paths function. It will be ignored.
+
+    Returns
+    -------
+    length : number
+        Length of the path found, as measured by ``len(path)``.
+
+    path : list
+        List of nodes in a path from source to target.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If no path exists between source and target.
+ + See Also + -------- + shortest_path + + """ + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target, ignore_nodes, ignore_edges) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from w to target + while w is not None: + path.append(w) + w = succ[w] + # from source to w + w = pred[path[0]] + while w is not None: + path.insert(0, w) + w = pred[w] + + return len(path), path + + +def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges=None): + """Bidirectional shortest path helper. + Returns (pred,succ,w) where + pred is a dictionary of predecessors from w to the source, and + succ is a dictionary of successors from w to the target. + """ + # does BFS from both source and target and meets in the middle + if ignore_nodes and (source in ignore_nodes or target in ignore_nodes): + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + if target == source: + return ({target: None}, {source: None}, source) + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.predecessors + Gsucc = G.successors + else: + Gpred = G.neighbors + Gsucc = G.neighbors + + # support optional nodes filter + if ignore_nodes: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if w not in ignore_nodes: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + # support optional edges filter + if ignore_edges: + if G.is_directed(): + + def filter_pred_iter(pred_iter): + def iterate(v): + for w in pred_iter(v): + if (w, v) not in ignore_edges: + yield w + + return iterate + + def filter_succ_iter(succ_iter): + def iterate(v): + for w in succ_iter(v): + if (v, w) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_pred_iter(Gpred) + Gsucc = filter_succ_iter(Gsucc) + + else: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if (v, w) not in ignore_edges and (w, v) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + # predecessor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + while forward_fringe and reverse_fringe: + if len(forward_fringe) <= len(reverse_fringe): + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc(v): + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: + # found path + return pred, succ, w + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred(v): + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: + # found path + return pred, succ, w + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + + +def _bidirectional_dijkstra( + G, source, target, weight="weight", ignore_nodes=None, ignore_edges=None +): + """Dijkstra's algorithm for shortest paths using bidirectional search. + + This function returns the shortest path between source and target + ignoring nodes and edges in the containers ignore_nodes and + ignore_edges. + + This is a custom modification of the standard Dijkstra bidirectional + shortest path implementation at networkx.algorithms.weighted + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node. + + target : node + Ending node. 
+
+    weight: string, function, optional (default='weight')
+        Edge data key or weight function corresponding to the edge weight
+
+    ignore_nodes : container of nodes
+        nodes to ignore, optional
+
+    ignore_edges : container of edges
+        edges to ignore, optional
+
+    Returns
+    -------
+    length : number
+        Shortest path length.
+
+    path : list
+        List of nodes in a path from source to target.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If no path exists between source and target.
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    In practice bidirectional Dijkstra is much more than twice as fast as
+    ordinary Dijkstra.
+
+    Ordinary Dijkstra expands nodes in a sphere-like manner from the
+    source. The radius of this sphere will eventually be the length
+    of the shortest path. Bidirectional Dijkstra will expand nodes
+    from both the source and the target, making two spheres of half
+    this radius. The volume of the first sphere is proportional to
+    ``r**2`` while the two smaller ones together are proportional to
+    ``2 * (r / 2)**2 = r**2 / 2``, making up half the volume.
+
+    This algorithm is not guaranteed to work if edge weights
+    are negative or are floating point numbers
+    (overflows and roundoff errors can cause problems).
+
+    See Also
+    --------
+    shortest_path
+    shortest_path_length
+    """
+    if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
+        raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
+    if source == target:
+        if source not in G:
+            raise nx.NodeNotFound(f"Node {source} not in graph")
+        return (0, [source])
+
+    # handle either directed or undirected
+    if G.is_directed():
+        Gpred = G.predecessors
+        Gsucc = G.successors
+    else:
+        Gpred = G.neighbors
+        Gsucc = G.neighbors
+
+    # support optional nodes filter
+    if ignore_nodes:
+
+        def filter_iter(nodes):
+            def iterate(v):
+                for w in nodes(v):
+                    if w not in ignore_nodes:
+                        yield w
+
+            return iterate
+
+        Gpred = filter_iter(Gpred)
+        Gsucc = filter_iter(Gsucc)
+
+    # support optional edges filter
+    if ignore_edges:
+        if G.is_directed():
+
+            def filter_pred_iter(pred_iter):
+                def iterate(v):
+                    for w in pred_iter(v):
+                        if (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            def filter_succ_iter(succ_iter):
+                def iterate(v):
+                    for w in succ_iter(v):
+                        if (v, w) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_pred_iter(Gpred)
+            Gsucc = filter_succ_iter(Gsucc)
+
+        else:
+
+            def filter_iter(nodes):
+                def iterate(v):
+                    for w in nodes(v):
+                        if (v, w) not in ignore_edges and (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_iter(Gpred)
+            Gsucc = filter_iter(Gsucc)
+
+    push = heappush
+    pop = heappop
+    # Init:  Forward             Backward
+    dists = [{}, {}]  # dictionary of final distances
+    paths = [{source: [source]}, {target: [target]}]  # dictionary of paths
+    fringe = [[], []]  # heap of (distance, node) tuples for
+    # extracting next node to expand
+    seen = [{source: 0}, {target: 0}]  # dictionary of distances to
+    # nodes seen
+    c = count()
+    # initialize fringe heap
+    push(fringe[0], (0, next(c), source))
+    push(fringe[1], (0, next(c), target))
+    # neighs for extracting correct neighbor information
+    neighs = [Gsucc, Gpred]
+    # variables to hold shortest discovered path
+    # finaldist = 1e30000
+    finalpath = []
+    dir = 1
+    while fringe[0] and fringe[1]:
+        # choose direction
+        # dir == 0 is forward direction and dir == 1 is back
+        dir = 1 - dir
+        # extract closest to expand
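+        # fringe entries are (distance, tie-breaker, node); the counter keeps
+        # heap comparisons away from node objects, which may not be orderable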
+ (dist, _, v) = pop(fringe[dir]) + if v in dists[dir]: + # Shortest path to v has already been found + continue + # update distance + dists[dir][v] = dist # equal to seen[dir][v] + if v in dists[1 - dir]: + # if we have scanned v in both directions we are done + # we have now discovered the shortest path + return (finaldist, finalpath) + + wt = _weight_function(G, weight) + for w in neighs[dir](v): + if dir == 0: # forward + minweight = wt(v, w, G.get_edge_data(v, w)) + vwLength = dists[dir][v] + minweight + else: # back, must remember to change v,w->w,v + minweight = wt(w, v, G.get_edge_data(w, v)) + vwLength = dists[dir][v] + minweight + + if w in dists[dir]: + if vwLength < dists[dir][w]: + raise ValueError("Contradictory paths found: negative weights?") + elif w not in seen[dir] or vwLength < seen[dir][w]: + # relaxing + seen[dir][w] = vwLength + push(fringe[dir], (vwLength, next(c), w)) + paths[dir][w] = paths[dir][v] + [w] + if w in seen[0] and w in seen[1]: + # see if this path is better than the already + # discovered shortest path + totaldist = seen[0][w] + seen[1][w] + if finalpath == [] or finaldist > totaldist: + finaldist = totaldist + revpath = paths[1][w][:] + revpath.reverse() + finalpath = paths[0][w] + revpath[1:] + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") diff --git a/phivenv/Lib/site-packages/networkx/algorithms/smallworld.py b/phivenv/Lib/site-packages/networkx/algorithms/smallworld.py new file mode 100644 index 0000000000000000000000000000000000000000..172c4f9a879a62b5657f8b347dd1d4d6c6cc295b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/smallworld.py @@ -0,0 +1,403 @@ +"""Functions for estimating the small-world-ness of graphs. + +A small world network is characterized by a small average shortest path length, +and a large clustering coefficient. + +Small-worldness is commonly measured with the coefficient sigma or omega. + +Both coefficients compare the average clustering coefficient and shortest path +length of a given graph against the same quantities for an equivalent random +or lattice graph. + +For more information, see the Wikipedia article on small-world network [1]_. + +.. [1] Small-world network:: https://en.wikipedia.org/wiki/Small-world_network + +""" +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["random_reference", "lattice_reference", "sigma", "omega"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatch +def random_reference(G, niter=1, connectivity=True, seed=None): + """Compute a random graph by swapping edges of a given graph. + + Parameters + ---------- + G : graph + An undirected graph with 4 or more nodes. + + niter : integer (optional, default=1) + An edge is rewired approximately `niter` times. + + connectivity : boolean (optional, default=True) + When True, ensure connectivity for the randomized graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : graph + The randomized graph. + + Raises + ------ + NetworkXError + If there are fewer than 4 nodes or 2 edges in `G` + + Notes + ----- + The implementation is adapted from the algorithm by Maslov and Sneppen + (2002) [1]_. + + References + ---------- + .. [1] Maslov, Sergei, and Kim Sneppen. + "Specificity and stability in topology of protein networks." + Science 296.5569 (2002): 910-913. 
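+
+    Examples
+    --------
+    A minimal sketch; the double-edge swaps preserve the degree sequence:
+
+    >>> G = nx.connected_watts_strogatz_graph(10, 4, 0.1, seed=42)
+    >>> G_rand = nx.random_reference(G, niter=1, seed=42)
+    >>> sorted(d for _, d in G.degree()) == sorted(d for _, d in G_rand.degree())
+    True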
+    """
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has fewer than four nodes.")
+    if len(G.edges) < 2:
+        raise nx.NetworkXError("Graph has fewer than 2 edges")
+
+    from networkx.utils import cumulative_distribution, discrete_sequence
+
+    local_conn = nx.connectivity.local_edge_connectivity
+
+    G = G.copy()
+    keys, degrees = zip(*G.degree())  # keys, degree
+    cdf = cumulative_distribution(degrees)  # cdf of degree
+    nnodes = len(G)
+    nedges = nx.number_of_edges(G)
+    niter = niter * nedges
+    ntries = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))
+    swapcount = 0
+
+    for i in range(niter):
+        n = 0
+        while n < ntries:
+            # pick two random edges without creating edge list
+            # choose source node indices from discrete distribution
+            (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+            if ai == ci:
+                continue  # same source, skip
+            a = keys[ai]  # convert index to label
+            c = keys[ci]
+            # choose target uniformly from neighbors
+            b = seed.choice(list(G.neighbors(a)))
+            d = seed.choice(list(G.neighbors(c)))
+            if b in [a, c, d] or d in [a, b, c]:
+                continue  # all vertices should be different
+
+            # don't create parallel edges
+            if (d not in G[a]) and (b not in G[c]):
+                G.add_edge(a, d)
+                G.add_edge(c, b)
+                G.remove_edge(a, b)
+                G.remove_edge(c, d)
+
+                # Check if the graph is still connected
+                if connectivity and local_conn(G, a, b) == 0:
+                    # Not connected, revert the swap
+                    G.remove_edge(a, d)
+                    G.remove_edge(c, b)
+                    G.add_edge(a, b)
+                    G.add_edge(c, d)
+                else:
+                    swapcount += 1
+                    break
+            n += 1
+    return G
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@py_random_state(4)
+@nx._dispatch
+def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):
+    """Latticize the given graph by swapping edges.
+
+    Parameters
+    ----------
+    G : graph
+        An undirected graph.
+
+    niter : integer (optional, default=5)
+        An edge is rewired approximately niter times.
+
+    D : numpy.array (optional, default=None)
+        Matrix of distances to the diagonal.
+
+    connectivity : boolean (optional, default=True)
+        Ensure connectivity for the latticized graph when set to True.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    G : graph
+        The latticized graph.
+
+    Raises
+    ------
+    NetworkXError
+        If there are fewer than 4 nodes or 2 edges in `G`
+
+    Notes
+    -----
+    The implementation is adapted from the algorithm by Sporns et al. [1]_,
+    which is inspired by the original work by Maslov and Sneppen (2002) [2]_.
+
+    References
+    ----------
+    .. [1] Sporns, Olaf, and Jonathan D. Zwi.
+       "The small world of the cerebral cortex."
+       Neuroinformatics 2.2 (2004): 145-162.
+    .. [2] Maslov, Sergei, and Kim Sneppen.
+       "Specificity and stability in topology of protein networks."
+       Science 296.5569 (2002): 910-913.
+    """
+    import numpy as np
+
+    from networkx.utils import cumulative_distribution, discrete_sequence
+
+    local_conn = nx.connectivity.local_edge_connectivity
+
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has fewer than four nodes.")
+    if len(G.edges) < 2:
+        raise nx.NetworkXError("Graph has fewer than 2 edges")
+    # Instead of choosing uniformly at random from a generated edge list,
+    # this algorithm chooses nonuniformly from the set of nodes with
+    # probability weighted by degree.
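+    # D[i, j] below is the circular (ring) distance between positions i and j;
+    # a swap is only kept when it does not move edge endpoints further from
+    # the matrix diagonal, nudging the wiring toward a ring lattice.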
+ G = G.copy() + keys, degrees = zip(*G.degree()) # keys, degree + cdf = cumulative_distribution(degrees) # cdf of degree + + nnodes = len(G) + nedges = nx.number_of_edges(G) + if D is None: + D = np.zeros((nnodes, nnodes)) + un = np.arange(1, nnodes) + um = np.arange(nnodes - 1, 0, -1) + u = np.append((0,), np.where(un < um, un, um)) + + for v in range(int(np.ceil(nnodes / 2))): + D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1]) + D[v, :] = D[nnodes - v - 1, :][::-1] + + niter = niter * nedges + # maximal number of rewiring attempts per 'niter' + max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2)) + + for _ in range(niter): + n = 0 + while n < max_attempts: + # pick two random edges without creating edge list + # choose source node indices from discrete distribution + (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed) + if ai == ci: + continue # same source, skip + a = keys[ai] # convert index to label + c = keys[ci] + # choose target uniformly from neighbors + b = seed.choice(list(G.neighbors(a))) + d = seed.choice(list(G.neighbors(c))) + bi = keys.index(b) + di = keys.index(d) + + if b in [a, c, d] or d in [a, b, c]: + continue # all vertices should be different + + # don't create parallel edges + if (d not in G[a]) and (b not in G[c]): + if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]: + # only swap if we get closer to the diagonal + G.add_edge(a, d) + G.add_edge(c, b) + G.remove_edge(a, b) + G.remove_edge(c, d) + + # Check if the graph is still connected + if connectivity and local_conn(G, a, b) == 0: + # Not connected, revert the swap + G.remove_edge(a, d) + G.remove_edge(c, b) + G.add_edge(a, b) + G.add_edge(c, d) + else: + break + n += 1 + + return G + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatch +def sigma(G, niter=100, nrand=10, seed=None): + """Returns the small-world coefficient (sigma) of the given graph. + + The small-world coefficient is defined as: + sigma = C/Cr / L/Lr + where C and L are respectively the average clustering coefficient and + average shortest path length of G. Cr and Lr are respectively the average + clustering coefficient and average shortest path length of an equivalent + random graph. + + A graph is commonly classified as small-world if sigma>1. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + niter : integer (optional, default=100) + Approximate number of rewiring per edge to compute the equivalent + random graph. + nrand : integer (optional, default=10) + Number of random graphs generated to compute the average clustering + coefficient (Cr) and average shortest path length (Lr). + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + sigma : float + The small-world coefficient of G. + + Notes + ----- + The implementation is adapted from Humphries et al. [1]_ [2]_. + + References + ---------- + .. [1] The brainstem reticular formation is a small-world, not scale-free, + network M. D. Humphries, K. Gurney and T. J. Prescott, + Proc. Roy. Soc. B 2006 273, 503-511, doi:10.1098/rspb.2005.3354. + .. [2] Humphries and Gurney (2008). + "Network 'Small-World-Ness': A Quantitative Method for Determining + Canonical Network Equivalence". + PLoS One. 3 (4). PMID 18446219. doi:10.1371/journal.pone.0002051. 
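+
+    Examples
+    --------
+    A minimal sketch; small ``niter`` and ``nrand`` keep the runtime down at
+    the cost of a noisier estimate:
+
+    >>> G = nx.connected_watts_strogatz_graph(20, 4, 0.1, seed=42)
+    >>> s = nx.sigma(G, niter=2, nrand=3, seed=42)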
+ """ + import numpy as np + + # Compute the mean clustering coefficient and average shortest path length + # for an equivalent random graph + randMetrics = {"C": [], "L": []} + for i in range(nrand): + Gr = random_reference(G, niter=niter, seed=seed) + randMetrics["C"].append(nx.transitivity(Gr)) + randMetrics["L"].append(nx.average_shortest_path_length(Gr)) + + C = nx.transitivity(G) + L = nx.average_shortest_path_length(G) + Cr = np.mean(randMetrics["C"]) + Lr = np.mean(randMetrics["L"]) + + sigma = (C / Cr) / (L / Lr) + + return sigma + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatch +def omega(G, niter=5, nrand=10, seed=None): + """Returns the small-world coefficient (omega) of a graph + + The small-world coefficient of a graph G is: + + omega = Lr/L - C/Cl + + where C and L are respectively the average clustering coefficient and + average shortest path length of G. Lr is the average shortest path length + of an equivalent random graph and Cl is the average clustering coefficient + of an equivalent lattice graph. + + The small-world coefficient (omega) measures how much G is like a lattice + or a random graph. Negative values mean G is similar to a lattice whereas + positive values mean G is a random graph. + Values close to 0 mean that G has small-world characteristics. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + niter: integer (optional, default=5) + Approximate number of rewiring per edge to compute the equivalent + random graph. + + nrand: integer (optional, default=10) + Number of random graphs generated to compute the maximal clustering + coefficient (Cr) and average shortest path length (Lr). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + + Returns + ------- + omega : float + The small-world coefficient (omega) + + Notes + ----- + The implementation is adapted from the algorithm by Telesford et al. [1]_. + + References + ---------- + .. [1] Telesford, Joyce, Hayasaka, Burdette, and Laurienti (2011). + "The Ubiquity of Small-World Networks". + Brain Connectivity. 1 (0038): 367-75. PMC 3604768. PMID 22432451. + doi:10.1089/brain.2011.0038. 
+ """ + import numpy as np + + # Compute the mean clustering coefficient and average shortest path length + # for an equivalent random graph + randMetrics = {"C": [], "L": []} + + # Calculate initial average clustering coefficient which potentially will + # get replaced by higher clustering coefficients from generated lattice + # reference graphs + Cl = nx.average_clustering(G) + + niter_lattice_reference = niter + niter_random_reference = niter * 2 + + for _ in range(nrand): + # Generate random graph + Gr = random_reference(G, niter=niter_random_reference, seed=seed) + randMetrics["L"].append(nx.average_shortest_path_length(Gr)) + + # Generate lattice graph + Gl = lattice_reference(G, niter=niter_lattice_reference, seed=seed) + + # Replace old clustering coefficient, if clustering is higher in + # generated lattice reference + Cl_temp = nx.average_clustering(Gl) + if Cl_temp > Cl: + Cl = Cl_temp + + C = nx.average_clustering(G) + L = nx.average_shortest_path_length(G) + Lr = np.mean(randMetrics["L"]) + + omega = (Lr / L) - (C / Cl) + + return omega diff --git a/phivenv/Lib/site-packages/networkx/algorithms/smetric.py b/phivenv/Lib/site-packages/networkx/algorithms/smetric.py new file mode 100644 index 0000000000000000000000000000000000000000..80ae314bbdda4c101b15f0ef20983c4f1e259cb0 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/smetric.py @@ -0,0 +1,60 @@ +import networkx as nx + +__all__ = ["s_metric"] + + +@nx._dispatch +def s_metric(G, **kwargs): + """Returns the s-metric [1]_ of graph. + + The s-metric is defined as the sum of the products ``deg(u) * deg(v)`` + for every edge ``(u, v)`` in `G`. + + Parameters + ---------- + G : graph + The graph used to compute the s-metric. + normalized : bool (optional) + Normalize the value. + + .. deprecated:: 3.2 + + The `normalized` keyword argument is deprecated and will be removed + in the future + + Returns + ------- + s : float + The s-metric of the graph. + + References + ---------- + .. [1] Lun Li, David Alderson, John C. Doyle, and Walter Willinger, + Towards a Theory of Scale-Free Graphs: + Definition, Properties, and Implications (Extended Version), 2005. + https://arxiv.org/abs/cond-mat/0501169 + """ + # NOTE: This entire code block + the **kwargs in the signature can all be + # removed when the deprecation expires. + # Normalized is always False, since all `normalized=True` did was raise + # a NotImplementedError + if kwargs: + # Warn for `normalize`, raise for any other kwarg + if "normalized" in kwargs: + import warnings + + warnings.warn( + "\n\nThe `normalized` keyword is deprecated and will be removed\n" + "in the future. 
To silence this warning, remove `normalized`\n"
+            "when calling `s_metric`.\n\n"
+            "The value of `normalized` is ignored.",
+            DeprecationWarning,
+            stacklevel=3,
+        )
+    else:
+        # Typical raising behavior for Python when kwarg not recognized
+        raise TypeError(
+            f"s_metric got an unexpected keyword argument '{list(kwargs.keys())[0]}'"
+        )
+
+    return float(sum(G.degree(u) * G.degree(v) for (u, v) in G.edges()))
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/sparsifiers.py b/phivenv/Lib/site-packages/networkx/algorithms/sparsifiers.py
new file mode 100644
index 0000000000000000000000000000000000000000..a94aee0d09d65edb9591037bb7b9e3b59972d0d2
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/sparsifiers.py
@@ -0,0 +1,295 @@
+"""Functions for computing sparsifiers of graphs."""
+import math
+
+import networkx as nx
+from networkx.utils import not_implemented_for, py_random_state
+
+__all__ = ["spanner"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@py_random_state(3)
+@nx._dispatch(edge_attrs="weight")
+def spanner(G, stretch, weight=None, seed=None):
+    """Returns a spanner of the given graph with the given stretch.
+
+    A spanner of a graph G = (V, E) with stretch t is a subgraph
+    H = (V, E_S) such that E_S is a subset of E and the distance between
+    any pair of nodes in H is at most t times the distance between the
+    nodes in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected simple graph.
+
+    stretch : float
+        The stretch of the spanner.
+
+    weight : object
+        The edge attribute to use as distance.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    NetworkX graph
+        A spanner of the given graph with the given stretch.
+
+    Raises
+    ------
+    ValueError
+        If a stretch less than 1 is given.
+
+    Notes
+    -----
+    This function implements the spanner algorithm by Baswana and Sen,
+    see [1].
+
+    This algorithm is a randomized Las Vegas algorithm: the expected
+    running time is O(km) where k = (stretch + 1) // 2 and m is the
+    number of edges in G. The returned graph is always a spanner of the
+    given graph with the specified stretch. For weighted graphs the
+    number of edges in the spanner is O(k * n^(1 + 1 / k)) where k is
+    defined as above and n is the number of nodes in G. For unweighted
+    graphs the number of edges is O(n^(1 + 1 / k) + kn).
+
+    References
+    ----------
+    [1] S. Baswana, S. Sen. A Simple and Linear Time Randomized
+    Algorithm for Computing Sparse Spanners in Weighted Graphs.
+    Random Struct. Algorithms 30(4): 532-563 (2007).
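+
+    Examples
+    --------
+    A small illustrative sketch (the exact spanner depends on the seed,
+    but it is always a subgraph of `G` on the same node set):
+
+    >>> G = nx.complete_graph(10)
+    >>> H = nx.spanner(G, stretch=3, seed=42)
+    >>> H.number_of_edges() <= G.number_of_edges()
+    True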
+ """ + if stretch < 1: + raise ValueError("stretch must be at least 1") + + k = (stretch + 1) // 2 + + # initialize spanner H with empty edge set + H = nx.empty_graph() + H.add_nodes_from(G.nodes) + + # phase 1: forming the clusters + # the residual graph has V' from the paper as its node set + # and E' from the paper as its edge set + residual_graph = _setup_residual_graph(G, weight) + # clustering is a dictionary that maps nodes in a cluster to the + # cluster center + clustering = {v: v for v in G.nodes} + sample_prob = math.pow(G.number_of_nodes(), -1 / k) + size_limit = 2 * math.pow(G.number_of_nodes(), 1 + 1 / k) + + i = 0 + while i < k - 1: + # step 1: sample centers + sampled_centers = set() + for center in set(clustering.values()): + if seed.random() < sample_prob: + sampled_centers.add(center) + + # combined loop for steps 2 and 3 + edges_to_add = set() + edges_to_remove = set() + new_clustering = {} + for v in residual_graph.nodes: + if clustering[v] in sampled_centers: + continue + + # step 2: find neighboring (sampled) clusters and + # lightest edges to them + lightest_edge_neighbor, lightest_edge_weight = _lightest_edge_dicts( + residual_graph, clustering, v + ) + neighboring_sampled_centers = ( + set(lightest_edge_weight.keys()) & sampled_centers + ) + + # step 3: add edges to spanner + if not neighboring_sampled_centers: + # connect to each neighboring center via lightest edge + for neighbor in lightest_edge_neighbor.values(): + edges_to_add.add((v, neighbor)) + # remove all incident edges + for neighbor in residual_graph.adj[v]: + edges_to_remove.add((v, neighbor)) + + else: # there is a neighboring sampled center + closest_center = min( + neighboring_sampled_centers, key=lightest_edge_weight.get + ) + closest_center_weight = lightest_edge_weight[closest_center] + closest_center_neighbor = lightest_edge_neighbor[closest_center] + + edges_to_add.add((v, closest_center_neighbor)) + new_clustering[v] = closest_center + + # connect to centers with edge weight less than + # closest_center_weight + for center, edge_weight in lightest_edge_weight.items(): + if edge_weight < closest_center_weight: + neighbor = lightest_edge_neighbor[center] + edges_to_add.add((v, neighbor)) + + # remove edges to centers with edge weight less than + # closest_center_weight + for neighbor in residual_graph.adj[v]: + neighbor_cluster = clustering[neighbor] + neighbor_weight = lightest_edge_weight[neighbor_cluster] + if ( + neighbor_cluster == closest_center + or neighbor_weight < closest_center_weight + ): + edges_to_remove.add((v, neighbor)) + + # check whether iteration added too many edges to spanner, + # if so repeat + if len(edges_to_add) > size_limit: + # an iteration is repeated O(1) times on expectation + continue + + # iteration succeeded + i = i + 1 + + # actually add edges to spanner + for u, v in edges_to_add: + _add_edge_to_spanner(H, residual_graph, u, v, weight) + + # actually delete edges from residual graph + residual_graph.remove_edges_from(edges_to_remove) + + # copy old clustering data to new_clustering + for node, center in clustering.items(): + if center in sampled_centers: + new_clustering[node] = center + clustering = new_clustering + + # step 4: remove intra-cluster edges + for u in residual_graph.nodes: + for v in list(residual_graph.adj[u]): + if clustering[u] == clustering[v]: + residual_graph.remove_edge(u, v) + + # update residual graph node set + for v in list(residual_graph.nodes): + if v not in clustering: + residual_graph.remove_node(v) + + # phase 2: vertex-cluster 
joining
+    for v in residual_graph.nodes:
+        lightest_edge_neighbor, _ = _lightest_edge_dicts(residual_graph, clustering, v)
+        for neighbor in lightest_edge_neighbor.values():
+            _add_edge_to_spanner(H, residual_graph, v, neighbor, weight)
+
+    return H
+
+
+def _setup_residual_graph(G, weight):
+    """Set up the residual graph as a copy of G with unique edge weights.
+
+    The node set of the residual graph corresponds to the set V' from
+    the Baswana-Sen paper and the edge set corresponds to the set E'
+    from the paper.
+
+    This function associates distinct weights to the edges of the
+    residual graph (even for unweighted input graphs), as required by
+    the algorithm.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected simple graph.
+
+    weight : object
+        The edge attribute to use as distance.
+
+    Returns
+    -------
+    NetworkX graph
+        The residual graph used for the Baswana-Sen algorithm.
+    """
+    residual_graph = G.copy()
+
+    # establish unique edge weights, even for unweighted graphs
+    for u, v in G.edges():
+        if not weight:
+            residual_graph[u][v]["weight"] = (id(u), id(v))
+        else:
+            residual_graph[u][v]["weight"] = (G[u][v][weight], id(u), id(v))
+
+    return residual_graph
+
+
+def _lightest_edge_dicts(residual_graph, clustering, node):
+    """Find the lightest edge to each cluster.
+
+    Searches for the minimum-weight edge to each cluster adjacent to
+    the given node.
+
+    Parameters
+    ----------
+    residual_graph : NetworkX graph
+        The residual graph used by the Baswana-Sen algorithm.
+
+    clustering : dictionary
+        The current clustering of the nodes.
+
+    node : node
+        The node from which the search originates.
+
+    Returns
+    -------
+    lightest_edge_neighbor, lightest_edge_weight : dictionary, dictionary
+        lightest_edge_neighbor is a dictionary that maps a center C to
+        a node v in the corresponding cluster such that the edge from
+        the given node to v is the lightest edge from the given node to
+        any node in the cluster. lightest_edge_weight maps a center C to
+        the weight of the aforementioned edge.
+
+    Notes
+    -----
+    If a cluster has no node that is adjacent to the given node in the
+    residual graph then the center of the cluster is not a key in the
+    returned dictionaries.
+    """
+    lightest_edge_neighbor = {}
+    lightest_edge_weight = {}
+    for neighbor in residual_graph.adj[node]:
+        neighbor_center = clustering[neighbor]
+        weight = residual_graph[node][neighbor]["weight"]
+        if (
+            neighbor_center not in lightest_edge_weight
+            or weight < lightest_edge_weight[neighbor_center]
+        ):
+            lightest_edge_neighbor[neighbor_center] = neighbor
+            lightest_edge_weight[neighbor_center] = weight
+    return lightest_edge_neighbor, lightest_edge_weight
+
+
+def _add_edge_to_spanner(H, residual_graph, u, v, weight):
+    """Add the edge {u, v} to the spanner H and take weight from
+    the residual graph.
+
+    Parameters
+    ----------
+    H : NetworkX graph
+        The spanner under construction.
+
+    residual_graph : NetworkX graph
+        The residual graph used by the Baswana-Sen algorithm. The weight
+        for the edge is taken from this graph.
+
+    u : node
+        One endpoint of the edge.
+
+    v : node
+        The other endpoint of the edge.
+
+    weight : object
+        The edge attribute to use as distance.
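+
+    Notes
+    -----
+    When a weight attribute is given, the residual graph stores each edge
+    weight as a tuple whose first entry is the original weight (the other
+    entries only serve as tie-breakers), so just that first entry is
+    copied into the spanner.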
+ """ + H.add_edge(u, v) + if weight: + H[u][v][weight] = residual_graph[u][v]["weight"][0] diff --git a/phivenv/Lib/site-packages/networkx/algorithms/structuralholes.py b/phivenv/Lib/site-packages/networkx/algorithms/structuralholes.py new file mode 100644 index 0000000000000000000000000000000000000000..c676177b38e584af178332cfdf65e0a8f0d3c7e4 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/structuralholes.py @@ -0,0 +1,283 @@ +"""Functions for computing measures of structural holes.""" + +import networkx as nx + +__all__ = ["constraint", "local_constraint", "effective_size"] + + +@nx._dispatch(edge_attrs="weight") +def mutual_weight(G, u, v, weight=None): + """Returns the sum of the weights of the edge from `u` to `v` and + the edge from `v` to `u` in `G`. + + `weight` is the edge data key that represents the edge weight. If + the specified key is `None` or is not in the edge data for an edge, + that edge is assumed to have weight 1. + + Pre-conditions: `u` and `v` must both be in `G`. + + """ + try: + a_uv = G[u][v].get(weight, 1) + except KeyError: + a_uv = 0 + try: + a_vu = G[v][u].get(weight, 1) + except KeyError: + a_vu = 0 + return a_uv + a_vu + + +@nx._dispatch(edge_attrs="weight") +def normalized_mutual_weight(G, u, v, norm=sum, weight=None): + """Returns normalized mutual weight of the edges from `u` to `v` + with respect to the mutual weights of the neighbors of `u` in `G`. + + `norm` specifies how the normalization factor is computed. It must + be a function that takes a single argument and returns a number. + The argument will be an iterable of mutual weights + of pairs ``(u, w)``, where ``w`` ranges over each (in- and + out-)neighbor of ``u``. Commons values for `normalization` are + ``sum`` and ``max``. + + `weight` can be ``None`` or a string, if None, all edge weights + are considered equal. Otherwise holds the name of the edge + attribute used as weight. + + """ + scale = norm(mutual_weight(G, u, w, weight) for w in set(nx.all_neighbors(G, u))) + return 0 if scale == 0 else mutual_weight(G, u, v, weight) / scale + + +@nx._dispatch(edge_attrs="weight") +def effective_size(G, nodes=None, weight=None): + r"""Returns the effective size of all nodes in the graph ``G``. + + The *effective size* of a node's ego network is based on the concept + of redundancy. A person's ego network has redundancy to the extent + that her contacts are connected to each other as well. The + nonredundant part of a person's relationships is the effective + size of her ego network [1]_. Formally, the effective size of a + node $u$, denoted $e(u)$, is defined by + + .. math:: + + e(u) = \sum_{v \in N(u) \setminus \{u\}} + \left(1 - \sum_{w \in N(v)} p_{uw} m_{vw}\right) + + where $N(u)$ is the set of neighbors of $u$ and $p_{uw}$ is the + normalized mutual weight of the (directed or undirected) edges + joining $u$ and $v$, for each vertex $u$ and $v$ [1]_. And $m_{vw}$ + is the mutual weight of $v$ and $w$ divided by $v$ highest mutual + weight with any of its neighbors. The *mutual weight* of $u$ and $v$ + is the sum of the weights of edges joining them (edge weights are + assumed to be one if the graph is unweighted). + + For the case of unweighted and undirected graphs, Borgatti proposed + a simplified formula to compute effective size [2]_ + + .. math:: + + e(u) = n - \frac{2t}{n} + + where `t` is the number of ties in the ego network (not including + ties to ego) and `n` is the number of nodes (excluding ego). 
+ + Parameters + ---------- + G : NetworkX graph + The graph containing ``v``. Directed graphs are treated like + undirected graphs when computing neighbors of ``v``. + + nodes : container, optional + Container of nodes in the graph ``G`` to compute the effective size. + If None, the effective size of every node is computed. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + dict + Dictionary with nodes as keys and the effective size of the node as values. + + Notes + ----- + Burt also defined the related concept of *efficiency* of a node's ego + network, which is its effective size divided by the degree of that + node [1]_. So you can easily compute efficiency: + + >>> G = nx.DiGraph() + >>> G.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)]) + >>> esize = nx.effective_size(G) + >>> efficiency = {n: v / G.degree(n) for n, v in esize.items()} + + See also + -------- + constraint + + References + ---------- + .. [1] Burt, Ronald S. + *Structural Holes: The Social Structure of Competition.* + Cambridge: Harvard University Press, 1995. + + .. [2] Borgatti, S. + "Structural Holes: Unpacking Burt's Redundancy Measures" + CONNECTIONS 20(1):35-38. + http://www.analytictech.com/connections/v20(1)/holes.htm + + """ + + def redundancy(G, u, v, weight=None): + nmw = normalized_mutual_weight + r = sum( + nmw(G, u, w, weight=weight) * nmw(G, v, w, norm=max, weight=weight) + for w in set(nx.all_neighbors(G, u)) + ) + return 1 - r + + effective_size = {} + if nodes is None: + nodes = G + # Use Borgatti's simplified formula for unweighted and undirected graphs + if not G.is_directed() and weight is None: + for v in nodes: + # Effective size is not defined for isolated nodes + if len(G[v]) == 0: + effective_size[v] = float("nan") + continue + E = nx.ego_graph(G, v, center=False, undirected=True) + effective_size[v] = len(E) - (2 * E.size()) / len(E) + else: + for v in nodes: + # Effective size is not defined for isolated nodes + if len(G[v]) == 0: + effective_size[v] = float("nan") + continue + effective_size[v] = sum( + redundancy(G, v, u, weight) for u in set(nx.all_neighbors(G, v)) + ) + return effective_size + + +@nx._dispatch(edge_attrs="weight") +def constraint(G, nodes=None, weight=None): + r"""Returns the constraint on all nodes in the graph ``G``. + + The *constraint* is a measure of the extent to which a node *v* is + invested in those nodes that are themselves invested in the + neighbors of *v*. Formally, the *constraint on v*, denoted `c(v)`, + is defined by + + .. math:: + + c(v) = \sum_{w \in N(v) \setminus \{v\}} \ell(v, w) + + where $N(v)$ is the subset of the neighbors of `v` that are either + predecessors or successors of `v` and $\ell(v, w)$ is the local + constraint on `v` with respect to `w` [1]_. For the definition of local + constraint, see :func:`local_constraint`. + + Parameters + ---------- + G : NetworkX graph + The graph containing ``v``. This can be either directed or undirected. + + nodes : container, optional + Container of nodes in the graph ``G`` to compute the constraint. If + None, the constraint of every node is computed. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + dict + Dictionary with nodes as keys and the constraint on the node as values. + + See also + -------- + local_constraint + + References + ---------- + .. 
[1] Burt, Ronald S.
+       "Structural holes and good ideas".
+       American Journal of Sociology (110): 349–399.
+
+    """
+    if nodes is None:
+        nodes = G
+    constraint = {}
+    for v in nodes:
+        # Constraint is not defined for isolated nodes
+        if len(G[v]) == 0:
+            constraint[v] = float("nan")
+            continue
+        constraint[v] = sum(
+            local_constraint(G, v, n, weight) for n in set(nx.all_neighbors(G, v))
+        )
+    return constraint
+
+
+@nx._dispatch(edge_attrs="weight")
+def local_constraint(G, u, v, weight=None):
+    r"""Returns the local constraint on the node ``u`` with respect to
+    the node ``v`` in the graph ``G``.
+
+    Formally, the *local constraint on u with respect to v*, denoted
+    $\ell(u, v)$, is defined by
+
+    .. math::
+
+       \ell(u, v) = \left(p_{uv} + \sum_{w \in N(v)} p_{uw} p_{wv}\right)^2,
+
+    where $N(v)$ is the set of neighbors of $v$ and $p_{uv}$ is the
+    normalized mutual weight of the (directed or undirected) edges
+    joining $u$ and $v$, for each vertex $u$ and $v$ [1]_. The *mutual
+    weight* of $u$ and $v$ is the sum of the weights of edges joining
+    them (edge weights are assumed to be one if the graph is
+    unweighted).
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph containing ``u`` and ``v``. This can be either
+        directed or undirected.
+
+    u : node
+        A node in the graph ``G``.
+
+    v : node
+        A node in the graph ``G``.
+
+    weight : None or string, optional
+        If None, all edge weights are considered equal.
+        Otherwise holds the name of the edge attribute used as weight.
+
+    Returns
+    -------
+    float
+        The local constraint on the node ``u`` with respect to the node
+        ``v`` in the graph ``G``.
+
+    See also
+    --------
+    constraint
+
+    References
+    ----------
+    .. [1] Burt, Ronald S.
+       "Structural holes and good ideas".
+       American Journal of Sociology (110): 349–399.
+
+    """
+    nmw = normalized_mutual_weight
+    direct = nmw(G, u, v, weight=weight)
+    indirect = sum(
+        nmw(G, u, w, weight=weight) * nmw(G, w, v, weight=weight)
+        for w in set(nx.all_neighbors(G, u))
+    )
+    return (direct + indirect) ** 2
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/summarization.py b/phivenv/Lib/site-packages/networkx/algorithms/summarization.py
new file mode 100644
index 0000000000000000000000000000000000000000..26665e09b1ab559a6982311d2ee3d93ea9cc6a00
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/summarization.py
@@ -0,0 +1,561 @@
+"""
+Graph summarization finds smaller representations of graphs resulting in faster
+runtime of algorithms, reduced storage needs, and noise reduction.
+Summarization has applications in areas such as visualization, pattern mining,
+clustering and community detection, and more. Core graph summarization
+techniques are grouping/aggregation, bit-compression,
+simplification/sparsification, and influence based. Graph summarization
+algorithms often produce either summary graphs in the form of supergraphs or
+sparsified graphs, or a list of independent structures. Supergraphs are the
+most common product, which consist of supernodes and original nodes and are
+connected by edges and superedges, which represent aggregate edges between
+nodes and supernodes.
+
+Grouping/aggregation based techniques compress graphs by representing
+close/connected nodes and edges in a graph by a single node/edge in a
+supergraph. Nodes can be grouped together into supernodes based on their
+structural similarities or proximity within a graph to reduce the total number
+of nodes in a graph. Edge-grouping techniques group edges into lossy/lossless
+nodes called compressor or virtual nodes to reduce the total number of edges in
+a graph. Edge-grouping techniques can be lossless, meaning that they can be
+used to re-create the original graph, or techniques can be lossy, requiring
+less space to store the summary graph, but at the expense of lower
+reconstruction accuracy of the original graph.
+
+Bit-compression techniques minimize the amount of information needed to
+describe the original graph, while revealing structural patterns in the
+original graph. The two-part minimum description length (MDL) is often used to
+represent the model and the original graph in terms of the model. A key
+difference between graph compression and graph summarization is that graph
+summarization focuses on finding structural patterns within the original graph,
+whereas graph compression focuses on compressing the original graph to be as
+small as possible. **NOTE**: Some bit-compression methods exist solely to
+compress a graph without creating a summary graph or finding comprehensible
+structural patterns.
+
+Simplification/Sparsification techniques attempt to create a sparse
+representation of a graph by removing unimportant nodes and edges from the
+graph. Sparsified graphs differ from supergraphs created by
+grouping/aggregation by only containing a subset of the original nodes and
+edges of the original graph.
+
+Influence based techniques aim to find a high-level description of influence
+propagation in a large graph. These methods are scarce and have been mostly
+applied to social graphs.
+
+*dedensification* is a grouping/aggregation based technique to compress the
+neighborhoods around high-degree nodes in unweighted graphs by adding
+compressor nodes that summarize multiple edges of the same type to
+high-degree nodes (nodes with a degree greater than a given threshold).
+Dedensification was developed for the purpose of increasing performance of
+query processing around high-degree nodes in graph databases and enables direct
+operations on the compressed graph. The structural patterns surrounding
+high-degree nodes in the original graph are preserved while using fewer edges
+and adding a small number of compressor nodes. The degree of nodes present in
+the original graph is also preserved. The current implementation of
+dedensification supports graphs with one edge type.
+
+For more information on graph summarization, see `Graph Summarization Methods
+and Applications: A Survey `_
+"""
+from collections import Counter, defaultdict
+
+import networkx as nx
+
+__all__ = ["dedensify", "snap_aggregation"]
+
+
+@nx._dispatch
+def dedensify(G, threshold, prefix=None, copy=True):
+    """Compresses neighborhoods around high-degree nodes
+
+    Reduces the number of edges to high-degree nodes by adding compressor nodes
+    that summarize multiple edges of the same type to high-degree nodes (nodes
+    with a degree greater than a given threshold). Dedensification also has
+    the added benefit of reducing the number of edges around high-degree nodes.
+    The implementation currently supports graphs with a single edge type.
+
+    Parameters
+    ----------
+    G: graph
+        A networkx graph
+    threshold: int
+        Minimum degree threshold of a node to be considered a high degree node.
+        The threshold must be greater than or equal to 2.
+    prefix: str or None, optional (default: None)
+        An optional prefix for denoting compressor nodes
+    copy: bool, optional (default: True)
+        If True, the dedensification is performed on a copy of `G`;
+        otherwise, `G` is modified in place
+
+    Returns
+    -------
+    dedensified networkx graph : (graph, set)
+        2-tuple of the dedensified graph and set of compressor nodes
+
+    Notes
+    -----
+    According to the algorithm in [1]_, removes edges in a graph by
+    compressing/decompressing the neighborhoods around high degree nodes by
+    adding compressor nodes that summarize multiple edges of the same type
+    to high-degree nodes. Dedensification will only add a compressor node when
+    doing so will reduce the total number of edges in the given graph. This
+    implementation currently supports graphs with a single edge type.
+
+    Examples
+    --------
+    Dedensification will only add compressor nodes when doing so would result
+    in fewer edges::
+
+        >>> original_graph = nx.DiGraph()
+        >>> original_graph.add_nodes_from(
+        ...     ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
+        ... )
+        >>> original_graph.add_edges_from(
+        ...     [
+        ...         ("1", "C"), ("1", "B"),
+        ...         ("2", "C"), ("2", "B"), ("2", "A"),
+        ...         ("3", "B"), ("3", "A"), ("3", "6"),
+        ...         ("4", "C"), ("4", "B"), ("4", "A"),
+        ...         ("5", "B"), ("5", "A"),
+        ...         ("6", "5"),
+        ...         ("A", "6")
+        ...     ]
+        ... )
+        >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
+        >>> original_graph.number_of_edges()
+        15
+        >>> c_graph.number_of_edges()
+        14
+
+    A dedensified, directed graph can be "densified" to reconstruct the
+    original graph::
+
+        >>> original_graph = nx.DiGraph()
+        >>> original_graph.add_nodes_from(
+        ...     ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
+        ... )
+        >>> original_graph.add_edges_from(
+        ...     [
+        ...         ("1", "C"), ("1", "B"),
+        ...         ("2", "C"), ("2", "B"), ("2", "A"),
+        ...         ("3", "B"), ("3", "A"), ("3", "6"),
+        ...         ("4", "C"), ("4", "B"), ("4", "A"),
+        ...         ("5", "B"), ("5", "A"),
+        ...         ("6", "5"),
+        ...         ("A", "6")
+        ...     ]
+        ... )
+        >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
+        >>> # re-densifies the compressed graph into the original graph
+        >>> for c_node in c_nodes:
+        ...     all_neighbors = set(nx.all_neighbors(c_graph, c_node))
+        ...     out_neighbors = set(c_graph.neighbors(c_node))
+        ...     for out_neighbor in out_neighbors:
+        ...         c_graph.remove_edge(c_node, out_neighbor)
+        ...     in_neighbors = all_neighbors - out_neighbors
+        ...     for in_neighbor in in_neighbors:
+        ...         c_graph.remove_edge(in_neighbor, c_node)
+        ...         for out_neighbor in out_neighbors:
+        ...             c_graph.add_edge(in_neighbor, out_neighbor)
+        ...     c_graph.remove_node(c_node)
+        ...
+        >>> nx.is_isomorphic(original_graph, c_graph)
+        True
+
+    References
+    ----------
+    .. [1] Maccioni, A., & Abadi, D. J. (2016, August).
+       Scalable pattern matching over compressed graphs via dedensification.
+       In Proceedings of the 22nd ACM SIGKDD International Conference on
+       Knowledge Discovery and Data Mining (pp. 1755-1764).
+ http://www.cs.umd.edu/~abadi/papers/graph-dedense.pdf + """ + if threshold < 2: + raise nx.NetworkXError("The degree threshold must be >= 2") + + degrees = G.in_degree if G.is_directed() else G.degree + # Group nodes based on degree threshold + high_degree_nodes = {n for n, d in degrees if d > threshold} + low_degree_nodes = G.nodes() - high_degree_nodes + + auxiliary = {} + for node in G: + high_degree_neighbors = frozenset(high_degree_nodes & set(G[node])) + if high_degree_neighbors: + if high_degree_neighbors in auxiliary: + auxiliary[high_degree_neighbors].add(node) + else: + auxiliary[high_degree_neighbors] = {node} + + if copy: + G = G.copy() + + compressor_nodes = set() + for index, (high_degree_nodes, low_degree_nodes) in enumerate(auxiliary.items()): + low_degree_node_count = len(low_degree_nodes) + high_degree_node_count = len(high_degree_nodes) + old_edges = high_degree_node_count * low_degree_node_count + new_edges = high_degree_node_count + low_degree_node_count + if old_edges <= new_edges: + continue + compression_node = "".join(str(node) for node in high_degree_nodes) + if prefix: + compression_node = str(prefix) + compression_node + for node in low_degree_nodes: + for high_node in high_degree_nodes: + if G.has_edge(node, high_node): + G.remove_edge(node, high_node) + + G.add_edge(node, compression_node) + for node in high_degree_nodes: + G.add_edge(compression_node, node) + compressor_nodes.add(compression_node) + return G, compressor_nodes + + +def _snap_build_graph( + G, + groups, + node_attributes, + edge_attributes, + neighbor_info, + edge_types, + prefix, + supernode_attribute, + superedge_attribute, +): + """ + Build the summary graph from the data structures produced in the SNAP aggregation algorithm + + Used in the SNAP aggregation algorithm to build the output summary graph and supernode + lookup dictionary. 
This process uses the original graph and the data structures to + create the supernodes with the correct node attributes, and the superedges with the correct + edge attributes + + Parameters + ---------- + G: networkx.Graph + the original graph to be summarized + groups: dict + A dictionary of unique group IDs and their corresponding node groups + node_attributes: iterable + An iterable of the node attributes considered in the summarization process + edge_attributes: iterable + An iterable of the edge attributes considered in the summarization process + neighbor_info: dict + A data structure indicating the number of edges a node has with the + groups in the current summarization of each edge type + edge_types: dict + dictionary of edges in the graph and their corresponding attributes recognized + in the summarization + prefix: string + The prefix to be added to all supernodes + supernode_attribute: str + The node attribute for recording the supernode groupings of nodes + superedge_attribute: str + The edge attribute for recording the edge types represented by superedges + + Returns + ------- + summary graph: Networkx graph + """ + output = G.__class__() + node_label_lookup = {} + for index, group_id in enumerate(groups): + group_set = groups[group_id] + supernode = f"{prefix}{index}" + node_label_lookup[group_id] = supernode + supernode_attributes = { + attr: G.nodes[next(iter(group_set))][attr] for attr in node_attributes + } + supernode_attributes[supernode_attribute] = group_set + output.add_node(supernode, **supernode_attributes) + + for group_id in groups: + group_set = groups[group_id] + source_supernode = node_label_lookup[group_id] + for other_group, group_edge_types in neighbor_info[ + next(iter(group_set)) + ].items(): + if group_edge_types: + target_supernode = node_label_lookup[other_group] + summary_graph_edge = (source_supernode, target_supernode) + + edge_types = [ + dict(zip(edge_attributes, edge_type)) + for edge_type in group_edge_types + ] + + has_edge = output.has_edge(*summary_graph_edge) + if output.is_multigraph(): + if not has_edge: + for edge_type in edge_types: + output.add_edge(*summary_graph_edge, **edge_type) + elif not output.is_directed(): + existing_edge_data = output.get_edge_data(*summary_graph_edge) + for edge_type in edge_types: + if edge_type not in existing_edge_data.values(): + output.add_edge(*summary_graph_edge, **edge_type) + else: + superedge_attributes = {superedge_attribute: edge_types} + output.add_edge(*summary_graph_edge, **superedge_attributes) + + return output + + +def _snap_eligible_group(G, groups, group_lookup, edge_types): + """ + Determines if a group is eligible to be split. + + A group is eligible to be split if all nodes in the group have edges of the same type(s) + with the same other groups. 
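+
+    For example, a group in which one node has a "strong" edge into a
+    second group while another node has no edge into that group is not
+    uniform, and the splitting step would separate those nodes.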
+ + Parameters + ---------- + G: graph + graph to be summarized + groups: dict + A dictionary of unique group IDs and their corresponding node groups + group_lookup: dict + dictionary of nodes and their current corresponding group ID + edge_types: dict + dictionary of edges in the graph and their corresponding attributes recognized + in the summarization + + Returns + ------- + tuple: group ID to split, and neighbor-groups participation_counts data structure + """ + neighbor_info = {node: {gid: Counter() for gid in groups} for node in group_lookup} + for group_id in groups: + current_group = groups[group_id] + + # build neighbor_info for nodes in group + for node in current_group: + neighbor_info[node] = {group_id: Counter() for group_id in groups} + edges = G.edges(node, keys=True) if G.is_multigraph() else G.edges(node) + for edge in edges: + neighbor = edge[1] + edge_type = edge_types[edge] + neighbor_group_id = group_lookup[neighbor] + neighbor_info[node][neighbor_group_id][edge_type] += 1 + + # check if group_id is eligible to be split + group_size = len(current_group) + for other_group_id in groups: + edge_counts = Counter() + for node in current_group: + edge_counts.update(neighbor_info[node][other_group_id].keys()) + + if not all(count == group_size for count in edge_counts.values()): + # only the neighbor_info of the returned group_id is required for handling group splits + return group_id, neighbor_info + + # if no eligible groups, complete neighbor_info is calculated + return None, neighbor_info + + +def _snap_split(groups, neighbor_info, group_lookup, group_id): + """ + Splits a group based on edge types and updates the groups accordingly + + Splits the group with the given group_id based on the edge types + of the nodes so that each new grouping will all have the same + edges with other nodes. + + Parameters + ---------- + groups: dict + A dictionary of unique group IDs and their corresponding node groups + neighbor_info: dict + A data structure indicating the number of edges a node has with the + groups in the current summarization of each edge type + edge_types: dict + dictionary of edges in the graph and their corresponding attributes recognized + in the summarization + group_lookup: dict + dictionary of nodes and their current corresponding group ID + group_id: object + ID of group to be split + + Returns + ------- + dict + The updated groups based on the split + """ + new_group_mappings = defaultdict(set) + for node in groups[group_id]: + signature = tuple( + frozenset(edge_types) for edge_types in neighbor_info[node].values() + ) + new_group_mappings[signature].add(node) + + # leave the biggest new_group as the original group + new_groups = sorted(new_group_mappings.values(), key=len) + for new_group in new_groups[:-1]: + # Assign unused integer as the new_group_id + # ids are tuples, so will not interact with the original group_ids + new_group_id = len(groups) + groups[new_group_id] = new_group + groups[group_id] -= new_group + for node in new_group: + group_lookup[node] = new_group_id + + return groups + + +@nx._dispatch(node_attrs="[node_attributes]", edge_attrs="[edge_attributes]") +def snap_aggregation( + G, + node_attributes, + edge_attributes=(), + prefix="Supernode-", + supernode_attribute="group", + superedge_attribute="types", +): + """Creates a summary graph based on attributes and connectivity. 
+
+    This function uses the Summarization by Grouping Nodes on Attributes
+    and Pairwise edges (SNAP) algorithm for summarizing a given
+    graph by grouping nodes by node attributes and their edge attributes
+    into supernodes in a summary graph. This name SNAP should not be
+    confused with the Stanford Network Analysis Project (SNAP).
+
+    Here is a high-level view of how this algorithm works:
+
+    1) Group nodes by node attribute values.
+
+    2) Iteratively split groups until all nodes in each group have edges
+    to nodes in the same groups. That is, until all the groups are homogeneous
+    in their member nodes' edges to other groups. For example,
+    if all the nodes in group A only have edges to nodes in group B, then the
+    group is homogeneous and does not need to be split. If all nodes in group B
+    have edges with nodes in groups {A, C}, but some also have edges with other
+    nodes in B, then group B is not homogeneous and needs to be split into
+    a group of nodes having edges with {A, C} and a group of nodes having
+    edges with {A, B, C}. This way, viewers of the summary graph can
+    assume that all nodes in the group have the exact same node attributes and
+    the exact same edges.
+
+    3) Build the output summary graph, where the groups are represented by
+    super-nodes. Edges represent the edges shared between all the nodes in each
+    respective group.
+
+    A SNAP summary graph can be used to visualize graphs that are too large to display
+    or visually analyze, or to efficiently identify sets of similar nodes with similar connectivity
+    patterns to other sets of similar nodes based on specified node and/or edge attributes in a graph.
+
+    Parameters
+    ----------
+    G: graph
+        Networkx Graph to be summarized
+    node_attributes: iterable, required
+        An iterable of the node attributes used to group nodes in the summarization process. Nodes
+        with the same values for these attributes will be grouped together in the summary graph.
+    edge_attributes: iterable, optional
+        An iterable of the edge attributes considered in the summarization process. If provided, unique
+        combinations of the attribute values found in the graph are used to
+        determine the edge types in the graph. If not provided, all edges
+        are considered to be of the same type.
+    prefix: str
+        The prefix used to denote supernodes in the summary graph. Defaults to 'Supernode-'.
+    supernode_attribute: str
+        The node attribute for recording the supernode groupings of nodes. Defaults to 'group'.
+    superedge_attribute: str
+        The edge attribute for recording the edge types of multiple edges. Defaults to 'types'.
+
+    Returns
+    -------
+    networkx.Graph: summary graph
+
+    Examples
+    --------
+    SNAP aggregation takes a graph and summarizes it in the context of user-provided
+    node and edge attributes such that a viewer can more easily extract and
+    analyze the information represented by the graph.
+
+    >>> nodes = {
+    ...     "A": dict(color="Red"),
+    ...     "B": dict(color="Red"),
+    ...     "C": dict(color="Red"),
+    ...     "D": dict(color="Red"),
+    ...     "E": dict(color="Blue"),
+    ...     "F": dict(color="Blue"),
+    ... }
+    >>> edges = [
+    ...     ("A", "E", "Strong"),
+    ...     ("B", "F", "Strong"),
+    ...     ("C", "E", "Weak"),
+    ...     ("D", "F", "Weak"),
+    ... ]
+    >>> G = nx.Graph()
+    >>> for node in nodes:
+    ...     attributes = nodes[node]
+    ...     G.add_node(node, **attributes)
+    ...
+    >>> for source, target, type in edges:
+    ...     G.add_edge(source, target, type=type)
+    ...
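+    >>> # The node attribute "color" and the edge attribute "type" set up
+    >>> # above drive the grouping performed below.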
+ >>> node_attributes = ('color', ) + >>> edge_attributes = ('type', ) + >>> summary_graph = nx.snap_aggregation(G, node_attributes=node_attributes, edge_attributes=edge_attributes) + + Notes + ----- + The summary graph produced is called a maximum Attribute-edge + compatible (AR-compatible) grouping. According to [1]_, an + AR-compatible grouping means that all nodes in each group have the same + exact node attribute values and the same exact edges and + edge types to one or more nodes in the same groups. The maximal + AR-compatible grouping is the grouping with the minimal cardinality. + + The AR-compatible grouping is the most detailed grouping provided by + any of the SNAP algorithms. + + References + ---------- + .. [1] Y. Tian, R. A. Hankins, and J. M. Patel. Efficient aggregation + for graph summarization. In Proc. 2008 ACM-SIGMOD Int. Conf. + Management of Data (SIGMOD’08), pages 567–580, Vancouver, Canada, + June 2008. + """ + edge_types = { + edge: tuple(attrs.get(attr) for attr in edge_attributes) + for edge, attrs in G.edges.items() + } + if not G.is_directed(): + if G.is_multigraph(): + # list is needed to avoid mutating while iterating + edges = [((v, u, k), etype) for (u, v, k), etype in edge_types.items()] + else: + # list is needed to avoid mutating while iterating + edges = [((v, u), etype) for (u, v), etype in edge_types.items()] + edge_types.update(edges) + + group_lookup = { + node: tuple(attrs[attr] for attr in node_attributes) + for node, attrs in G.nodes.items() + } + groups = defaultdict(set) + for node, node_type in group_lookup.items(): + groups[node_type].add(node) + + eligible_group_id, neighbor_info = _snap_eligible_group( + G, groups, group_lookup, edge_types + ) + while eligible_group_id: + groups = _snap_split(groups, neighbor_info, group_lookup, eligible_group_id) + eligible_group_id, neighbor_info = _snap_eligible_group( + G, groups, group_lookup, edge_types + ) + return _snap_build_graph( + G, + groups, + node_attributes, + edge_attributes, + neighbor_info, + edge_types, + prefix, + supernode_attribute, + superedge_attribute, + ) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/swap.py b/phivenv/Lib/site-packages/networkx/algorithms/swap.py new file mode 100644 index 0000000000000000000000000000000000000000..926be49831ef34129674e39b4d98f89525011b29 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/swap.py @@ -0,0 +1,405 @@ +"""Swap edges in a graph. +""" + +import math + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["double_edge_swap", "connected_double_edge_swap", "directed_edge_swap"] + + +@nx.utils.not_implemented_for("undirected") +@py_random_state(3) +@nx._dispatch +def directed_edge_swap(G, *, nswap=1, max_tries=100, seed=None): + """Swap three edges in a directed graph while keeping the node degrees fixed. + + A directed edge swap swaps three edges such that a -> b -> c -> d becomes + a -> c -> b -> d. This pattern of swapping allows all possible states with the + same in- and out-degree distribution in a directed graph to be reached. + + If the swap would create parallel edges (e.g. if a -> c already existed in the + previous example), another attempt is made to find a suitable trio of edges. 
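+
+    For example, the path 1 -> 2 -> 3 -> 4 becomes 1 -> 3 -> 2 -> 4 after
+    one swap: edges (1, 2), (2, 3) and (3, 4) are replaced by (1, 3),
+    (3, 2) and (2, 4), and every node keeps its in- and out-degree.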
+ + Parameters + ---------- + G : DiGraph + A directed graph + + nswap : integer (optional, default=1) + Number of three-edge (directed) swaps to perform + + max_tries : integer (optional, default=100) + Maximum number of attempts to swap edges + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : DiGraph + The graph after the edges are swapped. + + Raises + ------ + NetworkXError + If `G` is not directed, or + If nswap > max_tries, or + If there are fewer than 4 nodes or 3 edges in `G`. + NetworkXAlgorithmError + If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made + + Notes + ----- + Does not enforce any connectivity constraints. + + The graph G is modified in place. + + References + ---------- + .. [1] Erdős, Péter L., et al. “A Simple Havel-Hakimi Type Algorithm to Realize + Graphical Degree Sequences of Directed Graphs.” ArXiv:0905.4913 [Math], + Jan. 2010. https://doi.org/10.48550/arXiv.0905.4913. + Published 2010 in Elec. J. Combinatorics (17(1)). R66. + http://www.combinatorics.org/Volume_17/PDF/v17i1r66.pdf + .. [2] “Combinatorics - Reaching All Possible Simple Directed Graphs with a given + Degree Sequence with 2-Edge Swaps.” Mathematics Stack Exchange, + https://math.stackexchange.com/questions/22272/. Accessed 30 May 2022. + """ + if nswap > max_tries: + raise nx.NetworkXError("Number of swaps > number of tries allowed.") + if len(G) < 4: + raise nx.NetworkXError("DiGraph has fewer than four nodes.") + if len(G.edges) < 3: + raise nx.NetworkXError("DiGraph has fewer than 3 edges") + + # Instead of choosing uniformly at random from a generated edge list, + # this algorithm chooses nonuniformly from the set of nodes with + # probability weighted by degree. + tries = 0 + swapcount = 0 + keys, degrees = zip(*G.degree()) # keys, degree + cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree + discrete_sequence = nx.utils.discrete_sequence + + while swapcount < nswap: + # choose source node index from discrete distribution + start_index = discrete_sequence(1, cdistribution=cdf, seed=seed)[0] + start = keys[start_index] + tries += 1 + + if tries > max_tries: + msg = f"Maximum number of swap attempts ({tries}) exceeded before desired swaps achieved ({nswap})." + raise nx.NetworkXAlgorithmError(msg) + + # If the given node doesn't have any out edges, then there isn't anything to swap + if G.out_degree(start) == 0: + continue + second = seed.choice(list(G.succ[start])) + if start == second: + continue + + if G.out_degree(second) == 0: + continue + third = seed.choice(list(G.succ[second])) + if second == third: + continue + + if G.out_degree(third) == 0: + continue + fourth = seed.choice(list(G.succ[third])) + if third == fourth: + continue + + if ( + third not in G.succ[start] + and fourth not in G.succ[second] + and second not in G.succ[third] + ): + # Swap nodes + G.add_edge(start, third) + G.add_edge(third, second) + G.add_edge(second, fourth) + G.remove_edge(start, second) + G.remove_edge(second, third) + G.remove_edge(third, fourth) + swapcount += 1 + + return G + + +@py_random_state(3) +@nx._dispatch +def double_edge_swap(G, nswap=1, max_tries=100, seed=None): + """Swap two edges in the graph while keeping the node degrees fixed. 
+ + A double-edge swap removes two randomly chosen edges u-v and x-y + and creates the new edges u-x and v-y:: + + u--v u v + becomes | | + x--y x y + + If either the edge u-x or v-y already exist no swap is performed + and another attempt is made to find a suitable edge pair. + + Parameters + ---------- + G : graph + An undirected graph + + nswap : integer (optional, default=1) + Number of double-edge swaps to perform + + max_tries : integer (optional) + Maximum number of attempts to swap edges + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : graph + The graph after double edge swaps. + + Raises + ------ + NetworkXError + If `G` is directed, or + If `nswap` > `max_tries`, or + If there are fewer than 4 nodes or 2 edges in `G`. + NetworkXAlgorithmError + If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made + + Notes + ----- + Does not enforce any connectivity constraints. + + The graph G is modified in place. + """ + if G.is_directed(): + raise nx.NetworkXError( + "double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead." + ) + if nswap > max_tries: + raise nx.NetworkXError("Number of swaps > number of tries allowed.") + if len(G) < 4: + raise nx.NetworkXError("Graph has fewer than four nodes.") + if len(G.edges) < 2: + raise nx.NetworkXError("Graph has fewer than 2 edges") + # Instead of choosing uniformly at random from a generated edge list, + # this algorithm chooses nonuniformly from the set of nodes with + # probability weighted by degree. + n = 0 + swapcount = 0 + keys, degrees = zip(*G.degree()) # keys, degree + cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree + discrete_sequence = nx.utils.discrete_sequence + while swapcount < nswap: + # if random.random() < 0.5: continue # trick to avoid periodicities? + # pick two random edges without creating edge list + # choose source node indices from discrete distribution + (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) + if ui == xi: + continue # same source, skip + u = keys[ui] # convert index to label + x = keys[xi] + # choose target uniformly from neighbors + v = seed.choice(list(G[u])) + y = seed.choice(list(G[x])) + if v == y: + continue # same target, skip + if (x not in G[u]) and (y not in G[v]): # don't create parallel edges + G.add_edge(u, x) + G.add_edge(v, y) + G.remove_edge(u, v) + G.remove_edge(x, y) + swapcount += 1 + if n >= max_tries: + e = ( + f"Maximum number of swap attempts ({n}) exceeded " + f"before desired swaps achieved ({nswap})." + ) + raise nx.NetworkXAlgorithmError(e) + n += 1 + return G + + +@py_random_state(3) +@nx._dispatch +def connected_double_edge_swap(G, nswap=1, _window_threshold=3, seed=None): + """Attempts the specified number of double-edge swaps in the graph `G`. + + A double-edge swap removes two randomly chosen edges `(u, v)` and `(x, + y)` and creates the new edges `(u, x)` and `(v, y)`:: + + u--v u v + becomes | | + x--y x y + + If either `(u, x)` or `(v, y)` already exist, then no swap is performed + so the actual number of swapped edges is always *at most* `nswap`. + + Parameters + ---------- + G : graph + An undirected graph + + nswap : integer (optional, default=1) + Number of double-edge swaps to perform + + _window_threshold : integer + + The window size below which connectedness of the graph will be checked + after each swap. 
+ + The "window" in this function is a dynamically updated integer that + represents the number of swap attempts to make before checking if the + graph remains connected. It is an optimization used to decrease the + running time of the algorithm in exchange for increased complexity of + implementation. + + If the window size is below this threshold, then the algorithm checks + after each swap if the graph remains connected by checking if there is a + path joining the two nodes whose edge was just removed. If the window + size is above this threshold, then the algorithm performs do all the + swaps in the window and only then check if the graph is still connected. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + int + The number of successful swaps + + Raises + ------ + + NetworkXError + + If the input graph is not connected, or if the graph has fewer than four + nodes. + + Notes + ----- + + The initial graph `G` must be connected, and the resulting graph is + connected. The graph `G` is modified in place. + + References + ---------- + .. [1] C. Gkantsidis and M. Mihail and E. Zegura, + The Markov chain simulation method for generating connected + power law random graphs, 2003. + http://citeseer.ist.psu.edu/gkantsidis03markov.html + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected") + if len(G) < 4: + raise nx.NetworkXError("Graph has fewer than four nodes.") + n = 0 + swapcount = 0 + deg = G.degree() + # Label key for nodes + dk = [n for n, d in G.degree()] + cdf = nx.utils.cumulative_distribution([d for n, d in G.degree()]) + discrete_sequence = nx.utils.discrete_sequence + window = 1 + while n < nswap: + wcount = 0 + swapped = [] + # If the window is small, we just check each time whether the graph is + # connected by checking if the nodes that were just separated are still + # connected. + if window < _window_threshold: + # This Boolean keeps track of whether there was a failure or not. + fail = False + while wcount < window and n < nswap: + # Pick two random edges without creating the edge list. Choose + # source nodes from the discrete degree distribution. + (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) + # If the source nodes are the same, skip this pair. + if ui == xi: + continue + # Convert an index to a node label. + u = dk[ui] + x = dk[xi] + # Choose targets uniformly from neighbors. + v = seed.choice(list(G.neighbors(u))) + y = seed.choice(list(G.neighbors(x))) + # If the target nodes are the same, skip this pair. + if v == y: + continue + if x not in G[u] and y not in G[v]: + G.remove_edge(u, v) + G.remove_edge(x, y) + G.add_edge(u, x) + G.add_edge(v, y) + swapped.append((u, v, x, y)) + swapcount += 1 + n += 1 + # If G remains connected... + if nx.has_path(G, u, v): + wcount += 1 + # Otherwise, undo the changes. + else: + G.add_edge(u, v) + G.add_edge(x, y) + G.remove_edge(u, x) + G.remove_edge(v, y) + swapcount -= 1 + fail = True + # If one of the swaps failed, reduce the window size. + if fail: + window = math.ceil(window / 2) + else: + window += 1 + # If the window is large, then there is a good chance that a bunch of + # swaps will work. It's quicker to do all those swaps first and then + # check if the graph remains connected. + else: + while wcount < window and n < nswap: + # Pick two random edges without creating the edge list. Choose + # source nodes from the discrete degree distribution. 
+ (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) + # If the source nodes are the same, skip this pair. + if ui == xi: + continue + # Convert an index to a node label. + u = dk[ui] + x = dk[xi] + # Choose targets uniformly from neighbors. + v = seed.choice(list(G.neighbors(u))) + y = seed.choice(list(G.neighbors(x))) + # If the target nodes are the same, skip this pair. + if v == y: + continue + if x not in G[u] and y not in G[v]: + G.remove_edge(u, v) + G.remove_edge(x, y) + G.add_edge(u, x) + G.add_edge(v, y) + swapped.append((u, v, x, y)) + swapcount += 1 + n += 1 + wcount += 1 + # If the graph remains connected, increase the window size. + if nx.is_connected(G): + window += 1 + # Otherwise, undo the changes from the previous window and decrease + # the window size. + else: + while swapped: + (u, v, x, y) = swapped.pop() + G.add_edge(u, v) + G.add_edge(x, y) + G.remove_edge(u, x) + G.remove_edge(v, y) + swapcount -= 1 + window = math.ceil(window / 2) + return swapcount diff --git a/phivenv/Lib/site-packages/networkx/algorithms/threshold.py b/phivenv/Lib/site-packages/networkx/algorithms/threshold.py new file mode 100644 index 0000000000000000000000000000000000000000..0839321de0d9037f64e1f654c00ecd1e5ccfb470 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/threshold.py @@ -0,0 +1,979 @@ +""" +Threshold Graphs - Creation, manipulation and identification. +""" +from math import sqrt + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["is_threshold_graph", "find_threshold_graph"] + + +@nx._dispatch +def is_threshold_graph(G): + """ + Returns `True` if `G` is a threshold graph. + + Parameters + ---------- + G : NetworkX graph instance + An instance of `Graph`, `DiGraph`, `MultiGraph` or `MultiDiGraph` + + Returns + ------- + bool + `True` if `G` is a threshold graph, `False` otherwise. + + Examples + -------- + >>> from networkx.algorithms.threshold import is_threshold_graph + >>> G = nx.path_graph(3) + >>> is_threshold_graph(G) + True + >>> G = nx.barbell_graph(3, 3) + >>> is_threshold_graph(G) + False + + References + ---------- + .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph + """ + return is_threshold_sequence([d for n, d in G.degree()]) + + +def is_threshold_sequence(degree_sequence): + """ + Returns True if the sequence is a threshold degree sequence. + + Uses the property that a threshold graph must be constructed by + adding either dominating or isolated nodes. Thus, it can be + deconstructed iteratively by removing a node of degree zero or a + node that connects to the remaining nodes. If this deconstruction + fails then the sequence is not a threshold sequence. + """ + ds = degree_sequence[:] # get a copy so we don't destroy original + ds.sort() + while ds: + if ds[0] == 0: # if isolated node + ds.pop(0) # remove it + continue + if ds[-1] != len(ds) - 1: # is the largest degree node dominating? + return False # no, not a threshold degree sequence + ds.pop() # yes, largest is the dominating node + ds = [d - 1 for d in ds] # remove it and decrement all degrees + return True + + +def creation_sequence(degree_sequence, with_labels=False, compact=False): + """ + Determines the creation sequence for the given threshold degree sequence. + + The creation sequence is a list of single characters 'd' + or 'i': 'd' for dominating or 'i' for isolated vertices. + Dominating vertices are connected to all vertices present when it + is added. The first node added is by convention 'd'. 
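+    For example, the path graph on three nodes has degree sequence
+    [1, 2, 1] and creation sequence ['d', 'i', 'd'].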
+ This list can be converted to a string if desired using "".join(cs) + + If with_labels==True: + Returns a list of 2-tuples containing the vertex number + and a character 'd' or 'i' which describes the type of vertex. + + If compact==True: + Returns the creation sequence in a compact form that is the number + of 'i's and 'd's alternating. + Examples: + [1,2,2,3] represents d,i,i,d,d,i,i,i + [3,1,2] represents d,d,d,i,d,d + + Notice that the first number is the first vertex to be used for + construction and so is always 'd'. + + with_labels and compact cannot both be True. + + Returns None if the sequence is not a threshold sequence + """ + if with_labels and compact: + raise ValueError("compact sequences cannot be labeled") + + # make an indexed copy + if isinstance(degree_sequence, dict): # labeled degree sequence + ds = [[degree, label] for (label, degree) in degree_sequence.items()] + else: + ds = [[d, i] for i, d in enumerate(degree_sequence)] + ds.sort() + cs = [] # creation sequence + while ds: + if ds[0][0] == 0: # isolated node + (d, v) = ds.pop(0) + if len(ds) > 0: # make sure we start with a d + cs.insert(0, (v, "i")) + else: + cs.insert(0, (v, "d")) + continue + if ds[-1][0] != len(ds) - 1: # Not dominating node + return None # not a threshold degree sequence + (d, v) = ds.pop() + cs.insert(0, (v, "d")) + ds = [[d[0] - 1, d[1]] for d in ds] # decrement due to removing node + + if with_labels: + return cs + if compact: + return make_compact(cs) + return [v[1] for v in cs] # not labeled + + +def make_compact(creation_sequence): + """ + Returns the creation sequence in a compact form + that is the number of 'i's and 'd's alternating. + + Examples + -------- + >>> from networkx.algorithms.threshold import make_compact + >>> make_compact(["d", "i", "i", "d", "d", "i", "i", "i"]) + [1, 2, 2, 3] + >>> make_compact(["d", "d", "d", "i", "d", "d"]) + [3, 1, 2] + + Notice that the first number is the first vertex + to be used for construction and so is always 'd'. + + Labeled creation sequences lose their labels in the + compact representation. + + >>> make_compact([3, 1, 2]) + [3, 1, 2] + """ + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + cs = creation_sequence[:] + elif isinstance(first, tuple): # labeled creation sequence + cs = [s[1] for s in creation_sequence] + elif isinstance(first, int): # compact creation sequence + return creation_sequence + else: + raise TypeError("Not a valid creation sequence type") + + ccs = [] + count = 1 # count the run lengths of d's or i's. + for i in range(1, len(cs)): + if cs[i] == cs[i - 1]: + count += 1 + else: + ccs.append(count) + count = 1 + ccs.append(count) # don't forget the last one + return ccs + + +def uncompact(creation_sequence): + """ + Converts a compact creation sequence for a threshold + graph to a standard creation sequence (unlabeled). + If the creation_sequence is already standard, return it. + See creation_sequence. 
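+
+    A quick check, inverting the `make_compact` examples above:
+
+    >>> from networkx.algorithms.threshold import uncompact
+    >>> uncompact([3, 1, 2])
+    ['d', 'd', 'd', 'i', 'd', 'd']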
+ """ + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + return creation_sequence + elif isinstance(first, tuple): # labeled creation sequence + return creation_sequence + elif isinstance(first, int): # compact creation sequence + ccscopy = creation_sequence[:] + else: + raise TypeError("Not a valid creation sequence type") + cs = [] + while ccscopy: + cs.extend(ccscopy.pop(0) * ["d"]) + if ccscopy: + cs.extend(ccscopy.pop(0) * ["i"]) + return cs + + +def creation_sequence_to_weights(creation_sequence): + """ + Returns a list of node weights which create the threshold + graph designated by the creation sequence. The weights + are scaled so that the threshold is 1.0. The order of the + nodes is the same as that in the creation sequence. + """ + # Turn input sequence into a labeled creation sequence + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + if isinstance(creation_sequence, list): + wseq = creation_sequence[:] + else: + wseq = list(creation_sequence) # string like 'ddidid' + elif isinstance(first, tuple): # labeled creation sequence + wseq = [v[1] for v in creation_sequence] + elif isinstance(first, int): # compact creation sequence + wseq = uncompact(creation_sequence) + else: + raise TypeError("Not a valid creation sequence type") + # pass through twice--first backwards + wseq.reverse() + w = 0 + prev = "i" + for j, s in enumerate(wseq): + if s == "i": + wseq[j] = w + prev = s + elif prev == "i": + prev = s + w += 1 + wseq.reverse() # now pass through forwards + for j, s in enumerate(wseq): + if s == "d": + wseq[j] = w + prev = s + elif prev == "d": + prev = s + w += 1 + # Now scale weights + if prev == "d": + w += 1 + wscale = 1 / w + return [ww * wscale for ww in wseq] + # return wseq + + +def weights_to_creation_sequence( + weights, threshold=1, with_labels=False, compact=False +): + """ + Returns a creation sequence for a threshold graph + determined by the weights and threshold given as input. + If the sum of two node weights is greater than the + threshold value, an edge is created between these nodes. + + The creation sequence is a list of single characters 'd' + or 'i': 'd' for dominating or 'i' for isolated vertices. + Dominating vertices are connected to all vertices present + when it is added. The first node added is by convention 'd'. + + If with_labels==True: + Returns a list of 2-tuples containing the vertex number + and a character 'd' or 'i' which describes the type of vertex. + + If compact==True: + Returns the creation sequence in a compact form that is the number + of 'i's and 'd's alternating. + Examples: + [1,2,2,3] represents d,i,i,d,d,i,i,i + [3,1,2] represents d,d,d,i,d,d + + Notice that the first number is the first vertex to be used for + construction and so is always 'd'. + + with_labels and compact cannot both be True. 
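+
+    A small worked example: with the default threshold of 1, only the
+    pairs that include the weight-0.8 node sum to more than 1, so that
+    node is added last as a dominating vertex (the leading 'd' is the
+    construction convention noted above):
+
+    >>> from networkx.algorithms.threshold import weights_to_creation_sequence
+    >>> weights_to_creation_sequence([0.3, 0.6, 0.8])
+    ['d', 'i', 'd']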
+    """
+    if with_labels and compact:
+        raise ValueError("compact sequences cannot be labeled")
+
+    # make an indexed copy
+    if isinstance(weights, dict):  # labeled weights
+        wseq = [[w, label] for (label, w) in weights.items()]
+    else:
+        wseq = [[w, i] for i, w in enumerate(weights)]
+    wseq.sort()
+    cs = []  # creation sequence
+    cutoff = threshold - wseq[-1][0]
+    while wseq:
+        if wseq[0][0] < cutoff:  # isolated node
+            (w, label) = wseq.pop(0)
+            cs.append((label, "i"))
+        else:
+            (w, label) = wseq.pop()
+            cs.append((label, "d"))
+            if wseq:  # guard: the pop above may have emptied the list
+                cutoff = threshold - wseq[-1][0]
+        if len(wseq) == 1:  # make sure we start with a d
+            (w, label) = wseq.pop()
+            cs.append((label, "d"))
+    # put in correct order
+    cs.reverse()
+
+    if with_labels:
+        return cs
+    if compact:
+        return make_compact(cs)
+    return [v[1] for v in cs]  # not labeled
+
+
+# Manipulating NetworkX.Graphs in context of threshold graphs
+@nx._dispatch(graphs=None)
+def threshold_graph(creation_sequence, create_using=None):
+    """
+    Create a threshold graph from the creation sequence or compact
+    creation_sequence.
+
+    The input sequence can be a
+
+    creation sequence (e.g. ['d','i','d','d','d','i'])
+    labeled creation sequence (e.g. [(0,'d'),(2,'d'),(1,'i')])
+    compact creation sequence (e.g. [2,1,1,2,0])
+
+    Use cs=creation_sequence(degree_sequence,with_labels=True)
+    to convert a degree sequence to a creation sequence.
+
+    Returns None if the sequence is not valid
+    """
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        ci = list(enumerate(creation_sequence))
+    elif isinstance(first, tuple):  # labeled creation sequence
+        ci = creation_sequence[:]
+    elif isinstance(first, int):  # compact creation sequence
+        cs = uncompact(creation_sequence)
+        ci = list(enumerate(cs))
+    else:
+        print("not a valid creation sequence type")
+        return None
+
+    G = nx.empty_graph(0, create_using)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    G.name = "Threshold Graph"
+
+    # add nodes and edges
+    # if type is 'i' just add the node
+    # if type is 'd' connect to every earlier node
+    while ci:
+        (v, node_type) = ci.pop(0)
+        if node_type == "d":  # dominating type, connect to all existing nodes
+            # We use `for u in list(G):` instead of
+            # `for u in G:` because we edit the graph `G` in
+            # the loop. Hence using an iterator will result in
+            # `RuntimeError: dictionary changed size during iteration`
+            for u in list(G):
+                G.add_edge(v, u)
+        G.add_node(v)
+    return G
+
+
+@nx._dispatch
+def find_alternating_4_cycle(G):
+    """
+    Returns False if there aren't any alternating 4 cycles.
+    Otherwise returns the cycle as [a,b,c,d] where (a,b)
+    and (c,d) are edges and (a,c) and (b,d) are not.
+    """
+    for u, v in G.edges():
+        for w in G.nodes():
+            if not G.has_edge(u, w) and u != w:
+                for x in G.neighbors(w):
+                    if not G.has_edge(v, x) and v != x:
+                        return [u, v, w, x]
+    return False
+
+
+@nx._dispatch
+def find_threshold_graph(G, create_using=None):
+    """
+    Returns a threshold subgraph that is close to largest in `G`.
+
+    The threshold graph will contain the largest degree node in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph instance
+        An instance of `Graph`, or `MultiDiGraph`
+    create_using : NetworkX graph class or `None` (default), optional
+        Type of graph to use when constructing the threshold graph.
+        If `None`, infer the appropriate graph type from the input.
+
+    Returns
+    -------
+    graph :
+        A graph instance representing the threshold graph
+
+    Examples
+    --------
+    >>> from networkx.algorithms.threshold import find_threshold_graph
+    >>> G = nx.barbell_graph(3, 3)
+    >>> T = find_threshold_graph(G)
+    >>> T.nodes # may vary
+    NodeView((7, 8, 5, 6))
+
+    References
+    ----------
+    .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph
+    """
+    return threshold_graph(find_creation_sequence(G), create_using)
+
+
+@nx._dispatch
+def find_creation_sequence(G):
+    """
+    Find a threshold subgraph that is close to largest in G.
+    Returns the labeled creation sequence of that threshold graph.
+    """
+    cs = []
+    # get a local pointer to the working part of the graph
+    H = G
+    while H.order() > 0:
+        # get new degree sequence on subgraph
+        dsdict = dict(H.degree())
+        ds = [(d, v) for v, d in dsdict.items()]
+        ds.sort()
+        # Update threshold graph nodes
+        if ds[-1][0] == 0:  # all are isolated
+            cs.extend(zip(dsdict, ["i"] * (len(ds) - 1) + ["d"]))
+            break  # Done!
+        # pull off isolated nodes
+        while ds[0][0] == 0:
+            (d, iso) = ds.pop(0)
+            cs.append((iso, "i"))
+        # find new biggest node
+        (d, bigv) = ds.pop()
+        # add the star's edges to the threshold graph
+        cs.append((bigv, "d"))
+        # form subgraph of neighbors of big node
+        H = H.subgraph(H.neighbors(bigv))
+    cs.reverse()
+    return cs
+
+
+# Properties of Threshold Graphs
+def triangles(creation_sequence):
+    """
+    Compute number of triangles in the threshold graph with the
+    given creation sequence.
+    """
+    # shortcut algorithm that doesn't require computing number
+    # of triangles at each node.
+    cs = creation_sequence  # alias
+    dr = cs.count("d")  # number of d's in sequence
+    ntri = dr * (dr - 1) * (dr - 2) / 6  # number of triangles in the clique of dr d's
+    # now add dr choose 2 triangles for every 'i' in sequence where
+    # dr is the number of d's to the right of the current i
+    for i, typ in enumerate(cs):
+        if typ == "i":
+            ntri += dr * (dr - 1) / 2
+        else:
+            dr -= 1
+    return ntri
+
+
+def triangle_sequence(creation_sequence):
+    """
+    Return triangle sequence for the given threshold graph creation sequence.
+
+    """
+    cs = creation_sequence
+    seq = []
+    dr = cs.count("d")  # number of d's to the right of the current pos
+    dcur = (dr - 1) * (dr - 2) // 2  # number of triangles through a node of clique dr
+    irun = 0  # number of i's in the last run
+    drun = 0  # number of d's in the last run
+    for i, sym in enumerate(cs):
+        if sym == "d":
+            drun += 1
+            tri = dcur + (dr - 1) * irun  # new triangles at this d
+        else:  # cs[i]="i":
+            if prevsym == "d":  # new string of i's
+                dcur += (dr - 1) * irun  # accumulate shared triangles
+                irun = 0  # reset i run counter
+                dr -= drun  # reduce number of d's to right
+                drun = 0  # reset d run counter
+            irun += 1
+            tri = dr * (dr - 1) // 2  # new triangles at this i
+        seq.append(tri)
+        prevsym = sym
+    return seq
+
+
+def cluster_sequence(creation_sequence):
+    """
+    Return cluster sequence for the given threshold graph creation sequence.
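+
+    For instance, in the graph built from 'd','d','i','d' the first two
+    nodes lie in a single triangle with the final dominating node, while
+    the isolated-at-insertion node never does:
+
+    >>> from networkx.algorithms.threshold import cluster_sequence
+    >>> cluster_sequence(["d", "d", "i", "d"])
+    [1.0, 1.0, 0, 0.3333333333333333]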
+ """ + triseq = triangle_sequence(creation_sequence) + degseq = degree_sequence(creation_sequence) + cseq = [] + for i, deg in enumerate(degseq): + tri = triseq[i] + if deg <= 1: # isolated vertex or single pair gets cc 0 + cseq.append(0) + continue + max_size = (deg * (deg - 1)) // 2 + cseq.append(tri / max_size) + return cseq + + +def degree_sequence(creation_sequence): + """ + Return degree sequence for the threshold graph with the given + creation sequence + """ + cs = creation_sequence # alias + seq = [] + rd = cs.count("d") # number of d to the right + for i, sym in enumerate(cs): + if sym == "d": + rd -= 1 + seq.append(rd + i) + else: + seq.append(rd) + return seq + + +def density(creation_sequence): + """ + Return the density of the graph with this creation_sequence. + The density is the fraction of possible edges present. + """ + N = len(creation_sequence) + two_size = sum(degree_sequence(creation_sequence)) + two_possible = N * (N - 1) + den = two_size / two_possible + return den + + +def degree_correlation(creation_sequence): + """ + Return the degree-degree correlation over all edges. + """ + cs = creation_sequence + s1 = 0 # deg_i*deg_j + s2 = 0 # deg_i^2+deg_j^2 + s3 = 0 # deg_i+deg_j + m = 0 # number of edges + rd = cs.count("d") # number of d nodes to the right + rdi = [i for i, sym in enumerate(cs) if sym == "d"] # index of "d"s + ds = degree_sequence(cs) + for i, sym in enumerate(cs): + if sym == "d": + if i != rdi[0]: + print("Logic error in degree_correlation", i, rdi) + raise ValueError + rdi.pop(0) + degi = ds[i] + for dj in rdi: + degj = ds[dj] + s1 += degj * degi + s2 += degi**2 + degj**2 + s3 += degi + degj + m += 1 + denom = 2 * m * s2 - s3 * s3 + numer = 4 * m * s1 - s3 * s3 + if denom == 0: + if numer == 0: + return 1 + raise ValueError(f"Zero Denominator but Numerator is {numer}") + return numer / denom + + +def shortest_path(creation_sequence, u, v): + """ + Find the shortest path between u and v in a + threshold graph G with the given creation_sequence. + + For an unlabeled creation_sequence, the vertices + u and v must be integers in (0,len(sequence)) referring + to the position of the desired vertices in the sequence. + + For a labeled creation_sequence, u and v are labels of vertices. + + Use cs=creation_sequence(degree_sequence,with_labels=True) + to convert a degree sequence to a creation sequence. + + Returns a list of vertices from u to v. 
+    Example: if they are neighbors, it returns [u, v]. If no path
+    exists between them, -1 is returned.
+    """
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        cs = [(i, creation_sequence[i]) for i in range(len(creation_sequence))]
+    elif isinstance(first, tuple):  # labeled creation sequence
+        cs = creation_sequence[:]
+    elif isinstance(first, int):  # compact creation sequence
+        ci = uncompact(creation_sequence)
+        cs = [(i, ci[i]) for i in range(len(ci))]
+    else:
+        raise TypeError("Not a valid creation sequence type")
+
+    verts = [s[0] for s in cs]
+    if v not in verts:
+        raise ValueError(f"Vertex {v} not in graph from creation_sequence")
+    if u not in verts:
+        raise ValueError(f"Vertex {u} not in graph from creation_sequence")
+    # Done checking
+    if u == v:
+        return [u]
+
+    uindex = verts.index(u)
+    vindex = verts.index(v)
+    bigind = max(uindex, vindex)
+    if cs[bigind][1] == "d":
+        return [u, v]
+    # must be that cs[bigind][1]=='i'
+    cs = cs[bigind:]
+    while cs:
+        vert = cs.pop()
+        if vert[1] == "d":
+            return [u, vert[0], v]
+    # Everything from bigind onward is type 'i', so there is no connection
+    return -1
+
+
+def shortest_path_length(creation_sequence, i):
+    """
+    Return the shortest path length from indicated node to
+    every other node for the threshold graph with the given
+    creation sequence.
+    Node is indicated by index i in creation_sequence unless
+    creation_sequence is labeled, in which case `i` is taken to
+    be the label of the node.
+
+    Path lengths in threshold graphs are at most 2.
+    Length to unreachable nodes is set to -1.
+    """
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        if isinstance(creation_sequence, list):
+            cs = creation_sequence[:]
+        else:
+            cs = list(creation_sequence)
+    elif isinstance(first, tuple):  # labeled creation sequence
+        cs = [v[1] for v in creation_sequence]
+        i = [v[0] for v in creation_sequence].index(i)
+    elif isinstance(first, int):  # compact creation sequence
+        cs = uncompact(creation_sequence)
+    else:
+        raise TypeError("Not a valid creation sequence type")
+
+    # Compute
+    N = len(cs)
+    spl = [2] * N  # length 2 to every node
+    spl[i] = 0  # except self which is 0
+    # 1 for all d's to the right
+    for j in range(i + 1, N):
+        if cs[j] == "d":
+            spl[j] = 1
+    if cs[i] == "d":  # 1 for all nodes to the left
+        for j in range(i):
+            spl[j] = 1
+    # and -1 for any trailing i to indicate unreachable
+    for j in range(N - 1, 0, -1):
+        if cs[j] == "d":
+            break
+        spl[j] = -1
+    return spl
+
+
+def betweenness_sequence(creation_sequence, normalized=True):
+    """
+    Return betweenness for the threshold graph with the given creation
+    sequence. By default the values are scaled to the interval [0, 1]
+    by dividing by (n-1)*(n-2); pass ``normalized=False`` to get the
+    raw path counts.
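+
+    For instance, 'd','i','d' builds a path graph whose last-added node
+    is the center:
+
+    >>> from networkx.algorithms.threshold import betweenness_sequence
+    >>> betweenness_sequence(["d", "i", "d"])
+    [0.0, 0.0, 1.0]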
+ """ + cs = creation_sequence + seq = [] # betweenness + lastchar = "d" # first node is always a 'd' + dr = float(cs.count("d")) # number of d's to the right of current pos + irun = 0 # number of i's in the last run + drun = 0 # number of d's in the last run + dlast = 0.0 # betweenness of last d + for i, c in enumerate(cs): + if c == "d": # cs[i]=="d": + # betweenness = amt shared with earlier d's and i's + # + new isolated nodes covered + # + new paths to all previous nodes + b = dlast + (irun - 1) * irun / dr + 2 * irun * (i - drun - irun) / dr + drun += 1 # update counter + else: # cs[i]="i": + if lastchar == "d": # if this is a new run of i's + dlast = b # accumulate betweenness + dr -= drun # update number of d's to the right + drun = 0 # reset d counter + irun = 0 # reset i counter + b = 0 # isolated nodes have zero betweenness + irun += 1 # add another i to the run + seq.append(float(b)) + lastchar = c + + # normalize by the number of possible shortest paths + if normalized: + order = len(cs) + scale = 1.0 / ((order - 1) * (order - 2)) + seq = [s * scale for s in seq] + + return seq + + +def eigenvectors(creation_sequence): + """ + Return a 2-tuple of Laplacian eigenvalues and eigenvectors + for the threshold network with creation_sequence. + The first value is a list of eigenvalues. + The second value is a list of eigenvectors. + The lists are in the same order so corresponding eigenvectors + and eigenvalues are in the same position in the two lists. + + Notice that the order of the eigenvalues returned by eigenvalues(cs) + may not correspond to the order of these eigenvectors. + """ + ccs = make_compact(creation_sequence) + N = sum(ccs) + vec = [0] * N + val = vec[:] + # get number of type d nodes to the right (all for first node) + dr = sum(ccs[::2]) + + nn = ccs[0] + vec[0] = [1.0 / sqrt(N)] * N + val[0] = 0 + e = dr + dr -= nn + type_d = True + i = 1 + dd = 1 + while dd < nn: + scale = 1.0 / sqrt(dd * dd + i) + vec[i] = i * [-scale] + [dd * scale] + [0] * (N - i - 1) + val[i] = e + i += 1 + dd += 1 + if len(ccs) == 1: + return (val, vec) + for nn in ccs[1:]: + scale = 1.0 / sqrt(nn * i * (i + nn)) + vec[i] = i * [-nn * scale] + nn * [i * scale] + [0] * (N - i - nn) + # find eigenvalue + type_d = not type_d + if type_d: + e = i + dr + dr -= nn + else: + e = dr + val[i] = e + st = i + i += 1 + dd = 1 + while dd < nn: + scale = 1.0 / sqrt(i - st + dd * dd) + vec[i] = [0] * st + (i - st) * [-scale] + [dd * scale] + [0] * (N - i - 1) + val[i] = e + i += 1 + dd += 1 + return (val, vec) + + +def spectral_projection(u, eigenpairs): + """ + Returns the coefficients of each eigenvector + in a projection of the vector u onto the normalized + eigenvectors which are contained in eigenpairs. + + eigenpairs should be a list of two objects. The + first is a list of eigenvalues and the second a list + of eigenvectors. The eigenvectors should be lists. + + There's not a lot of error checking on lengths of + arrays, etc. so be careful. + """ + coeff = [] + evect = eigenpairs[1] + for ev in evect: + c = sum(evv * uv for (evv, uv) in zip(ev, u)) + coeff.append(c) + return coeff + + +def eigenvalues(creation_sequence): + """ + Return sequence of eigenvalues of the Laplacian of the threshold + graph for the given creation_sequence. + + Based on the Ferrer's diagram method. The spectrum is integral + and is the conjugate of the degree sequence. 
+
+    See::
+
+        @Article{degree-merris-1994,
+            author = {Russell Merris},
+            title = {Degree maximal graphs are Laplacian integral},
+            journal = {Linear Algebra Appl.},
+            year = {1994},
+            volume = {199},
+            pages = {381--389},
+        }
+
+    """
+    degseq = degree_sequence(creation_sequence)
+    degseq.sort()
+    eiglist = []  # zero is always one eigenvalue
+    eig = 0
+    row = len(degseq)
+    bigdeg = degseq.pop()
+    while row:
+        if bigdeg < row:
+            eiglist.append(eig)
+            row -= 1
+        else:
+            eig += 1
+            if degseq:
+                bigdeg = degseq.pop()
+            else:
+                bigdeg = 0
+    return eiglist
+
+
+# Threshold graph creation routines
+
+
+@py_random_state(2)
+def random_threshold_sequence(n, p, seed=None):
+    """
+    Create a random threshold sequence of size n.
+    A creation sequence is built by randomly choosing d's with
+    probability p and i's with probability 1-p.
+
+    s=nx.random_threshold_sequence(10,0.5)
+
+    returns a threshold sequence of length 10 with equal
+    probability of an i or a d at each position.
+
+    A "random" threshold graph can be built with
+
+    G=nx.threshold_graph(s)
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness<randomness>`.
+    """
+    if not (0 <= p <= 1):
+        raise ValueError("p must be in [0,1]")
+
+    cs = ["d"]  # threshold sequences always start with a d
+    for i in range(1, n):
+        if seed.random() < p:
+            cs.append("d")
+        else:
+            cs.append("i")
+    return cs
+
+
+# maybe *_d_threshold_sequence routines should
+# be (or be called from) a single routine with a more descriptive name
+# and a keyword parameter?
+def right_d_threshold_sequence(n, m):
+    """
+    Create a skewed threshold graph with a given number
+    of vertices (n) and a given number of edges (m).
+
+    The routine returns an unlabeled creation sequence
+    for the threshold graph.
+
+    Dominating vertices are packed toward the right (high) end of the
+    sequence, and the final 'd' is placed so that the edge count is
+    exactly m. Raises ValueError if m exceeds n*(n-1)/2.
+    """
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n isolated nodes
+
+    # m < n: not enough edges for a connected graph, so place a single
+    # dominating node that supplies exactly m edges
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # connected case: m > n - 1
+    ind = n - 1
+    sum = n - 1
+    while sum < m:
+        cs[ind] = "d"
+        ind -= 1
+        sum += ind
+    ind = m - (sum - ind)
+    cs[ind] = "d"
+    return cs
+
+
+def left_d_threshold_sequence(n, m):
+    """
+    Create a skewed threshold graph with a given number
+    of vertices (n) and a given number of edges (m).
+
+    The routine returns an unlabeled creation sequence
+    for the threshold graph.
+
+    The last vertex is made dominating to keep the graph connected, and
+    further dominating vertices are packed toward the left (low) end of
+    the sequence; an overshoot is corrected by flipping one early 'd'
+    back to 'i'. Raises ValueError if m exceeds n*(n-1)/2.
+    """
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n isolated nodes
+
+    # m < n: not enough edges for a connected graph, so place a single
+    # dominating node that supplies exactly m edges
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # connected case: m > n - 1
+    cs[n - 1] = "d"
+    sum = n - 1
+    ind = 1
+    while sum < m:
+        cs[ind] = "d"
+        sum += ind
+        ind += 1
+    if sum > m:  # be sure not to change the first vertex
+        cs[sum - m] = "i"
+    return cs
+
+
+@py_random_state(3)
+def swap_d(cs, p_split=1.0, p_combine=1.0, seed=None):
+    """
+    Perform a "swap" operation on a threshold sequence.
+
+    The swap performs one split and one combine operation on the 'd's of
+    a creation sequence for a threshold graph. This maintains the number
+    of nodes and edges in the graph, but shifts edges from node to node,
+    so the resulting sequence is still a threshold sequence.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness<randomness>`.
+ """ + # preprocess the creation sequence + dlist = [i for (i, node_type) in enumerate(cs[1:-1]) if node_type == "d"] + # split + if seed.random() < p_split: + choice = seed.choice(dlist) + split_to = seed.choice(range(choice)) + flip_side = choice - split_to + if split_to != flip_side and cs[split_to] == "i" and cs[flip_side] == "i": + cs[choice] = "i" + cs[split_to] = "d" + cs[flip_side] = "d" + dlist.remove(choice) + # don't add or combine may reverse this action + # dlist.extend([split_to,flip_side]) + # print >>sys.stderr,"split at %s to %s and %s"%(choice,split_to,flip_side) + # combine + if seed.random() < p_combine and dlist: + first_choice = seed.choice(dlist) + second_choice = seed.choice(dlist) + target = first_choice + second_choice + if target >= len(cs) or cs[target] == "d" or first_choice == second_choice: + return cs + # OK to combine + cs[first_choice] = "i" + cs[second_choice] = "i" + cs[target] = "d" + # print >>sys.stderr,"combine %s and %s to make %s."%(first_choice,second_choice,target) + + return cs diff --git a/phivenv/Lib/site-packages/networkx/algorithms/time_dependent.py b/phivenv/Lib/site-packages/networkx/algorithms/time_dependent.py new file mode 100644 index 0000000000000000000000000000000000000000..e83f42ad92cb8eeba13041f83e791dc166ee473c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/time_dependent.py @@ -0,0 +1,142 @@ +"""Time dependent algorithms.""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["cd_index"] + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch(node_attrs={"time": None, "weight": 1}) +def cd_index(G, node, time_delta, *, time="time", weight=None): + r"""Compute the CD index for `node` within the graph `G`. + + Calculates the CD index for the given node of the graph, + considering only its predecessors who have the `time` attribute + smaller than or equal to the `time` attribute of the `node` + plus `time_delta`. + + Parameters + ---------- + G : graph + A directed networkx graph whose nodes have `time` attributes and optionally + `weight` attributes (if a weight is not given, it is considered 1). + node : node + The node for which the CD index is calculated. + time_delta : numeric or timedelta + Amount of time after the `time` attribute of the `node`. The value of + `time_delta` must support comparison with the `time` node attribute. For + example, if the `time` attribute of the nodes are `datetime.datetime` + objects, then `time_delta` should be a `datetime.timedelta` object. + time : string (Optional, default is "time") + The name of the node attribute that will be used for the calculations. + weight : string (Optional, default is None) + The name of the node attribute used as weight. + + Returns + ------- + float + The CD index calculated for the node `node` within the graph `G`. + + Raises + ------ + NetworkXError + If not all nodes have a `time` attribute or + `time_delta` and `time` attribute types are not compatible or + `n` equals 0. + + NetworkXNotImplemented + If `G` is a non-directed graph or a multigraph. + + Examples + -------- + >>> from datetime import datetime, timedelta + >>> G = nx.DiGraph() + >>> nodes = { + ... 1: {"time": datetime(2015, 1, 1)}, + ... 2: {"time": datetime(2012, 1, 1), 'weight': 4}, + ... 3: {"time": datetime(2010, 1, 1)}, + ... 4: {"time": datetime(2008, 1, 1)}, + ... 5: {"time": datetime(2014, 1, 1)} + ... 
}
+    >>> G.add_nodes_from([(n, nodes[n]) for n in nodes])
+    >>> edges = [(1, 3), (1, 4), (2, 3), (3, 4), (3, 5)]
+    >>> G.add_edges_from(edges)
+    >>> delta = timedelta(days=5 * 365)
+    >>> nx.cd_index(G, 3, time_delta=delta, time="time")
+    0.5
+    >>> nx.cd_index(G, 3, time_delta=delta, time="time", weight="weight")
+    0.12
+
+    Integers can also be used for the time values:
+
+    >>> node_times = {1: 2015, 2: 2012, 3: 2010, 4: 2008, 5: 2014}
+    >>> nx.set_node_attributes(G, node_times, "new_time")
+    >>> nx.cd_index(G, 3, time_delta=4, time="new_time")
+    0.5
+    >>> nx.cd_index(G, 3, time_delta=4, time="new_time", weight="weight")
+    0.12
+
+    Notes
+    -----
+    This method implements the algorithm for calculating the CD index,
+    as described in the paper by Funk and Owen-Smith [1]_. The CD index
+    is used to check how consolidating or destabilizing a patent is,
+    hence the nodes of the graph represent patents and the edges show
+    the citations between these patents. The mathematical model is given
+    below:
+
+    .. math::
+        CD_{t}=\frac{1}{n_{t}}\sum_{i=1}^{n}\frac{-2f_{it}b_{it}+f_{it}}{w_{it}},
+
+    where `f_{it}` equals 1 if `i` cites the focal patent else 0, `b_{it}`
+    equals 1 if `i` cites any of the focal patent's successors else 0,
+    `n_{t}` is the number of forward citations at time `t` and `w_{it}`
+    is the weight for patent `i` at time `t`.
+
+    The `datetime.timedelta` package can lead to off-by-one issues when
+    converting from years to days. In the example above
+    `timedelta(days=5 * 365)` looks like 5 years, but it isn't because of
+    leap year days. So it gives the same result as
+    `timedelta(days=4 * 365)`. But using `timedelta(days=5 * 365 + 1)`
+    gives a 5 year delta **for this choice of years** but may not if the
+    5 year gap has more than 1 leap year. To avoid these issues, use
+    integers to represent years, or be very careful when you convert
+    units of time.
+
+    References
+    ----------
+    .. [1] Funk, Russell J., and Jason Owen-Smith.
+       "A dynamic network measure of technological change."
+       Management science 63, no. 3 (2017): 791-817.
+       http://russellfunk.org/cdindex/static/papers/funk_ms_2017.pdf
+
+    """
+    if not all(time in G.nodes[n] for n in G):
+        raise nx.NetworkXError("Not all nodes have a 'time' attribute.")
+
+    try:
+        # get target_date
+        target_date = G.nodes[node][time] + time_delta
+        # keep the predecessors that existed before the target date
+        pred = {i for i in G.pred[node] if G.nodes[i][time] <= target_date}
+    except TypeError:  # the `time` and `time_delta` types are incompatible
+        raise nx.NetworkXError(
+            "Addition and comparison are not supported between 'time_delta' "
+            "and 'time' types."
+ ) + + # -1 if any edge between node's predecessors and node's successors, else 1 + b = [-1 if any(j in G[i] for j in G[node]) else 1 for i in pred] + + # n is size of the union of the focal node's predecessors and its successors' predecessors + n = len(pred.union(*(G.pred[s].keys() - {node} for s in G[node]))) + if n == 0: + raise nx.NetworkXError("The cd index cannot be defined.") + + # calculate cd index + if weight is None: + return round(sum(bi for bi in b) / n, 2) + else: + # If a node has the specified weight attribute, its weight is used in the calculation + # otherwise, a weight of 1 is assumed for that node + weights = [G.nodes[i].get(weight, 1) for i in pred] + return round(sum(bi / wt for bi, wt in zip(b, weights)) / n, 2) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/tournament.py b/phivenv/Lib/site-packages/networkx/algorithms/tournament.py new file mode 100644 index 0000000000000000000000000000000000000000..0b164cb3b16cb74a77a15225e5ea8b1d8e8fdad2 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/tournament.py @@ -0,0 +1,406 @@ +"""Functions concerning tournament graphs. + +A `tournament graph`_ is a complete oriented graph. In other words, it +is a directed graph in which there is exactly one directed edge joining +each pair of distinct nodes. For each function in this module that +accepts a graph as input, you must provide a tournament graph. The +responsibility is on the caller to ensure that the graph is a tournament +graph: + + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + >>> nx.is_tournament(G) + True + +To access the functions in this module, you must access them through the +:mod:`networkx.tournament` module:: + + >>> nx.tournament.is_reachable(G, 0, 1) + True + +.. _tournament graph: https://en.wikipedia.org/wiki/Tournament_%28graph_theory%29 + +""" +from itertools import combinations + +import networkx as nx +from networkx.algorithms.simple_paths import is_simple_path as is_path +from networkx.utils import arbitrary_element, not_implemented_for, py_random_state + +__all__ = [ + "hamiltonian_path", + "is_reachable", + "is_strongly_connected", + "is_tournament", + "random_tournament", + "score_sequence", +] + + +def index_satisfying(iterable, condition): + """Returns the index of the first element in `iterable` that + satisfies the given condition. + + If no such element is found (that is, when the iterable is + exhausted), this returns the length of the iterable (that is, one + greater than the last index of the iterable). + + `iterable` must not be empty. If `iterable` is empty, this + function raises :exc:`ValueError`. + + """ + # Pre-condition: iterable must not be empty. + for i, x in enumerate(iterable): + if condition(x): + return i + # If we reach the end of the iterable without finding an element + # that satisfies the condition, return the length of the iterable, + # which is one greater than the index of its last element. If the + # iterable was empty, `i` will not be defined, so we raise an + # exception. + try: + return i + 1 + except NameError as err: + raise ValueError("iterable must be non-empty") from err + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch +def is_tournament(G): + """Returns True if and only if `G` is a tournament. + + A tournament is a directed graph, with neither self-loops nor + multi-edges, in which there is exactly one directed edge joining + each pair of distinct nodes. 
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    Returns
+    -------
+    bool
+        Whether the given graph is a tournament graph.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0)])
+    >>> nx.is_tournament(G)
+    True
+
+    Notes
+    -----
+    Some definitions require a self-loop on each node, but that is not
+    the convention used here.
+
+    """
+    # In a tournament, there is exactly one directed edge joining each pair.
+    return (
+        all((v in G[u]) ^ (u in G[v]) for u, v in combinations(G, 2))
+        and nx.number_of_selfloops(G) == 0
+    )
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def hamiltonian_path(G):
+    """Returns a Hamiltonian path in the given tournament graph.
+
+    Each tournament has a Hamiltonian path. If, furthermore, the
+    tournament is strongly connected, then the returned Hamiltonian path
+    is a Hamiltonian cycle (by joining the endpoints of the path).
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    Returns
+    -------
+    path : list
+        A list of nodes which form a Hamiltonian path in `G`.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)])
+    >>> nx.is_tournament(G)
+    True
+    >>> nx.tournament.hamiltonian_path(G)
+    [0, 1, 2, 3]
+
+    Notes
+    -----
+    This is a recursive implementation with an asymptotic running time
+    of $O(n^2)$, ignoring multiplicative polylogarithmic factors, where
+    $n$ is the number of nodes in the graph.
+
+    """
+    if len(G) == 0:
+        return []
+    if len(G) == 1:
+        return [arbitrary_element(G)]
+    v = arbitrary_element(G)
+    hampath = hamiltonian_path(G.subgraph(set(G) - {v}))
+    # Get the index of the first node in the path that does *not* have
+    # an edge to `v`, then insert `v` before that node.
+    index = index_satisfying(hampath, lambda u: v not in G[u])
+    hampath.insert(index, v)
+    return hampath
+
+
+@py_random_state(1)
+@nx._dispatch(graphs=None)
+def random_tournament(n, seed=None):
+    r"""Returns a random tournament graph on `n` nodes.
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes in the returned graph.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness<randomness>`.
+
+    Returns
+    -------
+    G : DiGraph
+        A tournament on `n` nodes, with exactly one directed edge joining
+        each pair of distinct nodes.
+
+    Notes
+    -----
+    This algorithm adds, for each pair of distinct nodes, an edge with
+    uniformly random orientation. In other words, `\binom{n}{2}` flips
+    of an unbiased coin decide the orientations of the edges in the
+    graph.
+
+    """
+    # Flip an unbiased coin for each pair of distinct nodes.
+    coins = (seed.random() for i in range((n * (n - 1)) // 2))
+    pairs = combinations(range(n), 2)
+    edges = ((u, v) if r < 0.5 else (v, u) for (u, v), r in zip(pairs, coins))
+    return nx.DiGraph(edges)
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def score_sequence(G):
+    """Returns the score sequence for the given tournament graph.
+
+    The score sequence is the sorted list of the out-degrees of the
+    nodes of the graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    Returns
+    -------
+    list
+        A sorted list of the out-degrees of the nodes of `G`.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 0), (1, 3), (0, 2), (0, 3), (2, 1), (3, 2)])
+    >>> nx.is_tournament(G)
+    True
+    >>> nx.tournament.score_sequence(G)
+    [1, 1, 2, 2]
+
+    """
+    return sorted(d for v, d in G.out_degree())
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def tournament_matrix(G):
+    r"""Returns the tournament matrix for the given tournament graph.
+
+    This function requires SciPy.
+
+    The *tournament matrix* of a tournament graph with edge set *E* is
+    the matrix *T* defined by
+
+    .. math::
+
+       T_{i j} =
+       \begin{cases}
+       +1 & \text{if } (i, j) \in E \\
+       -1 & \text{if } (j, i) \in E \\
+       0 & \text{if } i = j.
+       \end{cases}
+
+    An equivalent definition is `T = A - A^T`, where *A* is the
+    adjacency matrix of the graph `G`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    Returns
+    -------
+    SciPy sparse array
+        The tournament matrix of the tournament graph `G`.
+
+    Raises
+    ------
+    ImportError
+        If SciPy is not available.
+
+    """
+    A = nx.adjacency_matrix(G)
+    return A - A.T
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def is_reachable(G, s, t):
+    """Decides whether there is a path from `s` to `t` in the
+    tournament.
+
+    This function is theoretically more efficient than the reachability
+    checks performed by the shortest path algorithms in
+    :mod:`networkx.algorithms.shortest_paths`.
+
+    The given graph **must** be a tournament, otherwise this function's
+    behavior is undefined.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    s : node
+        A node in the graph.
+
+    t : node
+        A node in the graph.
+
+    Returns
+    -------
+    bool
+        Whether there is a path from `s` to `t` in `G`.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 0), (1, 3), (1, 2), (2, 3), (2, 0), (3, 0)])
+    >>> nx.is_tournament(G)
+    True
+    >>> nx.tournament.is_reachable(G, 1, 3)
+    True
+    >>> nx.tournament.is_reachable(G, 3, 2)
+    False
+
+    Notes
+    -----
+    Although this function is more theoretically efficient than the
+    generic shortest path functions, a speedup requires the use of
+    parallelism. Though it may in the future, the current implementation
+    does not use parallelism, so you may not see much of a speedup.
+
+    This algorithm comes from [1]_.
+
+    References
+    ----------
+    .. [1] Tantau, Till.
+       "A note on the complexity of the reachability problem for
+       tournaments."
+       *Electronic Colloquium on Computational Complexity*. 2001.
+
+    """
+
+    def two_neighborhood(G, v):
+        """Returns the set of nodes at distance at most two from `v`.
+
+        `G` must be a graph and `v` a node in that graph.
+
+        The returned set includes the nodes at distance zero (that is,
+        the node `v` itself), the nodes at distance one (that is, the
+        out-neighbors of `v`), and the nodes at distance two.
+
+        """
+        # TODO This is trivially parallelizable.
+        return {
+            x for x in G if x == v or x in G[v] or any(is_path(G, [v, z, x]) for z in G)
+        }
+
+    def is_closed(G, nodes):
+        """Decides whether the given set of nodes is closed.
+
+        A set *S* of nodes is *closed* if for each node *u* in the graph
+        not in *S* and for each node *v* in *S*, there is an edge from
+        *u* to *v*.
+
+        """
+        # TODO This is trivially parallelizable.
+        return all(v in G[u] for u in set(G) - nodes for v in nodes)
+
+    # TODO This is trivially parallelizable.
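+    # Tantau's criterion: `s` can reach `t` unless some two-neighborhood
+    # forms a closed set containing `s` but not `t`; the check below
+    # evaluates exactly that condition over all two-neighborhoods.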
+ neighborhoods = [two_neighborhood(G, v) for v in G] + return all(not (is_closed(G, S) and s in S and t not in S) for S in neighborhoods) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch(name="tournament_is_strongly_connected") +def is_strongly_connected(G): + """Decides whether the given tournament is strongly connected. + + This function is more theoretically efficient than the + :func:`~networkx.algorithms.components.is_strongly_connected` + function. + + The given graph **must** be a tournament, otherwise this function's + behavior is undefined. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + bool + Whether the tournament is strongly connected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 0)]) + >>> nx.is_tournament(G) + True + >>> nx.tournament.is_strongly_connected(G) + True + >>> G.remove_edge(3, 0) + >>> G.add_edge(0, 3) + >>> nx.is_tournament(G) + True + >>> nx.tournament.is_strongly_connected(G) + False + + Notes + ----- + Although this function is more theoretically efficient than the + generic strong connectivity function, a speedup requires the use of + parallelism. Though it may in the future, the current implementation + does not use parallelism, thus you may not see much of a speedup. + + This algorithm comes from [1]. + + References + ---------- + .. [1] Tantau, Till. + "A note on the complexity of the reachability problem for + tournaments." + *Electronic Colloquium on Computational Complexity*. 2001. + + + """ + # TODO This is trivially parallelizable. + return all(is_reachable(G, u, v) for u in G for v in G) diff --git a/phivenv/Lib/site-packages/networkx/algorithms/triads.py b/phivenv/Lib/site-packages/networkx/algorithms/triads.py new file mode 100644 index 0000000000000000000000000000000000000000..0041b83d82ada9ddd5c79428168a571cbcc13fd4 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/triads.py @@ -0,0 +1,565 @@ +# See https://github.com/networkx/networkx/pull/1474 +# Copyright 2011 Reya Group +# Copyright 2011 Alex Levenson +# Copyright 2011 Diederik van Liere +"""Functions for analyzing triads of a graph.""" + +from collections import defaultdict +from itertools import combinations, permutations + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = [ + "triadic_census", + "is_triad", + "all_triplets", + "all_triads", + "triads_by_type", + "triad_type", + "random_triad", +] + +#: The integer codes representing each type of triad. +#: +#: Triads that are the same up to symmetry have the same code. +TRICODES = ( + 1, + 2, + 2, + 3, + 2, + 4, + 6, + 8, + 2, + 6, + 5, + 7, + 3, + 8, + 7, + 11, + 2, + 6, + 4, + 8, + 5, + 9, + 9, + 13, + 6, + 10, + 9, + 14, + 7, + 14, + 12, + 15, + 2, + 5, + 6, + 7, + 6, + 9, + 10, + 14, + 4, + 9, + 9, + 12, + 8, + 13, + 14, + 15, + 3, + 7, + 8, + 11, + 7, + 12, + 14, + 15, + 8, + 14, + 13, + 15, + 11, + 15, + 15, + 16, +) + +#: The names of each type of triad. The order of the elements is +#: important: it corresponds to the tricodes given in :data:`TRICODES`. +TRIAD_NAMES = ( + "003", + "012", + "102", + "021D", + "021U", + "021C", + "111D", + "111U", + "030T", + "030C", + "201", + "120D", + "120U", + "120C", + "210", + "300", +) + + +#: A dictionary mapping triad code to triad name. 
+TRICODE_TO_NAME = {i: TRIAD_NAMES[code - 1] for i, code in enumerate(TRICODES)} + + +def _tricode(G, v, u, w): + """Returns the integer code of the given triad. + + This is some fancy magic that comes from Batagelj and Mrvar's paper. It + treats each edge joining a pair of `v`, `u`, and `w` as a bit in + the binary representation of an integer. + + """ + combos = ((v, u, 1), (u, v, 2), (v, w, 4), (w, v, 8), (u, w, 16), (w, u, 32)) + return sum(x for u, v, x in combos if v in G[u]) + + +@not_implemented_for("undirected") +@nx._dispatch +def triadic_census(G, nodelist=None): + """Determines the triadic census of a directed graph. + + The triadic census is a count of how many of the 16 possible types of + triads are present in a directed graph. If a list of nodes is passed, then + only those triads are taken into account which have elements of nodelist in them. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + nodelist : list + List of nodes for which you want to calculate triadic census + + Returns + ------- + census : dict + Dictionary with triad type as keys and number of occurrences as values. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1), (4, 2)]) + >>> triadic_census = nx.triadic_census(G) + >>> for key, value in triadic_census.items(): + ... print(f"{key}: {value}") + ... + 003: 0 + 012: 0 + 102: 0 + 021D: 0 + 021U: 0 + 021C: 0 + 111D: 0 + 111U: 0 + 030T: 2 + 030C: 2 + 201: 0 + 120D: 0 + 120U: 0 + 120C: 0 + 210: 0 + 300: 0 + + Notes + ----- + This algorithm has complexity $O(m)$ where $m$ is the number of edges in + the graph. + + Raises + ------ + ValueError + If `nodelist` contains duplicate nodes or nodes not in `G`. + If you want to ignore this you can preprocess with `set(nodelist) & G.nodes` + + See also + -------- + triad_graph + + References + ---------- + .. [1] Vladimir Batagelj and Andrej Mrvar, A subquadratic triad census + algorithm for large sparse networks with small maximum degree, + University of Ljubljana, + http://vlado.fmf.uni-lj.si/pub/networks/doc/triads/triads.pdf + + """ + nodeset = set(G.nbunch_iter(nodelist)) + if nodelist is not None and len(nodelist) != len(nodeset): + raise ValueError("nodelist includes duplicate nodes or nodes not in G") + + N = len(G) + Nnot = N - len(nodeset) # can signal special counting for subset of nodes + + # create an ordering of nodes with nodeset nodes first + m = {n: i for i, n in enumerate(nodeset)} + if Nnot: + # add non-nodeset nodes later in the ordering + not_nodeset = G.nodes - nodeset + m.update((n, i + N) for i, n in enumerate(not_nodeset)) + + # build all_neighbor dicts for easy counting + # After Python 3.8 can leave off these keys(). Speedup also using G._pred + # nbrs = {n: G._pred[n].keys() | G._succ[n].keys() for n in G} + nbrs = {n: G.pred[n].keys() | G.succ[n].keys() for n in G} + dbl_nbrs = {n: G.pred[n].keys() & G.succ[n].keys() for n in G} + + if Nnot: + sgl_nbrs = {n: G.pred[n].keys() ^ G.succ[n].keys() for n in not_nodeset} + # find number of edges not incident to nodes in nodeset + sgl = sum(1 for n in not_nodeset for nbr in sgl_nbrs[n] if nbr not in nodeset) + sgl_edges_outside = sgl // 2 + dbl = sum(1 for n in not_nodeset for nbr in dbl_nbrs[n] if nbr not in nodeset) + dbl_edges_outside = dbl // 2 + + # Initialize the count for each triad to be zero. + census = {name: 0 for name in TRIAD_NAMES} + # Main loop over nodes + for v in nodeset: + vnbrs = nbrs[v] + dbl_vnbrs = dbl_nbrs[v] + if Nnot: + # set up counts of edges attached to v. 
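+            # These tallies are used below to correct the "012"/"102"
+            # counts for edges that lie outside `nodeset` (_bdy = neighbor
+            # on the nodeset boundary, _out = entirely outside).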
+ sgl_unbrs_bdy = sgl_unbrs_out = dbl_unbrs_bdy = dbl_unbrs_out = 0 + for u in vnbrs: + if m[u] <= m[v]: + continue + unbrs = nbrs[u] + neighbors = (vnbrs | unbrs) - {u, v} + # Count connected triads. + for w in neighbors: + if m[u] < m[w] or (m[v] < m[w] < m[u] and v not in nbrs[w]): + code = _tricode(G, v, u, w) + census[TRICODE_TO_NAME[code]] += 1 + + # Use a formula for dyadic triads with edge incident to v + if u in dbl_vnbrs: + census["102"] += N - len(neighbors) - 2 + else: + census["012"] += N - len(neighbors) - 2 + + # Count edges attached to v. Subtract later to get triads with v isolated + # _out are (u,unbr) for unbrs outside boundary of nodeset + # _bdy are (u,unbr) for unbrs on boundary of nodeset (get double counted) + if Nnot and u not in nodeset: + sgl_unbrs = sgl_nbrs[u] + sgl_unbrs_bdy += len(sgl_unbrs & vnbrs - nodeset) + sgl_unbrs_out += len(sgl_unbrs - vnbrs - nodeset) + dbl_unbrs = dbl_nbrs[u] + dbl_unbrs_bdy += len(dbl_unbrs & vnbrs - nodeset) + dbl_unbrs_out += len(dbl_unbrs - vnbrs - nodeset) + # if nodeset == G.nodes, skip this b/c we will find the edge later. + if Nnot: + # Count edges outside nodeset not connected with v (v isolated triads) + census["012"] += sgl_edges_outside - (sgl_unbrs_out + sgl_unbrs_bdy // 2) + census["102"] += dbl_edges_outside - (dbl_unbrs_out + dbl_unbrs_bdy // 2) + + # calculate null triads: "003" + # null triads = total number of possible triads - all found triads + total_triangles = (N * (N - 1) * (N - 2)) // 6 + triangles_without_nodeset = (Nnot * (Nnot - 1) * (Nnot - 2)) // 6 + total_census = total_triangles - triangles_without_nodeset + census["003"] = total_census - sum(census.values()) + + return census + + +@nx._dispatch +def is_triad(G): + """Returns True if the graph G is a triad, else False. + + Parameters + ---------- + G : graph + A NetworkX Graph + + Returns + ------- + istriad : boolean + Whether G is a valid triad + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> nx.is_triad(G) + True + >>> G.add_edge(0, 1) + >>> nx.is_triad(G) + False + """ + if isinstance(G, nx.Graph): + if G.order() == 3 and nx.is_directed(G): + if not any((n, n) in G.edges() for n in G.nodes()): + return True + return False + + +@not_implemented_for("undirected") +@nx._dispatch +def all_triplets(G): + """Returns a generator of all possible sets of 3 nodes in a DiGraph. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + triplets : generator of 3-tuples + Generator of tuples of 3 nodes + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + >>> list(nx.all_triplets(G)) + [(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)] + + """ + triplets = combinations(G.nodes(), 3) + return triplets + + +@not_implemented_for("undirected") +@nx._dispatch +def all_triads(G): + """A generator of all possible triads in G. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + all_triads : generator of DiGraphs + Generator of triads (order-3 DiGraphs) + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1), (4, 2)]) + >>> for triad in nx.all_triads(G): + ... 
print(triad.edges)
+    [(1, 2), (2, 3), (3, 1)]
+    [(1, 2), (4, 1), (4, 2)]
+    [(3, 1), (3, 4), (4, 1)]
+    [(2, 3), (3, 4), (4, 2)]
+
+    """
+    triplets = combinations(G.nodes(), 3)
+    for triplet in triplets:
+        yield G.subgraph(triplet).copy()
+
+
+@not_implemented_for("undirected")
+@nx._dispatch
+def triads_by_type(G):
+    """Returns a list of all triads for each triad type in a directed graph.
+    There are exactly 16 different types of triads possible. Suppose 1, 2, 3
+    are three nodes; they will be classified as a particular triad type if
+    their connections are as follows:
+
+    - 003: 1, 2, 3
+    - 012: 1 -> 2, 3
+    - 102: 1 <-> 2, 3
+    - 021D: 1 <- 2 -> 3
+    - 021U: 1 -> 2 <- 3
+    - 021C: 1 -> 2 -> 3
+    - 111D: 1 <-> 2 <- 3
+    - 111U: 1 <-> 2 -> 3
+    - 030T: 1 -> 2 -> 3, 1 -> 3
+    - 030C: 1 <- 2 <- 3, 1 -> 3
+    - 201: 1 <-> 2 <-> 3
+    - 120D: 1 <- 2 -> 3, 1 <-> 3
+    - 120U: 1 -> 2 <- 3, 1 <-> 3
+    - 120C: 1 -> 2 -> 3, 1 <-> 3
+    - 210: 1 -> 2 <-> 3, 1 <-> 3
+    - 300: 1 <-> 2 <-> 3, 1 <-> 3
+
+    Refer to the example gallery in the NetworkX documentation
+    for visual examples of the triad types.
+
+    Parameters
+    ----------
+    G : digraph
+        A NetworkX DiGraph
+
+    Returns
+    -------
+    tri_by_type : dict
+        Dictionary with triad types as keys and lists of triads as values.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 1), (5, 6), (5, 4), (6, 7)])
+    >>> triads = nx.triads_by_type(G)
+    >>> triads['120C'][0].edges()
+    OutEdgeView([(1, 2), (1, 3), (2, 3), (3, 1)])
+    >>> triads['012'][0].edges()
+    OutEdgeView([(1, 2)])
+
+    References
+    ----------
+    .. [1] Snijders, T. (2012). "Transitivity and triads." University of
+       Oxford.
+       https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
+    """
+    # num_triads = o * (o - 1) * (o - 2) // 6
+    # if num_triads > TRIAD_LIMIT: print(WARNING)
+    all_tri = all_triads(G)
+    tri_by_type = defaultdict(list)
+    for triad in all_tri:
+        name = triad_type(triad)
+        tri_by_type[name].append(triad)
+    return tri_by_type
+
+
+@not_implemented_for("undirected")
+@nx._dispatch
+def triad_type(G):
+    """Returns the sociological triad type for a triad.
+
+    Parameters
+    ----------
+    G : digraph
+        A NetworkX DiGraph with 3 nodes
+
+    Returns
+    -------
+    triad_type : str
+        A string identifying the triad type
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
+    >>> nx.triad_type(G)
+    '030C'
+    >>> G.add_edge(1, 3)
+    >>> nx.triad_type(G)
+    '120C'
+
+    Notes
+    -----
+    There can be 6 unique edges in a triad (order-3 DiGraph), so there are
+    2^6 = 64 unique triads given 3 nodes. These 64 triads each display
+    exactly 1 of 16 topologies of triads (topologies can be permuted).
+    These topologies are identified by the following notation:
+
+    {m}{a}{n}{type} (for example: 111D, 210, 102)
+
+    Here:
+
+    {m}    = number of mutual ties (takes 0, 1, 2, 3); a mutual tie is (0,1)
+             AND (1,0)
+    {a}    = number of asymmetric ties (takes 0, 1, 2, 3); an asymmetric tie
+             is (0,1) BUT NOT (1,0) or vice versa
+    {n}    = number of null ties (takes 0, 1, 2, 3); a null tie is NEITHER
+             (0,1) NOR (1,0)
+    {type} = a letter (takes U, D, C, T) corresponding to up, down, cyclical
+             and transitive. This is only used for topologies that can have
+             more than one form (e.g. 021D and 021U).
+
+    References
+    ----------
+    .. [1] Snijders, T. (2012). "Transitivity and triads." University of
+       Oxford.
+       https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
+    """
+    if not is_triad(G):
+        raise nx.NetworkXAlgorithmError("G is not a triad (order-3 DiGraph)")
+    num_edges = len(G.edges())
+    if num_edges == 0:
+        return "003"
+    elif num_edges == 1:
+        return "012"
+    elif num_edges == 2:
+        e1, e2 = G.edges()
+        if set(e1) == set(e2):
+            return "102"
+        elif e1[0] == e2[0]:
+            return "021D"
+        elif e1[1] == e2[1]:
+            return "021U"
+        elif e1[1] == e2[0] or e2[1] == e1[0]:
+            return "021C"
+    elif num_edges == 3:
+        for e1, e2, e3 in permutations(G.edges(), 3):
+            if set(e1) == set(e2):
+                if e3[0] in e1:
+                    return "111U"
+                # otherwise e3[1] in e1:
+                return "111D"
+            elif set(e1).symmetric_difference(set(e2)) == set(e3):
+                # a cycle has every node as the source of exactly one edge
+                if {e1[0], e2[0], e3[0]} == set(G.nodes()):
+                    return "030C"
+                # otherwise e3 == (e1[0], e2[1]) and e2 == (e1[1], e3[1]):
+                return "030T"
+    elif num_edges == 4:
+        for e1, e2, e3, e4 in permutations(G.edges(), 4):
+            if set(e1) == set(e2):
+                # identify pair of symmetric edges (which necessarily exists)
+                if set(e3) == set(e4):
+                    return "201"
+                if {e3[0]} == {e4[0]} == set(e3).intersection(set(e4)):
+                    return "120D"
+                if {e3[1]} == {e4[1]} == set(e3).intersection(set(e4)):
+                    return "120U"
+                if e3[1] == e4[0]:
+                    return "120C"
+    elif num_edges == 5:
+        return "210"
+    elif num_edges == 6:
+        return "300"
+
+
+@not_implemented_for("undirected")
+@py_random_state(1)
+@nx._dispatch
+def random_triad(G, seed=None):
+    """Returns a random triad from a directed graph.
+
+    Parameters
+    ----------
+    G : digraph
+        A NetworkX DiGraph
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness<randomness>`.
+
+    Returns
+    -------
+    G2 : subgraph
+        A randomly selected triad (order-3 NetworkX DiGraph)
+
+    Raises
+    ------
+    NetworkXError
+        If the input Graph has fewer than 3 nodes.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 1), (5, 6), (5, 4), (6, 7)])
+    >>> triad = nx.random_triad(G, seed=1)
+    >>> triad.edges
+    OutEdgeView([(1, 2)])
+
+    """
+    if len(G) < 3:
+        raise nx.NetworkXError(
+            f"G needs at least 3 nodes to form a triad; it has {len(G)} nodes"
+        )
+    nodes = seed.sample(list(G.nodes()), 3)
+    G2 = G.subgraph(nodes)
+    return G2
diff --git a/phivenv/Lib/site-packages/networkx/algorithms/vitality.py b/phivenv/Lib/site-packages/networkx/algorithms/vitality.py
new file mode 100644
index 0000000000000000000000000000000000000000..c41efd13f2cf40758f374fcf6ee1de18302d83a2
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/algorithms/vitality.py
@@ -0,0 +1,76 @@
+"""
+Vitality measures.
+"""
+from functools import partial
+
+import networkx as nx
+
+__all__ = ["closeness_vitality"]
+
+
+@nx._dispatch(edge_attrs="weight")
+def closeness_vitality(G, node=None, weight=None, wiener_index=None):
+    """Returns the closeness vitality for nodes in the graph.
+
+    The *closeness vitality* of a node, defined in Section 3.6.2 of [1]_,
+    is the change in the sum of distances between all node pairs when
+    excluding that node.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A strongly-connected graph.
+
+    weight : string
+        The name of the edge attribute used as weight. This is passed
+        directly to the :func:`~networkx.wiener_index` function.
+
+    node : object
+        If specified, only the closeness vitality for this node will be
+        returned. Otherwise, a dictionary mapping each node to its
+        closeness vitality will be returned.
+ + Other parameters + ---------------- + wiener_index : number + If you have already computed the Wiener index of the graph + `G`, you can provide that value here. Otherwise, it will be + computed for you. + + Returns + ------- + dictionary or float + If `node` is None, this function returns a dictionary + with nodes as keys and closeness vitality as the + value. Otherwise, it returns only the closeness vitality for the + specified `node`. + + The closeness vitality of a node may be negative infinity if + removing that node would disconnect the graph. + + Examples + -------- + >>> G = nx.cycle_graph(3) + >>> nx.closeness_vitality(G) + {0: 2.0, 1: 2.0, 2: 2.0} + + See Also + -------- + closeness_centrality + + References + ---------- + .. [1] Ulrik Brandes, Thomas Erlebach (eds.). + *Network Analysis: Methodological Foundations*. + Springer, 2005. + + + """ + if wiener_index is None: + wiener_index = nx.wiener_index(G, weight=weight) + if node is not None: + after = nx.wiener_index(G.subgraph(set(G) - {node}), weight=weight) + return wiener_index - after + vitality = partial(closeness_vitality, G, weight=weight, wiener_index=wiener_index) + # TODO This can be trivially parallelized. + return {v: vitality(node=v) for v in G} diff --git a/phivenv/Lib/site-packages/networkx/algorithms/voronoi.py b/phivenv/Lib/site-packages/networkx/algorithms/voronoi.py new file mode 100644 index 0000000000000000000000000000000000000000..af17f013ec89a0c59f01cc6753cacaef47eb6d97 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/voronoi.py @@ -0,0 +1,85 @@ +"""Functions for computing the Voronoi cells of a graph.""" +import networkx as nx +from networkx.utils import groups + +__all__ = ["voronoi_cells"] + + +@nx._dispatch(edge_attrs="weight") +def voronoi_cells(G, center_nodes, weight="weight"): + """Returns the Voronoi cells centered at `center_nodes` with respect + to the shortest-path distance metric. + + If $C$ is a set of nodes in the graph and $c$ is an element of $C$, + the *Voronoi cell* centered at a node $c$ is the set of all nodes + $v$ that are closer to $c$ than to any other center node in $C$ with + respect to the shortest-path distance metric. [1]_ + + For directed graphs, this will compute the "outward" Voronoi cells, + as defined in [1]_, in which distance is measured from the center + nodes to the target node. For the "inward" Voronoi cells, use the + :meth:`DiGraph.reverse` method to reverse the orientation of the + edges before invoking this function on the directed graph. + + Parameters + ---------- + G : NetworkX graph + + center_nodes : set + A nonempty set of nodes in the graph `G` that represent the + center of the Voronoi cells. + + weight : string or function + The edge attribute (or an arbitrary function) representing the + weight of an edge. This keyword argument is as described in the + documentation for :func:`~networkx.multi_source_dijkstra_path`, + for example. + + Returns + ------- + dictionary + A mapping from center node to set of all nodes in the graph + closer to that center node than to any other center node. The + keys of the dictionary are the element of `center_nodes`, and + the values of the dictionary form a partition of the nodes of + `G`. 
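+
+    As a minimal sketch of the directed case described above, the "inward"
+    Voronoi cells can be computed by reversing the graph first (assuming
+    `G` is a DiGraph)::
+
+        inward = nx.voronoi_cells(G.reverse(), center_nodes)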
+ + Examples + -------- + To get only the partition of the graph induced by the Voronoi cells, + take the collection of all values in the returned dictionary:: + + >>> G = nx.path_graph(6) + >>> center_nodes = {0, 3} + >>> cells = nx.voronoi_cells(G, center_nodes) + >>> partition = set(map(frozenset, cells.values())) + >>> sorted(map(sorted, partition)) + [[0, 1], [2, 3, 4, 5]] + + Raises + ------ + ValueError + If `center_nodes` is empty. + + References + ---------- + .. [1] Erwig, Martin. (2000),"The graph Voronoi diagram with applications." + *Networks*, 36: 156--163. + https://doi.org/10.1002/1097-0037(200010)36:3<156::AID-NET2>3.0.CO;2-L + + """ + # Determine the shortest paths from any one of the center nodes to + # every node in the graph. + # + # This raises `ValueError` if `center_nodes` is an empty set. + paths = nx.multi_source_dijkstra_path(G, center_nodes, weight=weight) + # Determine the center node from which the shortest path originates. + nearest = {v: p[0] for v, p in paths.items()} + # Get the mapping from center node to all nodes closer to it than to + # any other center node. + cells = groups(nearest) + # We collect all unreachable nodes under a special key, if there are any. + unreachable = set(G) - set(nearest) + if unreachable: + cells["unreachable"] = unreachable + return cells diff --git a/phivenv/Lib/site-packages/networkx/algorithms/walks.py b/phivenv/Lib/site-packages/networkx/algorithms/walks.py new file mode 100644 index 0000000000000000000000000000000000000000..6f357ce1d42c81939f10a14a4fadd5139f6aab9b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/walks.py @@ -0,0 +1,80 @@ +"""Function for computing walks in a graph. +""" + +import networkx as nx + +__all__ = ["number_of_walks"] + + +@nx._dispatch +def number_of_walks(G, walk_length): + """Returns the number of walks connecting each pair of nodes in `G` + + A *walk* is a sequence of nodes in which each adjacent pair of nodes + in the sequence is adjacent in the graph. A walk can repeat the same + edge and go in the opposite direction just as people can walk on a + set of paths, but standing still is not counted as part of the walk. + + This function only counts the walks with `walk_length` edges. Note that + the number of nodes in the walk sequence is one more than `walk_length`. + The number of walks can grow very quickly on a larger graph + and with a larger walk length. + + Parameters + ---------- + G : NetworkX graph + + walk_length : int + A nonnegative integer representing the length of a walk. + + Returns + ------- + dict + A dictionary of dictionaries in which outer keys are source + nodes, inner keys are target nodes, and inner values are the + number of walks of length `walk_length` connecting those nodes. + + Raises + ------ + ValueError + If `walk_length` is negative + + Examples + -------- + + >>> G = nx.Graph([(0, 1), (1, 2)]) + >>> walks = nx.number_of_walks(G, 2) + >>> walks + {0: {0: 1, 1: 0, 2: 1}, 1: {0: 0, 1: 2, 2: 0}, 2: {0: 1, 1: 0, 2: 1}} + >>> total_walks = sum(sum(tgts.values()) for _, tgts in walks.items()) + + You can also get the number of walks from a specific source node using the + returned dictionary. 
For example, number of walks of length 1 from node 0 + can be found as follows: + + >>> walks = nx.number_of_walks(G, 1) + >>> walks[0] + {0: 0, 1: 1, 2: 0} + >>> sum(walks[0].values()) # walks from 0 of length 1 + 1 + + Similarly, a target node can also be specified: + + >>> walks[0][1] + 1 + + """ + import numpy as np + + if walk_length < 0: + raise ValueError(f"`walk_length` cannot be negative: {walk_length}") + + A = nx.adjacency_matrix(G, weight=None) + # TODO: Use matrix_power from scipy.sparse when available + # power = sp.sparse.linalg.matrix_power(A, walk_length) + power = np.linalg.matrix_power(A.toarray(), walk_length) + result = { + u: {v: power[u_idx, v_idx] for v_idx, v in enumerate(G)} + for u_idx, u in enumerate(G) + } + return result diff --git a/phivenv/Lib/site-packages/networkx/algorithms/wiener.py b/phivenv/Lib/site-packages/networkx/algorithms/wiener.py new file mode 100644 index 0000000000000000000000000000000000000000..9e81cdc72ca61870c3eb818c6d2bb82315694f37 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/algorithms/wiener.py @@ -0,0 +1,79 @@ +"""Functions related to the Wiener index of a graph.""" + +from itertools import chain + +import networkx as nx + +from .components import is_connected, is_strongly_connected +from .shortest_paths import shortest_path_length as spl + +__all__ = ["wiener_index"] + +#: Rename the :func:`chain.from_iterable` function for the sake of +#: brevity. +chaini = chain.from_iterable + + +@nx._dispatch(edge_attrs="weight") +def wiener_index(G, weight=None): + """Returns the Wiener index of the given graph. + + The *Wiener index* of a graph is the sum of the shortest-path + distances between each pair of reachable nodes. For pairs of nodes + in undirected graphs, only one orientation of the pair is counted. + + Parameters + ---------- + G : NetworkX graph + + weight : object + The edge attribute to use as distance when computing + shortest-path distances. This is passed directly to the + :func:`networkx.shortest_path_length` function. + + Returns + ------- + float + The Wiener index of the graph `G`. + + Raises + ------ + NetworkXError + If the graph `G` is not connected. + + Notes + ----- + If a pair of nodes is not reachable, the distance is assumed to be + infinity. This means that for graphs that are not + strongly-connected, this function returns ``inf``. + + The Wiener index is not usually defined for directed graphs, however + this function uses the natural generalization of the Wiener index to + directed graphs. + + Examples + -------- + The Wiener index of the (unweighted) complete graph on *n* nodes + equals the number of pairs of the *n* nodes, since each pair of + nodes is at distance one:: + + >>> n = 10 + >>> G = nx.complete_graph(n) + >>> nx.wiener_index(G) == n * (n - 1) / 2 + True + + Graphs that are not strongly-connected have infinite Wiener index:: + + >>> G = nx.empty_graph(2) + >>> nx.wiener_index(G) + inf + + """ + is_directed = G.is_directed() + if (is_directed and not is_strongly_connected(G)) or ( + not is_directed and not is_connected(G) + ): + return float("inf") + total = sum(chaini(p.values() for v, p in spl(G, weight=weight))) + # Need to account for double counting pairs of nodes in undirected graphs. 
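+    # (Worked sketch: for the undirected path 0-1-2 the sum above is
+    # d(0,1)+d(0,2)+d(1,0)+d(1,2)+d(2,0)+d(2,1) = 1+2+1+1+2+1 = 8,
+    # and 8 / 2 = 4 is the Wiener index of that path.)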
+ return total if is_directed else total / 2 diff --git a/phivenv/Lib/site-packages/networkx/conftest.py b/phivenv/Lib/site-packages/networkx/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..0218891ff33e8168050c689ccfcb443b57c07c46 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/conftest.py @@ -0,0 +1,265 @@ +""" +Testing +======= + +General guidelines for writing good tests: + +- doctests always assume ``import networkx as nx`` so don't add that +- prefer pytest fixtures over classes with setup methods. +- use the ``@pytest.mark.parametrize`` decorator +- use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy. + and add the module to the relevant entries below. + +""" +import os +import sys +import warnings +from importlib.metadata import entry_points + +import pytest + +import networkx + + +def pytest_addoption(parser): + parser.addoption( + "--runslow", action="store_true", default=False, help="run slow tests" + ) + parser.addoption( + "--backend", + action="store", + default=None, + help="Run tests with a backend by auto-converting nx graphs to backend graphs", + ) + parser.addoption( + "--fallback-to-nx", + action="store_true", + default=False, + help="Run nx function if a backend doesn't implement a dispatchable function" + " (use with --backend)", + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "slow: mark test as slow to run") + backend = config.getoption("--backend") + if backend is None: + backend = os.environ.get("NETWORKX_TEST_BACKEND") + if backend: + networkx.utils.backends._dispatch._automatic_backends = [backend] + fallback_to_nx = config.getoption("--fallback-to-nx") + if not fallback_to_nx: + fallback_to_nx = os.environ.get("NETWORKX_FALLBACK_TO_NX") + networkx.utils.backends._dispatch._fallback_to_nx = bool(fallback_to_nx) + # nx-loopback backend is only available when testing + if sys.version_info < (3, 10): + backends = ( + ep for ep in entry_points()["networkx.backends"] if ep.name == "nx-loopback" + ) + else: + backends = entry_points(name="nx-loopback", group="networkx.backends") + if backends: + networkx.utils.backends.backends["nx-loopback"] = next(iter(backends)) + else: + warnings.warn( + "\n\n WARNING: Mixed NetworkX configuration! \n\n" + " This environment has mixed configuration for networkx.\n" + " The test object nx-loopback is not configured correctly.\n" + " You should not be seeing this message.\n" + " Try `pip install -e .`, or change your PYTHONPATH\n" + " Make sure python finds the networkx repo you are testing\n\n" + ) + + +def pytest_collection_modifyitems(config, items): + # Setting this to True here allows tests to be set up before dispatching + # any function call to a backend. + networkx.utils.backends._dispatch._is_testing = True + if automatic_backends := networkx.utils.backends._dispatch._automatic_backends: + # Allow pluggable backends to add markers to tests (such as skip or xfail) + # when running in auto-conversion test mode + backend = networkx.utils.backends.backends[automatic_backends[0]].load() + if hasattr(backend, "on_start_tests"): + getattr(backend, "on_start_tests")(items) + + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) + + +# TODO: The warnings below need to be dealt with, but for now we silence them. 
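+# (Illustrative usage of the options wired up above, assuming the local
+# nx-loopback backend is installed via `pip install -e .`:
+#     pytest --backend nx-loopback --fallback-to-nx
+# or equivalently via the NETWORKX_TEST_BACKEND and NETWORKX_FALLBACK_TO_NX
+# environment variables.)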
+@pytest.fixture(autouse=True) +def set_warnings(): + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="nx.nx_pydot" + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="single_target_shortest_path_length will", + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="shortest_path for all_pairs", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\nforest_str is deprecated" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\n\nrandom_tree" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="Edmonds has been deprecated" + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="MultiDiGraph_EdgeKey has been deprecated", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\n\nThe `normalized`" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="function `join` is deprecated" + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="\n\nstrongly_connected_components_recursive", + ) + + +@pytest.fixture(autouse=True) +def add_nx(doctest_namespace): + doctest_namespace["nx"] = networkx + # TODO: remove the try-except block when we require numpy >= 2 + try: + import numpy as np + + np.set_printoptions(legacy="1.21") + except ImportError: + pass + + +# What dependencies are installed? + +try: + import numpy + + has_numpy = True +except ImportError: + has_numpy = False + +try: + import scipy + + has_scipy = True +except ImportError: + has_scipy = False + +try: + import matplotlib + + has_matplotlib = True +except ImportError: + has_matplotlib = False + +try: + import pandas + + has_pandas = True +except ImportError: + has_pandas = False + +try: + import pygraphviz + + has_pygraphviz = True +except ImportError: + has_pygraphviz = False + +try: + import pydot + + has_pydot = True +except ImportError: + has_pydot = False + +try: + import sympy + + has_sympy = True +except ImportError: + has_sympy = False + + +# List of files that pytest should ignore + +collect_ignore = [] + +needs_numpy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/centrality/current_flow_closeness.py", + "algorithms/node_classification.py", + "algorithms/non_randomness.py", + "algorithms/shortest_paths/dense.py", + "linalg/bethehessianmatrix.py", + "linalg/laplacianmatrix.py", + "utils/misc.py", + "algorithms/centrality/laplacian.py", +] +needs_scipy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/assortativity/correlation.py", + "algorithms/assortativity/mixing.py", + "algorithms/assortativity/pairs.py", + "algorithms/bipartite/matrix.py", + "algorithms/bipartite/spectral.py", + "algorithms/centrality/current_flow_betweenness.py", + "algorithms/centrality/current_flow_betweenness_subset.py", + "algorithms/centrality/eigenvector.py", + "algorithms/centrality/katz.py", + "algorithms/centrality/second_order.py", + "algorithms/centrality/subgraph_alg.py", + "algorithms/communicability_alg.py", + "algorithms/link_analysis/hits_alg.py", + "algorithms/link_analysis/pagerank_alg.py", + "algorithms/node_classification.py", + "algorithms/similarity.py", + "convert_matrix.py", + "drawing/layout.py", + "generators/spectral_graph_forge.py", + "linalg/algebraicconnectivity.py", + "linalg/attrmatrix.py", + "linalg/bethehessianmatrix.py", + "linalg/graphmatrix.py", + "linalg/modularitymatrix.py", + "linalg/spectrum.py", 
+ "utils/rcm.py", + "algorithms/centrality/laplacian.py", +] +needs_matplotlib = ["drawing/nx_pylab.py"] +needs_pandas = ["convert_matrix.py"] +needs_pygraphviz = ["drawing/nx_agraph.py"] +needs_pydot = ["drawing/nx_pydot.py"] +needs_sympy = ["algorithms/polynomials.py"] + +if not has_numpy: + collect_ignore += needs_numpy +if not has_scipy: + collect_ignore += needs_scipy +if not has_matplotlib: + collect_ignore += needs_matplotlib +if not has_pandas: + collect_ignore += needs_pandas +if not has_pygraphviz: + collect_ignore += needs_pygraphviz +if not has_pydot: + collect_ignore += needs_pydot +if not has_sympy: + collect_ignore += needs_sympy diff --git a/phivenv/Lib/site-packages/networkx/convert.py b/phivenv/Lib/site-packages/networkx/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..2fd20710934ac01912eddb8dc4722e30a8d0277f --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/convert.py @@ -0,0 +1,496 @@ +"""Functions to convert NetworkX graphs to and from other formats. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the to_networkx_graph() function +which attempts to guess the input type and convert it automatically. + +Examples +-------- +Create a graph with a single edge from a dictionary of dictionaries + +>>> d = {0: {1: 1}} # dict-of-dicts single edge (0,1) +>>> G = nx.Graph(d) + +See Also +-------- +nx_agraph, nx_pydot +""" +import warnings +from collections.abc import Collection, Generator, Iterator + +import networkx as nx + +__all__ = [ + "to_networkx_graph", + "from_dict_of_dicts", + "to_dict_of_dicts", + "from_dict_of_lists", + "to_dict_of_lists", + "from_edgelist", + "to_edgelist", +] + + +def to_networkx_graph(data, create_using=None, multigraph_input=False): + """Make a NetworkX graph from a known data structure. + + The preferred way to call this is automatically + from the class constructor + + >>> d = {0: {1: {"weight": 1}}} # dict-of-dicts single edge (0,1) + >>> G = nx.Graph(d) + + instead of the equivalent + + >>> G = nx.from_dict_of_dicts(d) + + Parameters + ---------- + data : object to be converted + + Current known types are: + any NetworkX graph + dict-of-dicts + dict-of-lists + container (e.g. set, list, tuple) of edges + iterator (e.g. itertools.chain) that produces edges + generator of edges + Pandas DataFrame (row per edge) + 2D numpy array + scipy sparse array + pygraphviz agraph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + If True and data is a dict_of_dicts, + try to create a multigraph assuming dict_of_dict_of_lists. + If data and create_using are both multigraphs then create + a multigraph from a multigraph. 
+ + """ + # NX graph + if hasattr(data, "adj"): + try: + result = from_dict_of_dicts( + data.adj, + create_using=create_using, + multigraph_input=data.is_multigraph(), + ) + # data.graph should be dict-like + result.graph.update(data.graph) + # data.nodes should be dict-like + # result.add_node_from(data.nodes.items()) possible but + # for custom node_attr_dict_factory which may be hashable + # will be unexpected behavior + for n, dd in data.nodes.items(): + result._node[n].update(dd) + return result + except Exception as err: + raise nx.NetworkXError("Input is not a correct NetworkX graph.") from err + + # pygraphviz agraph + if hasattr(data, "is_strict"): + try: + return nx.nx_agraph.from_agraph(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a correct pygraphviz graph.") from err + + # dict of dicts/lists + if isinstance(data, dict): + try: + return from_dict_of_dicts( + data, create_using=create_using, multigraph_input=multigraph_input + ) + except Exception as err1: + if multigraph_input is True: + raise nx.NetworkXError( + f"converting multigraph_input raised:\n{type(err1)}: {err1}" + ) + try: + return from_dict_of_lists(data, create_using=create_using) + except Exception as err2: + raise TypeError("Input is not known type.") from err2 + + # Pandas DataFrame + try: + import pandas as pd + + if isinstance(data, pd.DataFrame): + if data.shape[0] == data.shape[1]: + try: + return nx.from_pandas_adjacency(data, create_using=create_using) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame adjacency matrix." + raise nx.NetworkXError(msg) from err + else: + try: + return nx.from_pandas_edgelist( + data, edge_attr=True, create_using=create_using + ) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame edge-list." + raise nx.NetworkXError(msg) from err + except ImportError: + warnings.warn("pandas not found, skipping conversion test.", ImportWarning) + + # numpy array + try: + import numpy as np + + if isinstance(data, np.ndarray): + try: + return nx.from_numpy_array(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + f"Failed to interpret array as an adjacency matrix." + ) from err + except ImportError: + warnings.warn("numpy not found, skipping conversion test.", ImportWarning) + + # scipy sparse array - any format + try: + import scipy + + if hasattr(data, "format"): + try: + return nx.from_scipy_sparse_array(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + "Input is not a correct scipy sparse array type." + ) from err + except ImportError: + warnings.warn("scipy not found, skipping conversion test.", ImportWarning) + + # Note: most general check - should remain last in order of execution + # Includes containers (e.g. list, set, dict, etc.), generators, and + # iterators (e.g. itertools.chain) of edges + + if isinstance(data, (Collection, Generator, Iterator)): + try: + return from_edgelist(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a valid edge list") from err + + raise nx.NetworkXError("Input is not a known data type for conversion.") + + +@nx._dispatch +def to_dict_of_lists(G, nodelist=None): + """Returns adjacency representation of graph as a dictionary of lists. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + Notes + ----- + Completely ignores edge data for MultiGraph and MultiDiGraph. 
+ + """ + if nodelist is None: + nodelist = G + + d = {} + for n in nodelist: + d[n] = [nbr for nbr in G.neighbors(n) if nbr in nodelist] + return d + + +@nx._dispatch(graphs=None) +def from_dict_of_lists(d, create_using=None): + """Returns a graph from a dictionary of lists. + + Parameters + ---------- + d : dictionary of lists + A dictionary of lists adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> dol = {0: [1]} # single edge (0,1) + >>> G = nx.from_dict_of_lists(dol) + + or + + >>> G = nx.Graph(dol) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + if G.is_multigraph() and not G.is_directed(): + # a dict_of_lists can't show multiedges. BUT for undirected graphs, + # each edge shows up twice in the dict_of_lists. + # So we need to treat this case separately. + seen = {} + for node, nbrlist in d.items(): + for nbr in nbrlist: + if nbr not in seen: + G.add_edge(node, nbr) + seen[node] = 1 # don't allow reverse edge to show up + else: + G.add_edges_from( + ((node, nbr) for node, nbrlist in d.items() for nbr in nbrlist) + ) + return G + + +def to_dict_of_dicts(G, nodelist=None, edge_data=None): + """Returns adjacency representation of graph as a dictionary of dictionaries. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + edge_data : scalar, optional + If provided, the value of the dictionary will be set to `edge_data` for + all edges. Usual values could be `1` or `True`. If `edge_data` is + `None` (the default), the edgedata in `G` is used, resulting in a + dict-of-dict-of-dicts. If `G` is a MultiGraph, the result will be a + dict-of-dict-of-dict-of-dicts. See Notes for an approach to customize + handling edge data. `edge_data` should *not* be a container. + + Returns + ------- + dod : dict + A nested dictionary representation of `G`. Note that the level of + nesting depends on the type of `G` and the value of `edge_data` + (see Examples). + + See Also + -------- + from_dict_of_dicts, to_dict_of_lists + + Notes + ----- + For a more custom approach to handling edge data, try:: + + dod = { + n: { + nbr: custom(n, nbr, dd) for nbr, dd in nbrdict.items() + } + for n, nbrdict in G.adj.items() + } + + where `custom` returns the desired edge data for each edge between `n` and + `nbr`, given existing edge data `dd`. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> nx.to_dict_of_dicts(G) + {0: {1: {}}, 1: {0: {}, 2: {}}, 2: {1: {}}} + + Edge data is preserved by default (``edge_data=None``), resulting + in dict-of-dict-of-dicts where the innermost dictionary contains the + edge data: + + >>> G = nx.Graph() + >>> G.add_edges_from( + ... [ + ... (0, 1, {'weight': 1.0}), + ... (1, 2, {'weight': 2.0}), + ... (2, 0, {'weight': 1.0}), + ... ] + ... 
) + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'weight': 1.0}, 2: {'weight': 1.0}}, + 1: {0: {'weight': 1.0}, 2: {'weight': 2.0}}, + 2: {1: {'weight': 2.0}, 0: {'weight': 1.0}}} + >>> d[1][2]['weight'] + 2.0 + + If `edge_data` is not `None`, edge data in the original graph (if any) is + replaced: + + >>> d = nx.to_dict_of_dicts(G, edge_data=1) + >>> d + {0: {1: 1, 2: 1}, 1: {0: 1, 2: 1}, 2: {1: 1, 0: 1}} + >>> d[1][2] + 1 + + This also applies to MultiGraphs: edge data is preserved by default: + + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1, key='a', weight=1.0) + 'a' + >>> G.add_edge(0, 1, key='b', weight=5.0) + 'b' + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}, + 1: {0: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}} + >>> d[0][1]['b']['weight'] + 5.0 + + But multi edge data is lost if `edge_data` is not `None`: + + >>> d = nx.to_dict_of_dicts(G, edge_data=10) + >>> d + {0: {1: 10}, 1: {0: 10}} + """ + dod = {} + if nodelist is None: + if edge_data is None: + for u, nbrdict in G.adjacency(): + dod[u] = nbrdict.copy() + else: # edge_data is not None + for u, nbrdict in G.adjacency(): + dod[u] = dod.fromkeys(nbrdict, edge_data) + else: # nodelist is not None + if edge_data is None: + for u in nodelist: + dod[u] = {} + for v, data in ((v, data) for v, data in G[u].items() if v in nodelist): + dod[u][v] = data + else: # nodelist and edge_data are not None + for u in nodelist: + dod[u] = {} + for v in (v for v in G[u] if v in nodelist): + dod[u][v] = edge_data + return dod + + +@nx._dispatch(graphs=None) +def from_dict_of_dicts(d, create_using=None, multigraph_input=False): + """Returns a graph from a dictionary of dictionaries. + + Parameters + ---------- + d : dictionary of dictionaries + A dictionary of dictionaries adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + When True, the dict `d` is assumed + to be a dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. + Otherwise this routine assumes dict-of-dict-of-dict keyed by + node to neighbor to edge data. + + Examples + -------- + >>> dod = {0: {1: {"weight": 1}}} # single edge (0,1) + >>> G = nx.from_dict_of_dicts(dod) + + or + + >>> G = nx.Graph(dod) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + # does dict d represent a MultiGraph or MultiDiGraph? 
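+    # (Illustrative shape, assuming multigraph_input=True:
+    #  d = {u: {v: {edge_key: data_dict}}}, e.g.
+    #  {0: {1: {0: {"weight": 1}, 1: {"weight": 2}}}} describes two
+    #  parallel (0, 1) edges.)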
+ if multigraph_input: + if G.is_directed(): + if G.is_multigraph(): + G.add_edges_from( + (u, v, key, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: + G.add_edges_from( + (u, v, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: # Undirected + if G.is_multigraph(): + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, key, data) for key, data in datadict.items() + ) + seen.add((v, u)) + else: + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, data) for key, data in datadict.items() + ) + seen.add((v, u)) + + else: # not a multigraph to multigraph transfer + if G.is_multigraph() and not G.is_directed(): + # d can have both representations u-v, v-u in dict. Only add one. + # We don't need this check for digraphs since we add both directions, + # or for Graph() since it is done implicitly (parallel edges not allowed) + seen = set() + for u, nbrs in d.items(): + for v, data in nbrs.items(): + if (u, v) not in seen: + G.add_edge(u, v, key=0) + G[u][v][0].update(data) + seen.add((v, u)) + else: + G.add_edges_from( + ((u, v, data) for u, nbrs in d.items() for v, data in nbrs.items()) + ) + return G + + +@nx._dispatch(preserve_edge_attrs=True) +def to_edgelist(G, nodelist=None): + """Returns a list of edges in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + """ + if nodelist is None: + return G.edges(data=True) + return G.edges(nodelist, data=True) + + +@nx._dispatch(graphs=None) +def from_edgelist(edgelist, create_using=None): + """Returns a graph from a list of edges. + + Parameters + ---------- + edgelist : list or iterator + Edge tuples + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> edgelist = [(0, 1)] # single edge (0,1) + >>> G = nx.from_edgelist(edgelist) + + or + + >>> G = nx.Graph(edgelist) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_edges_from(edgelist) + return G diff --git a/phivenv/Lib/site-packages/networkx/convert_matrix.py b/phivenv/Lib/site-packages/networkx/convert_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..a8496e7cb4f91c944ce5af94ff36e1637a7affd9 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/convert_matrix.py @@ -0,0 +1,1200 @@ +"""Functions to convert NetworkX graphs to and from common data containers +like numpy arrays, scipy sparse arrays, and pandas DataFrames. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the `~networkx.convert.to_networkx_graph` +function which attempts to guess the input type and convert it automatically. + +Examples +-------- +Create a 10 node random graph from a numpy array + +>>> import numpy as np +>>> rng = np.random.default_rng() +>>> a = rng.integers(low=0, high=2, size=(10, 10)) +>>> DG = nx.from_numpy_array(a, create_using=nx.DiGraph) + +or equivalently: + +>>> DG = nx.DiGraph(a) + +which calls `from_numpy_array` internally based on the type of ``a``. 
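+
+The reverse conversion is a single call as well; a minimal sketch, reusing
+``DG`` from above:
+
+>>> a2 = nx.to_numpy_array(DG)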
+ +See Also +-------- +nx_agraph, nx_pydot +""" + +import itertools +from collections import defaultdict + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "from_pandas_adjacency", + "to_pandas_adjacency", + "from_pandas_edgelist", + "to_pandas_edgelist", + "from_scipy_sparse_array", + "to_scipy_sparse_array", + "from_numpy_array", + "to_numpy_array", +] + + +@nx._dispatch(edge_attrs="weight") +def to_pandas_adjacency( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight : string or None, optional + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. + + nonedge : float, optional + The matrix values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are matrix values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as nan. + + Returns + ------- + df : Pandas DataFrame + Graph adjacency matrix + + Notes + ----- + For directed graphs, entry i,j corresponds to an edge from i to j. + + The DataFrame entries are assigned to the weight edge attribute. When + an edge does not have a weight attribute, the value of the entry is set to + the number 1. For multiple (parallel) edges, the values of the entries + are determined by the 'multigraph_weight' parameter. The default is to + sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting Pandas DataFrame can be modified as follows: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> import numpy as np + >>> G = nx.Graph([(1, 1)]) + >>> df = nx.to_pandas_adjacency(G, dtype=int) + >>> df + 1 + 1 1 + >>> df.values[np.diag_indices_from(df)] *= 2 + >>> df + 1 + 1 2 + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> nx.to_pandas_adjacency(G, nodelist=[0, 1, 2], dtype=int) + 0 1 2 + 0 0 2 0 + 1 1 0 0 + 2 0 0 4 + + """ + import pandas as pd + + M = to_numpy_array( + G, + nodelist=nodelist, + dtype=dtype, + order=order, + multigraph_weight=multigraph_weight, + weight=weight, + nonedge=nonedge, + ) + if nodelist is None: + nodelist = list(G) + return pd.DataFrame(data=M, index=nodelist, columns=nodelist) + + +@nx._dispatch(graphs=None) +def from_pandas_adjacency(df, create_using=None): + r"""Returns a graph from Pandas DataFrame. 
+ + The Pandas DataFrame is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + df : Pandas DataFrame + An adjacency matrix representation of a graph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of df corresponds to an edge from i to j. + + If `df` has a single data type for each entry it will be converted to an + appropriate Python data type. + + If you have node attributes stored in a separate dataframe `df_nodes`, + you can load those attributes to the graph `G` using the following code: + + ``` + df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]}) + G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows()) + ``` + + If `df` has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + to_pandas_adjacency + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> df = pd.DataFrame([[1, 1], [2, 1]]) + >>> df + 0 1 + 0 1 1 + 1 2 1 + >>> G = nx.from_pandas_adjacency(df) + >>> G.name = "Graph from pandas adjacency matrix" + >>> print(G) + Graph named 'Graph from pandas adjacency matrix' with 2 nodes and 3 edges + """ + + try: + df = df[df.index] + except Exception as err: + missing = list(set(df.index).difference(set(df.columns))) + msg = f"{missing} not in columns" + raise nx.NetworkXError("Columns must match Indices.", msg) from err + + A = df.values + G = from_numpy_array(A, create_using=create_using) + + nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False) + return G + + +@nx._dispatch(preserve_edge_attrs=True) +def to_pandas_edgelist( + G, + source="source", + target="target", + nodelist=None, + dtype=None, + edge_key=None, +): + """Returns the graph edge list as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + source : str or int, optional + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int, optional + A valid column name (string or integer) for the target nodes (for the + directed case). + + nodelist : list, optional + Use only nodes specified in nodelist + + dtype : dtype, default None + Use to create the DataFrame. Data type to force. + Only a single dtype is allowed. If None, infer. + + edge_key : str or int or None, optional (default=None) + A valid column name (string or integer) for the edge keys (for the + multigraph case). If None, edge keys are not stored in the DataFrame. + + Returns + ------- + df : Pandas DataFrame + Graph edge list + + Examples + -------- + >>> G = nx.Graph( + ... [ + ... ("A", "B", {"cost": 1, "weight": 7}), + ... ("C", "E", {"cost": 9, "weight": 10}), + ... ] + ... 
) + >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"]) + >>> df[["source", "target", "cost", "weight"]] + source target cost weight + 0 A B 1 7 + 1 C E 9 10 + + >>> G = nx.MultiGraph([('A', 'B', {'cost': 1}), ('A', 'B', {'cost': 9})]) + >>> df = nx.to_pandas_edgelist(G, nodelist=['A', 'C'], edge_key='ekey') + >>> df[['source', 'target', 'cost', 'ekey']] + source target cost ekey + 0 A B 1 0 + 1 A B 9 1 + + """ + import pandas as pd + + if nodelist is None: + edgelist = G.edges(data=True) + else: + edgelist = G.edges(nodelist, data=True) + source_nodes = [s for s, _, _ in edgelist] + target_nodes = [t for _, t, _ in edgelist] + + all_attrs = set().union(*(d.keys() for _, _, d in edgelist)) + if source in all_attrs: + raise nx.NetworkXError(f"Source name {source!r} is an edge attr name") + if target in all_attrs: + raise nx.NetworkXError(f"Target name {target!r} is an edge attr name") + + nan = float("nan") + edge_attr = {k: [d.get(k, nan) for _, _, d in edgelist] for k in all_attrs} + + if G.is_multigraph() and edge_key is not None: + if edge_key in all_attrs: + raise nx.NetworkXError(f"Edge key name {edge_key!r} is an edge attr name") + edge_keys = [k for _, _, k in G.edges(keys=True)] + edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys} + else: + edgelistdict = {source: source_nodes, target: target_nodes} + + edgelistdict.update(edge_attr) + return pd.DataFrame(edgelistdict, dtype=dtype) + + +@nx._dispatch(graphs=None) +def from_pandas_edgelist( + df, + source="source", + target="target", + edge_attr=None, + create_using=None, + edge_key=None, +): + """Returns a graph from Pandas DataFrame containing an edge list. + + The Pandas DataFrame should contain at least two columns of node names and + zero or more columns of edge attributes. Each row will be processed as one + edge instance. + + Note: This function iterates over DataFrame.values, which is not + guaranteed to retain the data type across columns in the row. This is only + a problem if your row is entirely numeric and a mix of ints and floats. In + that case, all values will be returned as floats. See the + DataFrame.iterrows documentation for an example. + + Parameters + ---------- + df : Pandas DataFrame + An edge list representation of a graph + + source : str or int + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int + A valid column name (string or integer) for the target nodes (for the + directed case). + + edge_attr : str or int, iterable, True, or None + A valid column name (str or int) or iterable of column names that are + used to retrieve items and add them to the graph as edge attributes. + If `True`, all of the remaining columns will be added. + If `None`, no edge attributes are added to the graph. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_key : str or None, optional (default=None) + A valid column name for the edge keys (for a MultiGraph). The values in + this column are used for the edge keys when adding edges if create_using + is a multigraph. 
+ + If you have node attributes stored in a separate dataframe `df_nodes`, + you can load those attributes to the graph `G` using the following code: + + ``` + df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]}) + G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows()) + ``` + + See Also + -------- + to_pandas_edgelist + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> import numpy as np + >>> rng = np.random.RandomState(seed=5) + >>> ints = rng.randint(1, 11, size=(3, 2)) + >>> a = ["A", "B", "C"] + >>> b = ["D", "A", "E"] + >>> df = pd.DataFrame(ints, columns=["weight", "cost"]) + >>> df[0] = a + >>> df["b"] = b + >>> df[["weight", "cost", 0, "b"]] + weight cost 0 b + 0 4 7 A D + 1 7 1 B A + 2 10 9 C E + >>> G = nx.from_pandas_edgelist(df, 0, "b", ["weight", "cost"]) + >>> G["E"]["C"]["weight"] + 10 + >>> G["E"]["C"]["cost"] + 9 + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2], + ... "target": [2, 2, 3], + ... "weight": [3, 4, 5], + ... "color": ["red", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist(edges, edge_attr=True) + >>> G[0][2]["color"] + 'red' + + Build multigraph with custom keys: + + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2, 0], + ... "target": [2, 2, 3, 2], + ... "my_edge_key": ["A", "B", "C", "D"], + ... "weight": [3, 4, 5, 6], + ... "color": ["red", "blue", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist( + ... edges, + ... edge_key="my_edge_key", + ... edge_attr=["weight", "color"], + ... create_using=nx.MultiGraph(), + ... ) + >>> G[0][2] + AtlasView({'A': {'weight': 3, 'color': 'red'}, 'D': {'weight': 6, 'color': 'blue'}}) + + + """ + g = nx.empty_graph(0, create_using) + + if edge_attr is None: + g.add_edges_from(zip(df[source], df[target])) + return g + + reserved_columns = [source, target] + + # Additional columns requested + attr_col_headings = [] + attribute_data = [] + if edge_attr is True: + attr_col_headings = [c for c in df.columns if c not in reserved_columns] + elif isinstance(edge_attr, (list, tuple)): + attr_col_headings = edge_attr + else: + attr_col_headings = [edge_attr] + if len(attr_col_headings) == 0: + raise nx.NetworkXError( + f"Invalid edge_attr argument: No columns found with name: {attr_col_headings}" + ) + + try: + attribute_data = zip(*[df[col] for col in attr_col_headings]) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_attr argument: {edge_attr}" + raise nx.NetworkXError(msg) from err + + if g.is_multigraph(): + # => append the edge keys from the df to the bundled data + if edge_key is not None: + try: + multigraph_edge_keys = df[edge_key] + attribute_data = zip(attribute_data, multigraph_edge_keys) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_key argument: {edge_key}" + raise nx.NetworkXError(msg) from err + + for s, t, attrs in zip(df[source], df[target], attribute_data): + if edge_key is not None: + attrs, multigraph_edge_key = attrs + key = g.add_edge(s, t, key=multigraph_edge_key) + else: + key = g.add_edge(s, t) + + g[s][t][key].update(zip(attr_col_headings, attrs)) + else: + for s, t, attrs in zip(df[source], df[target], attribute_data): + g.add_edge(s, t) + g[s][t].update(zip(attr_col_headings, attrs)) + + return g + + +@nx._dispatch(edge_attrs="weight") +def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight="weight", format="csr"): + """Returns the graph adjacency matrix as a SciPy sparse array. 
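+
+    For instance, the default return format is CSR; a minimal sketch:
+
+    >>> A = nx.to_scipy_sparse_array(nx.path_graph(4))
+    >>> A.format
+    'csr'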
+ + Parameters + ---------- + G : graph + The NetworkX graph used to construct the sparse matrix. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. If None, then the + NumPy default is used. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} + The type of the matrix to be returned (default 'csr'). For + some algorithms different implementations of sparse matrices + can perform better. See [1]_ for details. + + Returns + ------- + A : SciPy sparse array + Graph adjacency matrix. + + Notes + ----- + For directed graphs, matrix entry i,j corresponds to an edge from i to j. + + The matrix entries are populated using the edge attribute held in + parameter weight. When an edge does not have that attribute, the + value of the entry is 1. + + For multiple edges the matrix values are the sums of the edge weights. + + When `nodelist` does not contain every node in `G`, the adjacency matrix + is built from the subgraph of `G` that is induced by the nodes in + `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting SciPy sparse array can be modified as follows: + + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_scipy_sparse_array(G) + >>> print(A.todense()) + [[1]] + >>> A.setdiag(A.diagonal() * 2) + >>> print(A.toarray()) + [[2]] + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> S = nx.to_scipy_sparse_array(G, nodelist=[0, 1, 2]) + >>> print(S.toarray()) + [[0 2 0] + [1 0 0] + [0 0 4]] + + References + ---------- + .. [1] Scipy Dev. 
References, "Sparse Matrices", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXError("Graph has no nodes or edges") + + if nodelist is None: + nodelist = list(G) + nlen = len(G) + else: + nlen = len(nodelist) + if nlen == 0: + raise nx.NetworkXError("nodelist has no nodes") + nodeset = set(G.nbunch_iter(nodelist)) + if nlen != len(nodeset): + for n in nodelist: + if n not in G: + raise nx.NetworkXError(f"Node {n} in nodelist is not in G") + raise nx.NetworkXError("nodelist contains duplicates.") + if nlen < len(G): + G = G.subgraph(nodelist) + + index = dict(zip(nodelist, range(nlen))) + coefficients = zip( + *((index[u], index[v], wt) for u, v, wt in G.edges(data=weight, default=1)) + ) + try: + row, col, data = coefficients + except ValueError: + # there is no edge in the subgraph + row, col, data = [], [], [] + + if G.is_directed(): + A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, nlen), dtype=dtype) + else: + # symmetrize matrix + d = data + data + r = row + col + c = col + row + # selfloop entries get double counted when symmetrizing + # so we subtract the data on the diagonal + selfloops = list(nx.selfloop_edges(G, data=weight, default=1)) + if selfloops: + diag_index, diag_data = zip(*((index[u], -wt) for u, v, wt in selfloops)) + d += diag_data + r += diag_index + c += diag_index + A = sp.sparse.coo_array((d, (r, c)), shape=(nlen, nlen), dtype=dtype) + try: + return A.asformat(format) + except ValueError as err: + raise nx.NetworkXError(f"Unknown sparse matrix format: {format}") from err + + +def _csr_gen_triples(A): + """Converts a SciPy sparse array in **Compressed Sparse Row** format to + an iterable of weighted edge triples. + + """ + nrows = A.shape[0] + data, indices, indptr = A.data, A.indices, A.indptr + for i in range(nrows): + for j in range(indptr[i], indptr[i + 1]): + yield i, int(indices[j]), data[j] + + +def _csc_gen_triples(A): + """Converts a SciPy sparse array in **Compressed Sparse Column** format to + an iterable of weighted edge triples. + + """ + ncols = A.shape[1] + data, indices, indptr = A.data, A.indices, A.indptr + for i in range(ncols): + for j in range(indptr[i], indptr[i + 1]): + yield int(indices[j]), i, data[j] + + +def _coo_gen_triples(A): + """Converts a SciPy sparse array in **Coordinate** format to an iterable + of weighted edge triples. + + """ + return ((int(i), int(j), d) for i, j, d in zip(A.row, A.col, A.data)) + + +def _dok_gen_triples(A): + """Converts a SciPy sparse array in **Dictionary of Keys** format to an + iterable of weighted edge triples. + + """ + for (r, c), v in A.items(): + yield r, c, v + + +def _generate_weighted_edges(A): + """Returns an iterable over (u, v, w) triples, where u and v are adjacent + vertices and w is the weight of the edge joining u and v. + + `A` is a SciPy sparse array (in any format). + + """ + if A.format == "csr": + return _csr_gen_triples(A) + if A.format == "csc": + return _csc_gen_triples(A) + if A.format == "dok": + return _dok_gen_triples(A) + # If A is in any other format (including COO), convert it to COO format. + return _coo_gen_triples(A.tocoo()) + + +@nx._dispatch(graphs=None) +def from_scipy_sparse_array( + A, parallel_edges=False, create_using=None, edge_attribute="weight" +): + """Creates a new graph from an adjacency matrix given as a SciPy sparse + array. 
+ + Parameters + ---------- + A: scipy.sparse array + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer matrix, then entry *(i, j)* in the matrix is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If it is False, then the entries in the matrix are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_attribute: string + Name of edge attribute to store matrix numeric value. The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. + + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (constructed from `create_using`) with parallel edges. + In this case, `edge_attribute` will be ignored. + + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the matrix `A` will be added to the + graph. + + Examples + -------- + >>> import scipy as sp + >>> A = sp.sparse.eye(2, 2, 1) + >>> G = nx.from_scipy_sparse_array(A) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array( + ... A, parallel_edges=True, create_using=nx.MultiGraph + ... ) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + """ + G = nx.empty_graph(0, create_using) + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n)) + # Create an iterable over (u, v, w) triples and for each triple, add an + # edge from u to v with weight w. + triples = _generate_weighted_edges(A) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. + if A.dtype.kind in ("i", "u") and G.is_multigraph() and parallel_edges: + chain = itertools.chain.from_iterable + # The following line is equivalent to: + # + # for (u, v) in edges: + # for d in range(A[u, v]): + # G.add_edge(u, v, weight=1) + # + triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples) + # If we are creating an undirected multigraph, only add the edges from the + # upper triangle of the matrix. Otherwise, add all the edges. 
This relies + # on the fact that the vertices created in the + # `_generated_weighted_edges()` function are actually the row/column + # indices for the matrix `A`. + # + # Without this check, we run into a problem where each edge is added twice + # when `G.add_weighted_edges_from()` is invoked below. + if G.is_multigraph() and not G.is_directed(): + triples = ((u, v, d) for u, v, d in triples if u <= v) + G.add_weighted_edges_from(triples, weight=edge_attribute) + return G + + +@nx._dispatch(edge_attrs="weight") # edge attrs may also be obtained from `dtype` +def to_numpy_array( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a NumPy array. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy array. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is ``None``, then the ordering is produced by ``G.nodes()``. + + dtype : NumPy data type, optional + A NumPy data type used to initialize the array. If None, then the NumPy + default is used. The dtype can be structured if `weight=None`, in which + case the dtype field names are used to look up edge attributes. The + result is a structured array where each named field in the dtype + corresponds to the adjacency for that edge attribute. See examples for + details. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + multigraph_weight : callable, optional + An function that determines how weights in multigraphs are handled. + The function should accept a sequence of weights and return a single + value. The default is to sum the weights of the multiple edges. + + weight : string or None optional (default = 'weight') + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. `weight` must be ``None`` if a structured + dtype is used. + + nonedge : array_like (default = 0.0) + The value used to represent non-edges in the adjacency matrix. + The array values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are array values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as ``nan``. + + Returns + ------- + A : NumPy ndarray + Graph adjacency matrix + + Raises + ------ + NetworkXError + If `dtype` is a structured dtype and `G` is a multigraph + ValueError + If `dtype` is a structured dtype and `weight` is not `None` + + See Also + -------- + from_numpy_array + + Notes + ----- + For directed graphs, entry ``i, j`` corresponds to an edge from ``i`` to ``j``. + + Entries in the adjacency matrix are given by the `weight` edge attribute. + When an edge does not have a weight attribute, the value of the entry is + set to the number 1. For multiple (parallel) edges, the values of the + entries are determined by the `multigraph_weight` parameter. The default is + to sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the adjacency matrix is + built from the subgraph of `G` that is induced by the nodes in `nodelist`. 
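+
+    For multigraphs, the reduction over parallel edges can be swapped via
+    `multigraph_weight`; a small sketch using ``min``:
+
+    >>> M = nx.MultiGraph([(0, 1, {"weight": 2}), (0, 1, {"weight": 5})])
+    >>> nx.to_numpy_array(M, multigraph_weight=min)
+    array([[0., 2.],
+           [2., 0.]])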
+
+    The convention used for self-loop edges in graphs is to assign the
+    diagonal array entry value to the weight attribute of the edge
+    (or the number 1 if the edge has no weight attribute). If the
+    alternate convention of doubling the edge weight is desired, the
+    resulting NumPy array can be modified as follows:
+
+    >>> import numpy as np
+    >>> G = nx.Graph([(1, 1)])
+    >>> A = nx.to_numpy_array(G)
+    >>> A
+    array([[1.]])
+    >>> A[np.diag_indices_from(A)] *= 2
+    >>> A
+    array([[2.]])
+
+    Examples
+    --------
+    >>> G = nx.MultiDiGraph()
+    >>> G.add_edge(0, 1, weight=2)
+    0
+    >>> G.add_edge(1, 0)
+    0
+    >>> G.add_edge(2, 2, weight=3)
+    0
+    >>> G.add_edge(2, 2)
+    1
+    >>> nx.to_numpy_array(G, nodelist=[0, 1, 2])
+    array([[0., 2., 0.],
+           [1., 0., 0.],
+           [0., 0., 4.]])
+
+    When the `nodelist` argument is used, nodes of `G` that do not appear in
+    `nodelist` (and their incident edges) are not included in the adjacency
+    matrix. Here is an example:
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(3, 1)
+    >>> G.add_edge(2, 0)
+    >>> G.add_edge(2, 1)
+    >>> G.add_edge(3, 0)
+    >>> nx.to_numpy_array(G, nodelist=[1, 2, 3])
+    array([[0., 1., 1.],
+           [1., 0., 0.],
+           [1., 0., 0.]])
+
+    This function can also be used to create adjacency matrices for multiple
+    edge attributes with structured dtypes:
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(0, 1, weight=10)
+    >>> G.add_edge(1, 2, cost=5)
+    >>> G.add_edge(2, 3, weight=3, cost=-4.0)
+    >>> dtype = np.dtype([("weight", int), ("cost", float)])
+    >>> A = nx.to_numpy_array(G, dtype=dtype, weight=None)
+    >>> A["weight"]
+    array([[ 0, 10,  0,  0],
+           [10,  0,  1,  0],
+           [ 0,  1,  0,  3],
+           [ 0,  0,  3,  0]])
+    >>> A["cost"]
+    array([[ 0.,  1.,  0.,  0.],
+           [ 1.,  0.,  5.,  0.],
+           [ 0.,  5.,  0., -4.],
+           [ 0.,  0., -4.,  0.]])
+
+    As stated above, the `nonedge` argument is especially useful when the
+    graph contains edges whose weight is 0. Setting the nonedge value to
+    something other than 0 makes it much easier to distinguish such
+    0-weighted edges from actual nonedges.
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(3, 1, weight=2)
+    >>> G.add_edge(2, 0, weight=0)
+    >>> G.add_edge(2, 1, weight=0)
+    >>> G.add_edge(3, 0, weight=1)
+    >>> nx.to_numpy_array(G, nonedge=-1.0)
+    array([[-1.,  2., -1.,  1.],
+           [ 2., -1.,  0., -1.],
+           [-1.,  0., -1.,  0.],
+           [ 1., -1.,  0., -1.]])
+    """
+    import numpy as np
+
+    if nodelist is None:
+        nodelist = list(G)
+    nlen = len(nodelist)
+
+    # Input validation
+    nodeset = set(nodelist)
+    if nodeset - set(G):
+        raise nx.NetworkXError(f"Nodes {nodeset - set(G)} in nodelist are not in G")
+    if len(nodeset) < nlen:
+        raise nx.NetworkXError("nodelist contains duplicates.")
+
+    A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order)
+
+    # Corner cases: empty nodelist or graph without any edges
+    if nlen == 0 or G.number_of_edges() == 0:
+        return A
+
+    # If dtype is structured and weight is None, use dtype field names as
+    # edge attributes
+    edge_attrs = None  # Only single edge attribute by default
+    if A.dtype.names:
+        if weight is None:
+            edge_attrs = dtype.names
+        else:
+            raise ValueError(
+                "Specifying `weight` not supported for structured dtypes.\n"
+                "To create adjacency matrices from structured dtypes, use `weight=None`."
+            )
+
+    # Map nodes to row/col in matrix
+    idx = dict(zip(nodelist, range(nlen)))
+    if len(nodelist) < len(G):
+        G = G.subgraph(nodelist).copy()
+
+    # Collect all edge weights and reduce with `multigraph_weight`
+    if G.is_multigraph():
+        if edge_attrs:
+            raise nx.NetworkXError(
+                "Structured arrays are not supported for MultiGraphs"
+            )
+        d = defaultdict(list)
+        for u, v, wt in G.edges(data=weight, default=1.0):
+            d[(idx[u], idx[v])].append(wt)
+        i, j = np.array(list(d.keys())).T  # indices
+        wts = [multigraph_weight(ws) for ws in d.values()]  # reduced weights
+    else:
+        i, j, wts = [], [], []
+
+    # Special branch: multi-attr adjacency from structured dtypes
+    if edge_attrs:
+        # Extract edges with all data
+        for u, v, data in G.edges(data=True):
+            i.append(idx[u])
+            j.append(idx[v])
+            wts.append(data)
+        # Map each attribute to the appropriate named field in the
+        # structured dtype
+        for attr in edge_attrs:
+            attr_data = [wt.get(attr, 1.0) for wt in wts]
+            A[attr][i, j] = attr_data
+            if not G.is_directed():
+                A[attr][j, i] = attr_data
+        return A
+
+    for u, v, wt in G.edges(data=weight, default=1.0):
+        i.append(idx[u])
+        j.append(idx[v])
+        wts.append(wt)
+
+    # Set array values with advanced indexing
+    A[i, j] = wts
+    if not G.is_directed():
+        A[j, i] = wts
+
+    return A
+
+
+@nx._dispatch(graphs=None)
+def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr="weight"):
+    """Returns a graph from a 2D NumPy array.
+
+    The 2D NumPy array is interpreted as an adjacency matrix for the graph.
+
+    Parameters
+    ----------
+    A : a 2D numpy.ndarray
+        An adjacency matrix representation of a graph
+
+    parallel_edges : Boolean
+        If this is True, `create_using` is a multigraph, and `A` is an
+        integer array, then entry *(i, j)* in the array is interpreted as the
+        number of parallel edges joining vertices *i* and *j* in the graph.
+        If it is False, then the entries in the array are interpreted as
+        the weight of a single edge joining the vertices.
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    edge_attr : string, optional (default="weight")
+        The attribute to which the array values are assigned on each edge. If
+        it is None, edge attributes will not be assigned.
+
+    Notes
+    -----
+    For directed graphs, explicitly mention create_using=nx.DiGraph,
+    and entry i,j of A corresponds to an edge from i to j.
+
+    If `create_using` is :class:`networkx.MultiGraph` or
+    :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the
+    entries of `A` are of type :class:`int`, then this function returns a
+    multigraph (of the same type as `create_using`) with parallel edges.
+
+    If `create_using` indicates an undirected multigraph, then only the edges
+    indicated by the upper triangle of the array `A` will be added to the
+    graph.
+
+    If `edge_attr` is Falsy (False or None), edge attributes will not be
+    assigned, and the array data will be treated like a binary mask of
+    edge presence or absence. Otherwise, the attributes will be assigned
+    as follows:
+
+    If the NumPy array has a single data type for each array entry, it
+    will be converted to an appropriate Python data type.
+
+    If the NumPy array has a user-specified compound data type, the names
+    of the data fields will be used as attribute keys in the resulting
+    NetworkX graph.
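+
+    For example, with a falsy `edge_attr` the array is used purely as an edge
+    mask, so no attribute data is attached to the resulting edges:
+
+    >>> import numpy as np
+    >>> A = np.array([[0, 1], [1, 0]])
+    >>> G = nx.from_numpy_array(A, edge_attr=None)
+    >>> G.edges(data=True)
+    EdgeDataView([(0, 1, {})])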
+ + See Also + -------- + to_numpy_array + + Examples + -------- + Simple integer weights on edges: + + >>> import numpy as np + >>> A = np.array([[1, 1], [2, 1]]) + >>> G = nx.from_numpy_array(A) + >>> G.edges(data=True) + EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})]) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = np.array([[1, 1], [1, 2]]) + >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = np.array([[1, 1], [1, 2]]) + >>> temp = nx.MultiGraph() + >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + User defined compound data type on edges: + + >>> dt = [("weight", float), ("cost", int)] + >>> A = np.array([[(1.0, 2)]], dtype=dt) + >>> G = nx.from_numpy_array(A) + >>> G.edges() + EdgeView([(0, 0)]) + >>> G[0][0]["cost"] + 2 + >>> G[0][0]["weight"] + 1.0 + + """ + kind_to_python_type = { + "f": float, + "i": int, + "u": int, + "b": bool, + "c": complex, + "S": str, + "U": str, + "V": "void", + } + G = nx.empty_graph(0, create_using) + if A.ndim != 2: + raise nx.NetworkXError(f"Input array must be 2D, not {A.ndim}") + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + dt = A.dtype + try: + python_type = kind_to_python_type[dt.kind] + except Exception as err: + raise TypeError(f"Unknown numpy data type: {dt}") from err + + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n)) + # Get a list of all the entries in the array with nonzero entries. These + # coordinates become edges in the graph. (convert to int from np.int64) + edges = ((int(e[0]), int(e[1])) for e in zip(*A.nonzero())) + # handle numpy constructed data type + if python_type == "void": + # Sort the fields by their offset, then by dtype, then by name. + fields = sorted( + (offset, dtype, name) for name, (dtype, offset) in A.dtype.fields.items() + ) + triples = ( + ( + u, + v, + {} + if edge_attr in [False, None] + else { + name: kind_to_python_type[dtype.kind](val) + for (_, dtype, name), val in zip(fields, A[u, v]) + }, + ) + for u, v in edges + ) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. 
+ elif python_type is int and G.is_multigraph() and parallel_edges: + chain = itertools.chain.from_iterable + # The following line is equivalent to: + # + # for (u, v) in edges: + # for d in range(A[u, v]): + # G.add_edge(u, v, weight=1) + # + if edge_attr in [False, None]: + triples = chain(((u, v, {}) for d in range(A[u, v])) for (u, v) in edges) + else: + triples = chain( + ((u, v, {edge_attr: 1}) for d in range(A[u, v])) for (u, v) in edges + ) + else: # basic data type + if edge_attr in [False, None]: + triples = ((u, v, {}) for u, v in edges) + else: + triples = ((u, v, {edge_attr: python_type(A[u, v])}) for u, v in edges) + # If we are creating an undirected multigraph, only add the edges from the + # upper triangle of the matrix. Otherwise, add all the edges. This relies + # on the fact that the vertices created in the + # `_generated_weighted_edges()` function are actually the row/column + # indices for the matrix `A`. + # + # Without this check, we run into a problem where each edge is added twice + # when `G.add_edges_from()` is invoked below. + if G.is_multigraph() and not G.is_directed(): + triples = ((u, v, d) for u, v, d in triples if u <= v) + G.add_edges_from(triples) + return G diff --git a/phivenv/Lib/site-packages/networkx/exception.py b/phivenv/Lib/site-packages/networkx/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..96694cc32dcfbb8307cf99b0fa939e2fa0f5a46d --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/exception.py @@ -0,0 +1,125 @@ +""" +********** +Exceptions +********** + +Base exceptions and errors for NetworkX. +""" + +__all__ = [ + "HasACycle", + "NodeNotFound", + "PowerIterationFailedConvergence", + "ExceededMaxIterations", + "AmbiguousSolution", + "NetworkXAlgorithmError", + "NetworkXException", + "NetworkXError", + "NetworkXNoCycle", + "NetworkXNoPath", + "NetworkXNotImplemented", + "NetworkXPointlessConcept", + "NetworkXUnbounded", + "NetworkXUnfeasible", +] + + +class NetworkXException(Exception): + """Base class for exceptions in NetworkX.""" + + +class NetworkXError(NetworkXException): + """Exception for a serious error in NetworkX""" + + +class NetworkXPointlessConcept(NetworkXException): + """Raised when a null graph is provided as input to an algorithm + that cannot use it. + + The null graph is sometimes considered a pointless concept [1]_, + thus the name of the exception. + + References + ---------- + .. [1] Harary, F. and Read, R. "Is the Null Graph a Pointless + Concept?" In Graphs and Combinatorics Conference, George + Washington University. New York: Springer-Verlag, 1973. + + """ + + +class NetworkXAlgorithmError(NetworkXException): + """Exception for unexpected termination of algorithms.""" + + +class NetworkXUnfeasible(NetworkXAlgorithmError): + """Exception raised by algorithms trying to solve a problem + instance that has no feasible solution.""" + + +class NetworkXNoPath(NetworkXUnfeasible): + """Exception for algorithms that should return a path when running + on graphs where such a path does not exist.""" + + +class NetworkXNoCycle(NetworkXUnfeasible): + """Exception for algorithms that should return a cycle when running + on graphs where such a cycle does not exist.""" + + +class HasACycle(NetworkXException): + """Raised if a graph has a cycle when an algorithm expects that it + will have no cycles. 
+
+    """
+
+
+class NetworkXUnbounded(NetworkXAlgorithmError):
+    """Exception raised by algorithms trying to solve a maximization
+    or a minimization problem instance that is unbounded."""
+
+
+class NetworkXNotImplemented(NetworkXException):
+    """Exception raised by algorithms not implemented for a type of graph."""
+
+
+class NodeNotFound(NetworkXException):
+    """Exception raised if requested node is not present in the graph"""
+
+
+class AmbiguousSolution(NetworkXException):
+    """Raised if more than one valid solution exists for an intermediary step
+    of an algorithm.
+
+    In the face of ambiguity, refuse the temptation to guess.
+    This may occur, for example, when trying to determine the
+    bipartite node sets in a disconnected bipartite graph when
+    computing bipartite matchings.
+
+    """
+
+
+class ExceededMaxIterations(NetworkXException):
+    """Raised if a loop iterates too many times without breaking.
+
+    This may occur, for example, in an algorithm that computes
+    progressively better approximations to a value but exceeds an
+    iteration bound specified by the user.
+
+    """
+
+
+class PowerIterationFailedConvergence(ExceededMaxIterations):
+    """Raised when the power iteration method fails to converge within a
+    specified iteration limit.
+
+    `num_iterations` is the number of iterations that have been
+    completed when this exception was raised.
+
+    """
+
+    def __init__(self, num_iterations, *args, **kw):
+        msg = f"power iteration failed to converge within {num_iterations} iterations"
+        # Note: `super().__init__` is already bound, so `self` must not be
+        # passed again as the first argument.
+        super().__init__(msg, *args, **kw)
diff --git a/phivenv/Lib/site-packages/networkx/lazy_imports.py b/phivenv/Lib/site-packages/networkx/lazy_imports.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cf7576ac036bbabfe0b50a9fedc91c14682bc4e
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/lazy_imports.py
@@ -0,0 +1,190 @@
+import importlib
+import importlib.util
+import inspect
+import os
+import sys
+import types
+
+__all__ = ["attach", "_lazy_import"]
+
+
+def attach(module_name, submodules=None, submod_attrs=None):
+    """Attach lazily loaded submodules, and functions or other attributes.
+
+    Typically, modules import submodules and attributes as follows::
+
+        import mysubmodule
+        import anothersubmodule
+
+        from .foo import someattr
+
+    The idea of this function is to replace the `__init__.py`
+    module's `__getattr__`, `__dir__`, and `__all__` attributes such that
+    all imports work exactly the way they normally would, except that the
+    actual import is delayed until the resulting module object is first used.
+
+    The typical way to call this function, replacing the above imports, is::
+
+        __getattr__, __lazy_dir__, __all__ = lazy.attach(
+            __name__,
+            ['mysubmodule', 'anothersubmodule'],
+            {'foo': 'someattr'}
+        )
+
+    This functionality requires Python 3.7 or higher.
+
+    Parameters
+    ----------
+    module_name : str
+        Typically use __name__.
+    submodules : set
+        Set of submodules to import lazily.
+    submod_attrs : dict
+        Dictionary of submodule -> list of attributes / functions.
+        These attributes are imported as they are used.
+
+    Returns
+    -------
+    __getattr__, __dir__, __all__
+
+    """
+    if submod_attrs is None:
+        submod_attrs = {}
+
+    if submodules is None:
+        submodules = set()
+    else:
+        submodules = set(submodules)
+
+    attr_to_modules = {
+        attr: mod for mod, attrs in submod_attrs.items() for attr in attrs
+    }
+
+    __all__ = list(submodules | attr_to_modules.keys())
+
+    def __getattr__(name):
+        if name in submodules:
+            return importlib.import_module(f"{module_name}.{name}")
+        elif name in attr_to_modules:
+            submod = importlib.import_module(f"{module_name}.{attr_to_modules[name]}")
+            return getattr(submod, name)
+        else:
+            raise AttributeError(f"No {module_name} attribute {name}")
+
+    def __dir__():
+        return __all__
+
+    if os.environ.get("EAGER_IMPORT", ""):
+        for attr in set(attr_to_modules.keys()) | submodules:
+            __getattr__(attr)
+
+    return __getattr__, __dir__, list(__all__)
+
+
+class DelayedImportErrorModule(types.ModuleType):
+    def __init__(self, frame_data, *args, **kwargs):
+        self.__frame_data = frame_data
+        super().__init__(*args, **kwargs)
+
+    def __getattr__(self, x):
+        if x in ("__class__", "__file__", "__frame_data"):
+            super().__getattr__(x)
+        else:
+            fd = self.__frame_data
+            raise ModuleNotFoundError(
+                f"No module named '{fd['spec']}'\n\n"
+                "This error is lazily reported, having originally occurred in\n"
+                f'  File {fd["filename"]}, line {fd["lineno"]}, in {fd["function"]}\n\n'
+                f'----> {"".join(fd["code_context"] or "").strip()}'
+            )
+
+
+def _lazy_import(fullname):
+    """Return a lazily imported proxy for a module or library.
+
+    Warning
+    -------
+    Importing using this function can currently cause trouble
+    when the user tries to import from a subpackage of a module before
+    the package is fully imported. In particular, this idiom may not work:
+
+      np = _lazy_import("numpy")
+      from numpy.lib import recfunctions
+
+    This is due to a difference in the way Python's LazyLoader handles
+    subpackage imports compared to the normal import process. Hopefully
+    we will get Python's LazyLoader to fix this, or find a workaround.
+    In the meantime, this is a potential problem.
+
+    The workaround is to import numpy before importing from the subpackage.
+
+    Notes
+    -----
+    We often see the following pattern::
+
+      def myfunc():
+          import scipy as sp
+          sp.argmin(...)
+          ....
+
+    This is to prevent a library, in this case `scipy`, from being
+    imported at function definition time, since that can be slow.
+
+    This function provides a proxy module that, upon access, imports
+    the actual module. So the idiom equivalent to the above example is::
+
+      sp = _lazy_import("scipy")
+
+      def myfunc():
+          sp.argmin(...)
+          ....
+
+    The initial import time is fast because the actual import is delayed
+    until the first attribute is requested. The overall import time may
+    decrease as well for users that don't make use of large portions
+    of the library.
+
+    Parameters
+    ----------
+    fullname : str
+        The full name of the package or subpackage to import. For example::
+
+          sp = _lazy_import('scipy')  # import scipy as sp
+          spla = _lazy_import('scipy.linalg')  # import scipy.linalg as spla
+
+    Returns
+    -------
+    pm : importlib.util._LazyModule
+        Proxy module. Can be used like any regularly imported module.
+        Actual loading of the module occurs upon first attribute request.
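+
+    Examples
+    --------
+    A minimal sketch of the intended use; the imported module here is only
+    illustrative, and the calls are skipped when doctests run:
+
+    >>> json = _lazy_import("json")  # doctest: +SKIP
+    >>> json.dumps({"a": 1})  # first attribute access triggers the import  # doctest: +SKIP
+    '{"a": 1}'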
+
+    """
+    try:
+        return sys.modules[fullname]
+    except KeyError:
+        pass
+
+    # Not previously loaded -- look it up
+    spec = importlib.util.find_spec(fullname)
+
+    if spec is None:
+        try:
+            parent = inspect.stack()[1]
+            frame_data = {
+                "spec": fullname,
+                "filename": parent.filename,
+                "lineno": parent.lineno,
+                "function": parent.function,
+                "code_context": parent.code_context,
+            }
+            return DelayedImportErrorModule(frame_data, "DelayedImportErrorModule")
+        finally:
+            del parent
+
+    module = importlib.util.module_from_spec(spec)
+    sys.modules[fullname] = module
+
+    loader = importlib.util.LazyLoader(spec.loader)
+    loader.exec_module(module)
+
+    return module
diff --git a/phivenv/Lib/site-packages/networkx/relabel.py b/phivenv/Lib/site-packages/networkx/relabel.py
new file mode 100644
index 0000000000000000000000000000000000000000..67b738f508dea93c0a252e0d1a355e2dee8397f3
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/relabel.py
@@ -0,0 +1,285 @@
+import networkx as nx
+
+__all__ = ["convert_node_labels_to_integers", "relabel_nodes"]
+
+
+@nx._dispatch(preserve_all_attrs=True)
+def relabel_nodes(G, mapping, copy=True):
+    """Relabel the nodes of the graph G according to a given mapping.
+
+    The original node ordering may not be preserved if `copy` is `False` and the
+    mapping includes overlap between old and new labels.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    mapping : dictionary
+        A dictionary with the old labels as keys and new labels as values.
+        A partial mapping is allowed. Mapping two nodes to a single node is allowed.
+        Any non-node keys in the mapping are ignored.
+
+    copy : bool (optional, default=True)
+        If True return a copy, or if False relabel the nodes in place.
+
+    Examples
+    --------
+    To create a new graph with nodes relabeled according to a given
+    dictionary:
+
+    >>> G = nx.path_graph(3)
+    >>> sorted(G)
+    [0, 1, 2]
+    >>> mapping = {0: "a", 1: "b", 2: "c"}
+    >>> H = nx.relabel_nodes(G, mapping)
+    >>> sorted(H)
+    ['a', 'b', 'c']
+
+    Nodes can be relabeled with any hashable object, including numbers
+    and strings:
+
+    >>> import string
+    >>> G = nx.path_graph(26)  # nodes are integers 0 through 25
+    >>> sorted(G)[:3]
+    [0, 1, 2]
+    >>> mapping = dict(zip(G, string.ascii_lowercase))
+    >>> G = nx.relabel_nodes(G, mapping)  # nodes are characters a through z
+    >>> sorted(G)[:3]
+    ['a', 'b', 'c']
+    >>> mapping = dict(zip(G, range(1, 27)))
+    >>> G = nx.relabel_nodes(G, mapping)  # nodes are integers 1 through 26
+    >>> sorted(G)[:3]
+    [1, 2, 3]
+
+    To perform a partial in-place relabeling, provide a dictionary
+    mapping only a subset of the nodes, and set the `copy` keyword
+    argument to False:
+
+    >>> G = nx.path_graph(3)  # nodes 0-1-2
+    >>> mapping = {0: "a", 1: "b"}  # 0->'a' and 1->'b'
+    >>> G = nx.relabel_nodes(G, mapping, copy=False)
+    >>> sorted(G, key=str)
+    [2, 'a', 'b']
+
+    A mapping can also be given as a function:
+
+    >>> G = nx.path_graph(3)
+    >>> H = nx.relabel_nodes(G, lambda x: x ** 2)
+    >>> list(H)
+    [0, 1, 4]
+
+    In a multigraph, relabeling two or more nodes to the same new node
+    will retain all edges, but may change the edge keys in the process:
+
+    >>> G = nx.MultiGraph()
+    >>> G.add_edge(0, 1, value="a")  # returns the key for this edge
+    0
+    >>> G.add_edge(0, 2, value="b")
+    0
+    >>> G.add_edge(0, 3, value="c")
+    0
+    >>> mapping = {1: 4, 2: 4, 3: 4}
+    >>> H = nx.relabel_nodes(G, mapping, copy=True)
+    >>> print(H[0])
+    {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}}
+
+    This works for in-place relabeling too:
+
+    >>> G = 
nx.relabel_nodes(G, mapping, copy=False)
+    >>> print(G[0])
+    {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}}
+
+    Notes
+    -----
+    Only the nodes specified in the mapping will be relabeled.
+    Any non-node keys in the mapping are ignored.
+
+    The keyword setting copy=False modifies the graph in place.
+    ``relabel_nodes`` avoids naming collisions by building a
+    directed graph from ``mapping`` which specifies the order of
+    relabelings. Naming collisions, such as a->b, b->c, are ordered
+    such that "b" gets renamed to "c" before "a" gets renamed "b".
+    In cases of circular mappings (e.g. a->b, b->a), modifying the
+    graph is not possible in-place and an exception is raised.
+    In that case, use copy=True.
+
+    If a relabel operation on a multigraph would cause two or more
+    edges to have the same source, target and key, the second edge must
+    be assigned a new key to retain all edges. The new key is set
+    to the lowest non-negative integer not already used as a key
+    for edges between these two nodes. Note that this means non-numeric
+    keys may be replaced by numeric keys.
+
+    See Also
+    --------
+    convert_node_labels_to_integers
+    """
+    # `mapping` may be a dict or any callable old_label -> new_label
+    # (e.g. a function or a class such as `str`); normalize it to a
+    # dictionary here regardless.
+    m = {n: mapping(n) for n in G} if callable(mapping) else mapping
+
+    if copy:
+        return _relabel_copy(G, m)
+    else:
+        return _relabel_inplace(G, m)
+
+
+def _relabel_inplace(G, mapping):
+    if len(mapping.keys() & mapping.values()) > 0:
+        # label sets overlap
+        # can we topologically sort and still do the relabeling?
+        D = nx.DiGraph(list(mapping.items()))
+        D.remove_edges_from(nx.selfloop_edges(D))
+        try:
+            nodes = reversed(list(nx.topological_sort(D)))
+        except nx.NetworkXUnfeasible as err:
+            raise nx.NetworkXUnfeasible(
+                "The node label sets are overlapping and no ordering can "
+                "resolve the mapping. Use copy=True."
+            ) from err
+    else:
+        # non-overlapping label sets, sort them in the order of G nodes
+        nodes = [n for n in G if n in mapping]
+
+    multigraph = G.is_multigraph()
+    directed = G.is_directed()
+
+    for old in nodes:
+        # Test that old is in both mapping and G, otherwise ignore.
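+        # (`mapping[old]` raises KeyError when `old` has no new label, and
+        # `G.nodes[old]` raises KeyError when `old` is not a node of G;
+        # non-node mapping keys are ignored, as documented above.)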
+        try:
+            new = mapping[old]
+            G.add_node(new, **G.nodes[old])
+        except KeyError:
+            continue
+        if new == old:
+            continue
+        if multigraph:
+            new_edges = [
+                (new, new if old == target else target, key, data)
+                for (_, target, key, data) in G.edges(old, data=True, keys=True)
+            ]
+            if directed:
+                new_edges += [
+                    (new if old == source else source, new, key, data)
+                    for (source, _, key, data) in G.in_edges(old, data=True, keys=True)
+                ]
+            # Ensure new edges won't overwrite existing ones
+            seen = set()
+            for i, (source, target, key, data) in enumerate(new_edges):
+                if target in G[source] and key in G[source][target]:
+                    new_key = 0 if not isinstance(key, (int, float)) else key
+                    while new_key in G[source][target] or (target, new_key) in seen:
+                        new_key += 1
+                    new_edges[i] = (source, target, new_key, data)
+                    seen.add((target, new_key))
+        else:
+            new_edges = [
+                (new, new if old == target else target, data)
+                for (_, target, data) in G.edges(old, data=True)
+            ]
+            if directed:
+                new_edges += [
+                    (new if old == source else source, new, data)
+                    for (source, _, data) in G.in_edges(old, data=True)
+                ]
+        G.remove_node(old)
+        G.add_edges_from(new_edges)
+    return G
+
+
+def _relabel_copy(G, mapping):
+    H = G.__class__()
+    H.add_nodes_from(mapping.get(n, n) for n in G)
+    H._node.update((mapping.get(n, n), d.copy()) for n, d in G.nodes.items())
+    if G.is_multigraph():
+        new_edges = [
+            (mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy())
+            for (n1, n2, k, d) in G.edges(keys=True, data=True)
+        ]
+
+        # check for conflicting edge-keys
+        undirected = not G.is_directed()
+        seen_edges = set()
+        for i, (source, target, key, data) in enumerate(new_edges):
+            while (source, target, key) in seen_edges:
+                if not isinstance(key, (int, float)):
+                    key = 0
+                key += 1
+            seen_edges.add((source, target, key))
+            if undirected:
+                seen_edges.add((target, source, key))
+            new_edges[i] = (source, target, key, data)
+
+        H.add_edges_from(new_edges)
+    else:
+        H.add_edges_from(
+            (mapping.get(n1, n1), mapping.get(n2, n2), d.copy())
+            for (n1, n2, d) in G.edges(data=True)
+        )
+    H.graph.update(G.graph)
+    return H
+
+
+@nx._dispatch(
+    preserve_edge_attrs=True, preserve_node_attrs=True, preserve_graph_attrs=True
+)
+def convert_node_labels_to_integers(
+    G, first_label=0, ordering="default", label_attribute=None
+):
+    """Returns a copy of the graph G with the nodes relabeled using
+    consecutive integers.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    first_label : int, optional (default=0)
+        An integer specifying the starting offset in numbering nodes.
+        The new integer labels are numbered first_label, ..., n-1+first_label.
+
+    ordering : string
+        "default" : inherit node ordering from G.nodes()
+        "sorted" : inherit node ordering from sorted(G.nodes())
+        "increasing degree" : nodes are sorted by increasing degree
+        "decreasing degree" : nodes are sorted by decreasing degree
+
+    label_attribute : string, optional (default=None)
+        Name of node attribute to store old label. If None no attribute
+        is created.
+
+    Notes
+    -----
+    Node and edge attribute data are copied to the new (relabeled) graph.
+
+    There is no guarantee that the relabeling will assign the same integers
+    to corresponding nodes in two different (even identical) graphs.
+    Use the `ordering` argument to control how nodes are ordered before
+    they are numbered.
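+
+    For example, `first_label` offsets the numbering, and the old labels can
+    be kept in a node attribute:
+
+    >>> G = nx.Graph([("a", "b"), ("b", "c")])
+    >>> H = nx.convert_node_labels_to_integers(G, first_label=1, label_attribute="old")
+    >>> sorted(H)
+    [1, 2, 3]
+    >>> H.nodes[1]["old"]
+    'a'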
+ + See Also + -------- + relabel_nodes + """ + N = G.number_of_nodes() + first_label + if ordering == "default": + mapping = dict(zip(G.nodes(), range(first_label, N))) + elif ordering == "sorted": + nlist = sorted(G.nodes()) + mapping = dict(zip(nlist, range(first_label, N))) + elif ordering == "increasing degree": + dv_pairs = [(d, n) for (n, d) in G.degree()] + dv_pairs.sort() # in-place sort from lowest to highest degree + mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N))) + elif ordering == "decreasing degree": + dv_pairs = [(d, n) for (n, d) in G.degree()] + dv_pairs.sort() # in-place sort from lowest to highest degree + dv_pairs.reverse() + mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N))) + else: + raise nx.NetworkXError(f"Unknown node ordering: {ordering}") + H = relabel_nodes(G, mapping) + # create node attribute with the old label + if label_attribute is not None: + nx.set_node_attributes(H, {v: k for k, v in mapping.items()}, label_attribute) + return H