[+] Initial commit

This commit is contained in:
Hykilpikonna
2021-12-22 03:30:29 -05:00
commit ee16ca411e
258 changed files with 177309 additions and 0 deletions
+2
View File
@@ -0,0 +1,2 @@
# Auto detect text files and perform LF normalization
* text=auto
+119
View File
@@ -0,0 +1,119 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# Custom
.idea
._*
+11
View File
@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>
+4
View File
@@ -0,0 +1,4 @@
tensorflow==2.2.0
plaidml-keras==0.7.0
inaSpeechSegmenter==0.6.8
+22
View File
@@ -0,0 +1,22 @@
The MIT License
Copyright (c) 2018 Ina (David Doukhan - http://www.ina.fr/)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
+241
View File
@@ -0,0 +1,241 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.
.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.
.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.
.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.
.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.
.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.
.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.
.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170
#>
Param(
[Parameter(Mandatory = $false)]
[String]
$VenvDir,
[Parameter(Mandatory = $false)]
[String]
$Prompt
)
<# Function declarations --------------------------------------------------- #>
<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.
.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.
#>
# Undo everything the activation script changed, restoring the session to its
# pre-activation state. Restoration order: prompt, PYTHONHOME, PATH, then the
# VIRTUAL_ENV marker and the prompt-prefix variable.
function global:deactivate ([switch]$NonDestructive) {
    # Revert to original values

    # The prior prompt:
    if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
        Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
        Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
    }

    # The prior PYTHONHOME:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
        Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
    }

    # The prior PATH:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
        Remove-Item -Path Env:_OLD_VIRTUAL_PATH
    }

    # Just remove the VIRTUAL_ENV altogether:
    if (Test-Path -Path Env:VIRTUAL_ENV) {
        Remove-Item -Path env:VIRTUAL_ENV
    }

    # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
    if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
        Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
    }

    # Leave deactivate function in the global namespace if requested:
    # (-NonDestructive is used by the activation script itself so it can call
    # deactivate before re-activating without losing the function.)
    if (-not $NonDestructive) {
        Remove-Item -Path function:deactivate
    }
}
<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.

For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.

If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.

.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
    [String]
    $ConfigDir
) {
    Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"

    # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
    $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue

    # An empty map will be returned if no config file is found.
    $pyvenvConfig = @{ }

    if ($pyvenvConfigPath) {
        Write-Verbose "File exists, parse `key = value` lines"
        $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath

        $pyvenvConfigContent | ForEach-Object {
            # Split on the FIRST '=' only (limit 2), so values may themselves
            # contain '=' characters.
            $keyval = $PSItem -split "\s*=\s*", 2
            if ($keyval[0] -and $keyval[1]) {
                $val = $keyval[1]

                # Remove extraneous quotations around a string value.
                if ("'""".Contains($val.Substring(0, 1))) {
                    $val = $val.Substring(1, $val.Length - 2)
                }

                $pyvenvConfig[$keyval[0]] = $val
                Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
            }
        }
    }
    return $pyvenvConfig
}
<# Begin Activate script --------------------------------------------------- #>

# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath

Write-Verbose "Activation script is located in path: '$VenvExecPath'"
# NOTE(review): the two messages below are missing a closing quote; this is
# an upstream quirk of the generated script and is cosmetic only.
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"

# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
    Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
    Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
    $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
    Write-Verbose "VenvDir=$VenvDir"
}

# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir

# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
    Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
    Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
    if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
        Write-Verbose "  Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
        $Prompt = $pyvenvCfg['prompt'];
    }
    else {
        # NOTE(review): "virutal" typo below is in the upstream template;
        # message text deliberately left untouched here.
        Write-Verbose "  Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virutal environment)"
        Write-Verbose "  Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
        $Prompt = Split-Path -Path $venvDir -Leaf
    }
}

Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"

# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive

# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir

if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {

    Write-Verbose "Setting prompt to '$Prompt'"

    # Set the prompt to include the env name
    # Make sure _OLD_VIRTUAL_PROMPT is global
    function global:_OLD_VIRTUAL_PROMPT { "" }
    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt

    function global:prompt {
        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
        _OLD_VIRTUAL_PROMPT
    }
}

# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
    Remove-Item -Path Env:PYTHONHOME
}

# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
+76
View File
@@ -0,0 +1,76 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly

# Restore the environment to its pre-activation state. Called with
# "nondestructive" by the activation code itself so the function survives.
deactivate () {
    # reset old environment variables
    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
        PATH="${_OLD_VIRTUAL_PATH:-}"
        export PATH
        unset _OLD_VIRTUAL_PATH
    fi
    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
        export PYTHONHOME
        unset _OLD_VIRTUAL_PYTHONHOME
    fi

    # This should detect bash and zsh, which have a hash command that must
    # be called to get it to forget past commands. Without forgetting
    # past commands the $PATH changes we made may not be respected
    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
        hash -r
    fi

    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
        PS1="${_OLD_VIRTUAL_PS1:-}"
        export PS1
        unset _OLD_VIRTUAL_PS1
    fi

    unset VIRTUAL_ENV
    if [ ! "${1:-}" = "nondestructive" ] ; then
        # Self destruct!
        unset -f deactivate
    fi
}

# unset irrelevant variables
deactivate nondestructive

VIRTUAL_ENV="/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8"
export VIRTUAL_ENV

# Prepend the venv's bin directory so its python shadows the system one.
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH

# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
    unset PYTHONHOME
fi

if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
    _OLD_VIRTUAL_PS1="${PS1:-}"
    # "(venv8) " was baked in by the venv generator; the else-branch is the
    # template fallback for when no prompt was configured.
    if [ "x(venv8) " != x ] ; then
        PS1="(venv8) ${PS1:-}"
    else
        if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
            # special case for Aspen magic directories
            # see https://aspen.io/
            PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
        else
            PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
        fi
    fi
    export PS1
fi

# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
    hash -r
fi
+37
View File
@@ -0,0 +1,37 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>

# Restore the pre-activation PATH and prompt, then remove this alias unless
# called with "nondestructive".
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'

# Unset irrelevant variables.
deactivate nondestructive

setenv VIRTUAL_ENV "/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8"

# Prepend the venv's bin directory so its python shadows the system one.
set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/bin:$PATH"

set _OLD_VIRTUAL_PROMPT="$prompt"

if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
    if ("venv8" != "") then
        set env_name = "venv8"
    else
        # FIX: the generated template tested `basename "VIRTUAL_ENV"` (the
        # literal string), so this Aspen special case could never match.
        # Dereference the variable as "$VIRTUAL_ENV" instead.
        if (`basename "$VIRTUAL_ENV"` == "__") then
            # special case for Aspen magic directories
            # see https://aspen.io/
            set env_name = `basename \`dirname "$VIRTUAL_ENV"\``
        else
            set env_name = `basename "$VIRTUAL_ENV"`
        endif
    endif
    set prompt = "[$env_name] $prompt"
    unset env_name
endif

alias pydoc python -m pydoc

rehash
+75
View File
@@ -0,0 +1,75 @@
# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org)
# you cannot run it directly

function deactivate -d "Exit virtualenv and return to normal shell environment"
    # reset old environment variables
    if test -n "$_OLD_VIRTUAL_PATH"
        set -gx PATH $_OLD_VIRTUAL_PATH
        set -e _OLD_VIRTUAL_PATH
    end
    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
        set -e _OLD_VIRTUAL_PYTHONHOME
    end

    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
        # Put the saved prompt function back in place of our override.
        functions -e fish_prompt
        set -e _OLD_FISH_PROMPT_OVERRIDE
        functions -c _old_fish_prompt fish_prompt
        functions -e _old_fish_prompt
    end

    set -e VIRTUAL_ENV
    if test "$argv[1]" != "nondestructive"
        # Self destruct!
        functions -e deactivate
    end
end

# unset irrelevant variables
deactivate nondestructive

set -gx VIRTUAL_ENV "/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8"

# Prepend the venv's bin directory so its python shadows the system one.
set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/bin" $PATH

# unset PYTHONHOME if set
if set -q PYTHONHOME
    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
    set -e PYTHONHOME
end

if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
    # fish uses a function instead of an env var to generate the prompt.

    # save the current fish_prompt function as the function _old_fish_prompt
    functions -c fish_prompt _old_fish_prompt

    # with the original prompt function renamed, we can override with our own.
    function fish_prompt
        # Save the return status of the last command
        set -l old_status $status

        # Prompt override? ("(venv8) " was baked in by the venv generator.)
        if test -n "(venv8) "
            printf "%s%s" "(venv8) " (set_color normal)
        else
            # ...Otherwise, prepend env
            set -l _checkbase (basename "$VIRTUAL_ENV")
            if test $_checkbase = "__"
                # special case for Aspen magic directories
                # see https://aspen.io/
                printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal)
            else
                printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal)
            end
        end

        # Restore the return status of the previous command, so the wrapped
        # _old_fish_prompt sees the same $status the user's command produced.
        echo "exit $old_status" | .
        _old_fish_prompt
    end

    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from tensorflow_estimator.python.estimator.tools.checkpoint_converter import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
Executable
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from numpy.f2py.f2py2e import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from numpy.f2py.f2py2e import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from numpy.f2py.f2py2e import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from fontTools.__main__ import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from google_auth_oauthlib.tool.__main__ import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from imageio.__main__ import download_bin_main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(download_bin_main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from imageio.__main__ import remove_bin_main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(remove_bin_main())
+79
View File
@@ -0,0 +1,79 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# encoding: utf-8

# The MIT License

# Copyright (c) 2018 Ina (David Doukhan, Eliott Lechapt - http://www.ina.fr/)

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

# Command-line front end: run speech/music(/noise) and male/female
# segmentation over a set of media files and write one segmentation file
# (CSV or Praat TextGrid) per input.

import argparse
import glob
import os
import distutils.util
import warnings

# TODO
# * allow to use external activity or speech music segmentations
# * describe URL management in help and interference with glob

description = """Do Speech/Music(/Noise) and Male/Female segmentation and store segmentations into CSV files. Segments labelled 'noEnergy' are discarded from music, noise, speech and gender analysis. 'speech', 'male' and 'female' labels include speech over music and speech over noise. 'music' and 'noise' labels are pure segments that are not supposed to contain speech.
"""
epilog = """
Detailled description of this framework is presented in the following study:
Doukhan, D., Carrive, J., Vallet, F., Larcher, A., & Meignier, S. (2018, April). An open-source speaker gender detection framework for monitoring gender equality. In 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (pp. 5214-5218). IEEE.
"""

# Configure command line parsing
parser = argparse.ArgumentParser(description=description, epilog=epilog)
parser.add_argument('-i', '--input', nargs='+', help='Input media to analyse. May be a full path to a media (/home/david/test.mp3), a list of full paths (/home/david/test.mp3 /tmp/mymedia.avi), a regex input pattern ("/home/david/myaudiobooks/*.mp3"), an url with http protocol (http://url_of_the_file)', required=True)
parser.add_argument('-o', '--output_directory', help='Directory used to store segmentations. Resulting segmentations have same base name as the corresponding input media, with csv extension. Ex: mymedia.MPG will result in mymedia.csv', required=True)
parser.add_argument('-d', '--vad_engine', choices=['sm', 'smn'], default='smn', help="Voice activity detection (VAD) engine to be used (default: 'smn'). 'smn' split signal into 'speech', 'music' and 'noise' (better). 'sm' split signal into 'speech' and 'music' and do not take noise into account, which is either classified as music or speech. Results presented in ICASSP were obtained using 'sm' option")
# FIX: default was 'True', which is inconsistent with choices=['true', 'false']
# and with the help text "(default: 'true')". 'true' is behaviourally
# identical (strtobool accepts both spellings) but self-consistent.
parser.add_argument('-g', '--detect_gender', choices=['true', 'false'], default='true', help="(default: 'true'). If set to 'true', segments detected as speech will be splitted into 'male' and 'female' segments. If set to 'false', segments corresponding to speech will be labelled as 'speech' (faster)")
parser.add_argument('-b', '--ffmpeg_binary', default='ffmpeg', help='Your custom binary of ffmpeg', required=False)
parser.add_argument('-e', '--export_format', choices=['csv', 'textgrid'], default='csv', help="(default: 'csv'). If set to 'csv', result will be exported in csv. If set to 'textgrid', results will be exported to praat Textgrid")
args = parser.parse_args()

# Preprocess arguments and check their consistency
input_files = []
for e in args.input:
    if e.startswith("http"):
        # URLs are passed through untouched; globbing applies to local paths only.
        input_files += [e]
    else:
        input_files += glob.glob(e)
assert len(input_files) > 0, 'No existing media selected for analysis! Bad values provided to -i (%s)' % args.input

odir = args.output_directory.strip(" \t\n\r").rstrip('/')
assert os.access(odir, os.W_OK), 'Directory %s is not writable!' % odir

# Do processings
from inaSpeechSegmenter import Segmenter, seg2csv

# load neural network into memory, may last few seconds
detect_gender = bool(distutils.util.strtobool(args.detect_gender))
seg = Segmenter(vad_engine=args.vad_engine, detect_gender=detect_gender, ffmpeg=args.ffmpeg_binary)

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    base = [os.path.splitext(os.path.basename(e))[0] for e in input_files]
    # NOTE(review): output files always get a '.csv' extension, even when
    # --export_format is 'textgrid' — confirm this is intended upstream.
    output_files = [os.path.join(odir, e + '.csv') for e in base]
    seg.batch_process(input_files, output_files, verbose=True, output_format=args.export_format)
+57
View File
@@ -0,0 +1,57 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# encoding: utf-8

# The MIT License

# Copyright (c) 2018 Ina (David Doukhan - http://www.ina.fr/)

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

# Worker process: connects to a remote GenderJobServer (Pyro URI given as
# argv[1]), repeatedly fetches batches of (source, dest) file pairs and
# segments them, until the server returns an empty batch.

import Pyro4
import sys
import os
import socket

from inaSpeechSegmenter import Segmenter

if __name__ == '__main__':
    dname = os.path.dirname(os.path.realpath(__file__))
    hostname = socket.gethostname()
    uri = sys.argv[1]  # Pyro URI of the job server
    jobserver = Pyro4.Proxy(uri)
    # ret carries the previous batch's result back to the server for logging;
    # -1 marks the initial request.
    ret = -1
    # NOTE(review): outname appears unused — presumably leftover; verify
    # before removing.
    outname = 'init'
    # batch size set at 1024. Use lower values with small gpus
    g = Segmenter(batch_size=1024)
    while True:
        lsrc, ldst = jobserver.get_njobs('%s %s' % (hostname, ret))
        print(lsrc, ldst)
        if len(lsrc) == 0:
            # Empty batch is the server's end-of-work signal.
            print('job list finished')
            break
        ret = g.batch_process(lsrc, ldst, skipifexist=True, nbtry=3)
+37
View File
@@ -0,0 +1,37 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# encoding: utf-8

# The MIT License

# Copyright (c) 2018 Ina (David Doukhan - http://www.ina.fr/)

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

# Admin helper: tell a running job server to (re)load its job list.

import Pyro4
import sys


def _submit_joblist(server_uri, csv_path):
    """Ask the remote job server at *server_uri* to load jobs from *csv_path*."""
    job_server = Pyro4.Proxy(server_uri)
    return job_server.set_jobs(csv_path)


if __name__ == '__main__':
    # argv[1]: Pyro URI of the job server; argv[2]: CSV job-list file.
    print(_submit_joblist(sys.argv[1], sys.argv[2]))
+76
View File
@@ -0,0 +1,76 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# encoding: utf-8
# The MIT License
# Copyright (c) 2018 Ina (David Doukhan - http://www.ina.fr/)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import Pyro4
import numpy as np
import pandas as pd
@Pyro4.expose
class GenderJobServer(object):
    # Pyro-exposed job queue: hands out (source_path, dest_path) file pairs
    # to remote segmentation workers, singly or in batches.

    def __init__(self, csvjobs):
        """Initialise the queue from a CSV job list (see set_jobs)."""
        self.set_jobs(csvjobs)

    def set_jobs(self, csvjobs):
        """Load, deduplicate and shuffle the job list.

        csvjobs: csv configuration file with 2 columns: source_path, dest_path
        """
        df = pd.read_csv(csvjobs)
        df.source_path = df.source_path.str.strip()
        df.dest_path = df.dest_path.str.strip()
        # Deduplicate, then shuffle (sample(frac=1)) so concurrent workers
        # spread across the corpus rather than clustering on its head.
        df = df.drop_duplicates().sample(frac=1).reset_index(drop=True)
        print('setting jobs')
        print('random source & dest path:', df.source_path[0], ' ', df.dest_path[0])
        print('number of files to process:', len(df))
        self.lsource = list(df.source_path)
        self.ldest = list(df.dest_path)
        self.i = 0  # count of jobs dispatched so far (used for logging only)
        return '%s jobs have been set' % csvjobs

    def get_job(self, msg):
        """Pop and return a single (source, dest) pair. *msg* is only logged."""
        print('job %d: %s' % (self.i, msg))
        self.i += 1
        return (self.lsource.pop(0), self.ldest.pop(0))

    def get_njobs(self, msg, nbjobs=20):
        """Return up to *nbjobs* (sources, dests) lists and advance the queue.

        A pair of empty lists signals the calling worker that every job has
        been dispatched. *msg* is only logged.
        """
        print('jobs %d-%d: %s' % (self.i, self.i + nbjobs, msg))
        ret = (self.lsource[:nbjobs], self.ldest[:nbjobs])
        if len(ret[0]) == 0:
            print('All jobs dispatched')
        self.lsource = self.lsource[nbjobs:]
        self.ldest = self.ldest[nbjobs:]
        self.i += nbjobs
        return ret
if __name__ == '__main__':
    # argv[1]: full name of the host to be used by remote clients
    # argv[2]: CSV job-list file handed to GenderJobServer
    Pyro4.config.HOST = sys.argv[1]
    daemon = Pyro4.Daemon()  # make a Pyro daemon
    uri = daemon.register(GenderJobServer(sys.argv[2]))  # register the job server as a Pyro object
    print("Ready. Object uri =", uri)
    daemon.requestLoop()  # blocks, serving worker requests until killed
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from tifffile.lsm2bin import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from markdown.__main__ import run

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from charset_normalizer.cli.normalizer import cli_detect

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli_detect())
Executable
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from pip._internal.cli.main import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
Executable
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from pip._internal.cli.main import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from pip._internal.cli.main import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: delegates to the package entry point
# and exits with its return code.
import re
import sys

from plaidml.plaidml_setup import main

if __name__ == '__main__':
    # Strip Windows launcher suffixes (-script.pyw / .exe) from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
+126
View File
@@ -0,0 +1,126 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
"""
Hidden Markov Model with (constrained) Viterbi decoding
Usage:
hmm train [-g <gaussian>] [-c <covariance>] [-d <duration>] <uris.lst> <references.mdtm> <features.pkl> <model.pkl>
hmm apply [-d <duration>] [-f <constraint.mdtm>] <model.pkl> <features.pkl> <hypothesis.mdtm>
hmm -h | --help
hmm --version
Options:
-g <gaussian> Number of gaussian components [default: 16].
-c <covariance> Covariance type (diag or full) [default: diag].
-d <duration> Minimum duration in seconds [default: 0.250].
-f <constraint.mdtm> Constrain Viterbi decoding to follow this path.
-h --help Show this screen.
--version Show version.
"""
from pyannote.algorithms.segmentation.hmm import ViterbiHMM
from pyannote.parser.util import CoParser
from pyannote.parser import MDTMParser
from docopt import docopt
import pickle
def do_train(
    uris_lst, references_mdtm, features_pkl, model_pkl,
    n_components=16, covariance_type='diag', min_duration=0.250,
):
    """Train a Viterbi HMM on reference annotations and features.

    Reads URIs from `uris_lst`, their annotations from `references_mdtm`
    and features from `features_pkl`, fits a ViterbiHMM, and pickles the
    fitted model to `model_pkl`.
    """
    model = ViterbiHMM(
        n_components=n_components,
        covariance_type=covariance_type,
        random_state=None,
        thresh=1e-2,
        min_covar=1e-3,
        n_iter=10,
        disturb=0.05,
        sampling=1000,
        min_duration=min_duration,
    )
    # CoParser yields references and features synchronously, one URI at a time.
    parser = CoParser(
        uris=uris_lst, reference=references_mdtm, features=features_pkl)
    refs, feats = parser.generators('reference', 'features')
    model.fit(refs, feats)
    with open(model_pkl, 'wb') as sink:
        pickle.dump(model, sink)
def do_apply(model_pkl, features_pkl, hypothesis_mdtm,
             min_duration=0.250, constraint_mdtm=None):
    """Decode features with a pickled HMM and write the hypothesis as MDTM.

    When `constraint_mdtm` is given, Viterbi decoding is constrained to
    follow that path.
    """
    with open(model_pkl, 'rb') as source:
        model = pickle.load(source)
    model.min_duration = min_duration
    with open(features_pkl, 'rb') as source:
        feats = pickle.load(source)
    # Optional decoding constraint loaded from an MDTM file.
    path_constraint = (
        MDTMParser().read(constraint_mdtm)() if constraint_mdtm else None)
    result = model.apply(feats, constraint=path_constraint)
    with open(hypothesis_mdtm, 'w') as sink:
        MDTMParser().write(result, f=sink)
if __name__ == '__main__':
    # Parse the docopt usage string at the top of this module.
    arguments = docopt(__doc__, version='Hidden Markov Models 1.0')
    if arguments['train']:
        do_train(
            arguments['<uris.lst>'],
            arguments['<references.mdtm>'],
            arguments['<features.pkl>'],
            arguments['<model.pkl>'],
            n_components=int(arguments['-g']),
            covariance_type=arguments['-c'],
            min_duration=float(arguments['-d']),
        )
    elif arguments['apply']:
        do_apply(
            arguments['<model.pkl>'],
            arguments['<features.pkl>'],
            arguments['<hypothesis.mdtm>'],
            min_duration=float(arguments['-d']),
            constraint_mdtm=arguments['-f'],
        )
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``fontTools.merge.main``."""
import re
import sys

from fontTools.merge import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``fontTools.subset.main``."""
import re
import sys

from fontTools.subset import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``Pyro4.configuration.main``."""
import re
import sys

from Pyro4.configuration import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``Pyro4.utils.flameserver.main``."""
import re
import sys

from Pyro4.utils.flameserver import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``Pyro4.utils.httpgateway.main``."""
import re
import sys

from Pyro4.utils.httpgateway import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``Pyro4.naming.main``."""
import re
import sys

from Pyro4.naming import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``Pyro4.nsc.main``."""
import re
import sys

from Pyro4.nsc import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``Pyro4.test.echoserver.main``."""
import re
import sys

from Pyro4.test.echoserver import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``rsa.cli.decrypt``."""
import re
import sys

from rsa.cli import decrypt

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(decrypt())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``rsa.cli.encrypt``."""
import re
import sys

from rsa.cli import encrypt

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(encrypt())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``rsa.cli.keygen``."""
import re
import sys

from rsa.cli import keygen

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(keygen())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``rsa.util.private_to_public``."""
import re
import sys

from rsa.util import private_to_public

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(private_to_public())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``rsa.cli.sign``."""
import re
import sys

from rsa.cli import sign

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(sign())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``rsa.cli.verify``."""
import re
import sys

from rsa.cli import verify

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(verify())
+1
View File
@@ -0,0 +1 @@
python3.8
+1
View File
@@ -0,0 +1 @@
python3.8
+1
View File
@@ -0,0 +1 @@
/usr/local/Cellar/python@3.8/3.8.12_1/bin/python3.8
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``tensorflow.python.tools.saved_model_cli.main``."""
import re
import sys

from tensorflow.python.tools.saved_model_cli import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``skimage.scripts.skivi.main``."""
import re
import sys

from skimage.scripts.skivi import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``tensorboard.main.run_main``."""
import re
import sys

from tensorboard.main import run_main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(run_main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``tensorflow.tools.compatibility.tf_upgrade_v2_main.main``."""
import re
import sys

from tensorflow.tools.compatibility.tf_upgrade_v2_main import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``tensorflow.lite.python.tflite_convert.main``."""
import re
import sys

from tensorflow.lite.python.tflite_convert import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``tifffile.tiff2fsspec.main``."""
import re
import sys

from tifffile.tiff2fsspec import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``tifffile.tiffcomment.main``."""
import re
import sys

from tifffile.tiffcomment import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``tifffile.main``."""
import re
import sys

from tifffile import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
Executable
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``tensorflow.lite.python.tflite_convert.main``."""
import re
import sys

from tensorflow.lite.python.tflite_convert import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``tensorflow.lite.toco.python.toco_from_protos.main``."""
import re
import sys

from tensorflow.lite.toco.python.toco_from_protos import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
Executable
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``fontTools.ttx.main``."""
import re
import sys

from fontTools.ttx import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+8
View File
@@ -0,0 +1,8 @@
#!/Volumes/macWorkspace/CS/SpeechGenderAnalysis/venv8/bin/python3.8
# -*- coding: utf-8 -*-
"""Console-script shim: dispatch to ``wheel.cli.main``."""
import re
import sys

from wheel.cli import main

if __name__ == '__main__':
    # setuptools may name the launcher 'X-script.pyw' or 'X.exe'; strip that suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    raise SystemExit(main())
+2909
View File
File diff suppressed because it is too large Load Diff
+251
View File
@@ -0,0 +1,251 @@
// Copyright 2018 Intel Corporation.
//
// This is the PlaidML base library interface, handling functionality common across the Vertex.AI libraries.
#pragma once
#if defined _WIN32 || defined __CYGWIN__
#ifdef VAI_DLL
#define VAI_API __declspec(dllexport)
#else
#define VAI_API __declspec(dllimport)
#endif
#elif __GNUC__ >= 4
#define VAI_API __attribute__((visibility("default")))
#else
#define VAI_API
#endif
#ifdef __cplusplus
#include <cstddef>
#include <memory>
extern "C" {
#else
#include <stdbool.h>
#include <stddef.h>
#endif // __cplusplus
// Error handling.
//
// In the Vertex.AI C APIs, functions that can fail return either a pointer or a
// boolean; success is represented as a non-NULL or true value, and failure is
// represented as a NULL or false value.
//
// When a call fails, the callee is responsible for recording additional
// information in thread-local storage. This information can be retrieved by
// calling vai_last_status(). Otherwise (if a call is successful), no
// guarantees are made; the callee may clobber existing thread-local error
// information, leaving it in an undefined state.
//
// Note that errors may occur asynchronously. Asynchronous errors are reported
// to the various callback functions used to collect the results of
// asynchronous operations, again via a NULL or false value. In this case,
// the last error can be retrieved from thread-local-storage within the
// callback function.
//
// Failed calls always propagate errors, poisoning the state of any objects
// being updated. Additionally, NULL inputs are valid for all calls, causing
// them to fail (on the assumption that a NULL input indicates an earlier
// out-of-memory condition). Callers may take advantage of this by performing
// a sequence of calls without checking for errors, and then checking the last
// dependent call for errors; this obscures the exact call that produced an
// error, but in most cases the caller is more interested in the fact that the
// overall computation failed, and less interested in exactly which call
// failed.
// These are the various status codes the application may observe.
// Note that additional status codes may be added in subsequent releases.
//
// The set of status codes attempts to provide enough information for software components to determine the appropriate
// recovery action to take. For diagnostics, the human-readable string associated with the status is typically more
// useful.
// NOTE(review): the numbering appears to mirror the canonical gRPC/Google
// status codes (CANCELLED=1 ... UNAUTHENTICATED=16) — confirm before adding
// new values, so the two stay aligned.
typedef enum {
  // A status representing "No error".
  VAI_STATUS_OK = 0,
  // Indicates that an asynchronous operation was cancelled.
  VAI_STATUS_CANCELLED = 1,
  // A generic catch-all error, used when an error condition must be signalled but
  // there is no appropriate API-level status code.
  VAI_STATUS_UNKNOWN = 2,
  // Indicates that at least one invalid argument was passed to a function.
  VAI_STATUS_INVALID_ARGUMENT = 3,
  // The operation deadline was exceeded.
  VAI_STATUS_DEADLINE_EXCEEDED = 4,
  // The requested object was not found.
  VAI_STATUS_NOT_FOUND = 5,
  // The requested object already exists.
  VAI_STATUS_ALREADY_EXISTS = 6,
  // The caller does not have permission to access a resource required by the operation.
  VAI_STATUS_PERMISSION_DENIED = 7,
  // A resource required by the operation is exhausted. (For example, this is returned when the implementation is
  // unable to allocate sufficient memory.)
  VAI_STATUS_RESOURCE_EXHAUSTED = 8,
  // A precondition required by the operation is unmet. (For example, this is returned when an object supplied to a
  // call is not in the correct state for the call to take place).
  VAI_STATUS_FAILED_PRECONDITION = 9,
  // A transactional operation was aborted by the system. Generally, this is a transient condition.
  VAI_STATUS_ABORTED = 10,
  // A call parameter is out of the range accepted by the implementation.
  VAI_STATUS_OUT_OF_RANGE = 11,
  // The requested functionality is not implemented.
  VAI_STATUS_UNIMPLEMENTED = 12,
  // An internal error occurred. Typically, this indicates that the implementation has detected that its internal state
  // is inconsistent.
  VAI_STATUS_INTERNAL = 13,
  // A resource required by the operation (such as a hardware device) is unavailable for use.
  VAI_STATUS_UNAVAILABLE = 14,
  // The system has lost data required by the operation, typically due to hardware failure.
  VAI_STATUS_DATA_LOSS = 15,
  // The caller is unauthenticated, but authenticated access is required to access some resource.
  VAI_STATUS_UNAUTHENTICATED = 16,
} vai_status;
// Returns the last status recorded in the current thread's thread-local storage,
// or VAI_STATUS_OK if no status has been recorded.
VAI_API vai_status vai_last_status();
// Resets the current thread's thread-local status storage to VAI_STATUS_OK.
VAI_API void vai_clear_status();
// Returns a NUL-terminated UTF-8 message describing the status of the call
// errors recorded by the current thread's thread-local storage. If no error has
// been recorded, an empty string will be returned.
//
// The returned string will remain alive until vai_clear_error is called, another Vertex.AI
// call is made, or until the current thread exits.
//
// The error string may be dependent on the locale installed when the error occurred.
VAI_API const char* vai_last_status_str();
// Logger configuration.
// Severity levels accepted by the logging callback.
// NOTE(review): the numeric values do not increase with severity
// (FATAL=8 < ERROR=16 < WARNING=32 < INFO=128) — do not compare these
// values numerically to filter by severity; match on the enumerator.
typedef enum {
  VAI_LOG_SEVERITY_TRACE = 2,
  VAI_LOG_SEVERITY_DEBUG = 4,
  VAI_LOG_SEVERITY_FATAL = 8,
  VAI_LOG_SEVERITY_ERROR = 16,
  VAI_LOG_SEVERITY_WARNING = 32,
  VAI_LOG_SEVERITY_VERBOSE = 64,
  VAI_LOG_SEVERITY_INFO = 128,
} vai_log_severity;
// Sets the process-global logging callback.
VAI_API void vai_set_logger(void (*logger)(void*, vai_log_severity, const char*), void* arg);
// Dynamic feature detection.
//
// Invoking this API with an unknown / unsupported feature ID will return NULL.
// Invoking it with a supported feature ID will return a pointer to a static
// feature-specific value.
typedef enum {
VAI_FEATURE_ID_RESERVED = 0,
} vai_feature_id;
VAI_API void* vai_query_feature(vai_feature_id id);
// A Vertex.AI context provides a scope for Vertex.AI library operations. In
// particular, it provides an asynchronous execution context, allowing callers
// to correctly synchronize with asynchronous callbacks during shutdown.
//
// NULL semantically points to a valid, but cancelled, context.
#ifdef __cplusplus
struct vai_ctx;
#else
typedef struct vai_ctx vai_ctx;
#endif // __cplusplus
// Allocate and returns a context, or returns NULL if the library
// cannot allocate sufficient memory.
VAI_API vai_ctx* vai_alloc_ctx();
// Frees a context. After this call, the context should not be used for
// any subsequent calls. Freeing a NULL context is a no-op.
//
// Freeing a context will block until pending asynchronous operations are complete.
VAI_API void vai_free_ctx(vai_ctx* ctx);
// Cancels outstanding asynchronous operations associated with the context
// (ensuring that callbacks are completed before returning to the caller), and
// causes future callbacks issued using the context to synchronously fail.
//
// Note that there is no call to go from "cancelled" back to "uncancelled".
//
// This does not block waiting for asynchronous operations to complete.
VAI_API void vai_cancel_ctx(vai_ctx* ctx);
// Sets the context to log events according to the specified
// configuration. For instance, to point the context's event log at
// "eventlog.gz", use the JSON string:
//
// "@type": "type.vertex.ai/vertexai.eventing.file.proto.EventLog",
// "filename": "eventlog.gz"
//
// If the context already has an associated eventlog, that eventlog
// will be finalized and closed asynchronously, once all asynchronous
// activity using that eventlog has completed.
//
// A NULL config sets the context to not use the event logging
// subsystem for future calls.
VAI_API bool vai_set_eventlog(vai_ctx* ctx, const char* config);
// Gets the current value of a performance counter based on the name.
// If there is no performance counter with that name, returns -1.
VAI_API int64_t vai_get_perf_counter(const char* name);
// Sets the current value of a performance counter based on the name.
// If there is no performance counter with that name, no action is taken.
VAI_API void vai_set_perf_counter(const char* name, int64_t value);
// A PlaidML datatype indicates the type of data stored within a buffer, as
// observed by a program.
// Element types for buffer contents. The values are grouped by kind:
// 0x1x = signed integers, 0x2x = unsigned integers, 0x3x = floating point,
// with the low nibble growing with the element width.
typedef enum {
  PLAIDML_DATA_INVALID = 0,
  PLAIDML_DATA_BOOLEAN = 0x02,
  PLAIDML_DATA_INT8 = 0x10,
  PLAIDML_DATA_INT16 = 0x11,
  PLAIDML_DATA_INT32 = 0x12,
  PLAIDML_DATA_INT64 = 0x13,
  PLAIDML_DATA_INT128 = 0x14,
  PLAIDML_DATA_UINT8 = 0x20,
  PLAIDML_DATA_UINT16 = 0x21,
  PLAIDML_DATA_UINT32 = 0x22,
  PLAIDML_DATA_UINT64 = 0x23,
  PLAIDML_DATA_FLOAT16 = 0x31,
  PLAIDML_DATA_FLOAT32 = 0x32,
  PLAIDML_DATA_FLOAT64 = 0x33,
  PLAIDML_DATA_BFLOAT16 = 0x38,
  PLAIDML_DATA_PRNG = 0x40,
} plaidml_datatype;
#ifdef __cplusplus
} // extern "C"
namespace std {
// Teach std::unique_ptr<vai_ctx> to release contexts through vai_free_ctx,
// so C++ callers get RAII ownership of the C-level context.
template <>
struct default_delete<::vai_ctx> {
  void operator()(::vai_ctx* ctx) const noexcept { ::vai_free_ctx(ctx); }
};
}  // namespace std
#endif // __cplusplus
+68
View File
@@ -0,0 +1,68 @@
// Copyright 2018 Intel Corporation.
//
// This is the Vertex.AI common C++ interface, which provides a higher level object
// oriented wrapper on top of the Vertex.AI common C API.
#pragma once
#include <exception>
#include <memory>
#include <string>
#include <utility>
#include "plaidml/base/base.h"
namespace vertexai {
// Exception carrying a vai_status code alongside its human-readable message.
class vai_exception : public std::runtime_error {
 public:
  vai_exception(vai_status status, const std::string& what) : std::runtime_error(what), status_(status) {}

  // The status code captured when the exception was created.
  vai_status status() { return status_; }

  // If `good` is falsy, convert the thread-local status + message into a
  // vai_exception, clear the thread-local state, and throw it.
  template <typename T>
  static void check_and_throw(const T& good) {
    if (good) {
      return;
    }
    vai_status status = vai_last_status();
    std::string err = vai_last_status_str();
    vai_clear_status();
    throw vai_exception{status, err.c_str()};
  }

  // Capture the current thread-local error as an exception_ptr without
  // throwing; if capture itself throws, return that in-flight exception.
  static std::exception_ptr current() noexcept {
    try {
      vai_status status = vai_last_status();
      std::string err = vai_last_status_str();
      vai_clear_status();
      return std::make_exception_ptr(vai_exception{status, err.c_str()});
    } catch (...) {
      return std::current_exception();
    }
  }

 private:
  vai_status status_;
};
// RAII owner of a vai_ctx. Disposal goes through std::unique_ptr's
// default_delete<vai_ctx>, which base.h specializes to call vai_free_ctx.
class ctx final {
 public:
  // Allocate a fresh context; throws std::bad_alloc if allocation fails.
  ctx() : ctx_{vai_alloc_ctx()} {
    if (!ctx_) {
      throw std::bad_alloc();
    }
  }
  // Adopt an existing context; a NULL context is rejected.
  explicit ctx(std::unique_ptr<vai_ctx> ctx) : ctx_{std::move(ctx)} {
    if (!ctx_) {
      throw std::bad_alloc();
    }
  }

  // Borrowed pointer; ownership stays with this object.
  vai_ctx* get_ctx() const { return ctx_.get(); }

 private:
  std::unique_ptr<vai_ctx> ctx_;
};
} // namespace vertexai
+826
View File
@@ -0,0 +1,826 @@
// Copyright 2018 Intel Corporation.
//
// This is the PlaidML C++ interface, which provides a higher level object
// oriented wrapper on top of the PlaidML C API.
#pragma once
#include <exception>
#include <future>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <half.hpp>
#include "plaidml/base/base_cpp.h"
#include "plaidml/plaidml.h"
using half_float::half;
namespace vertexai {
namespace plaidml {
// Import plaidml_datatype into the namespace
typedef plaidml_datatype datatype;
// Make a map for c++ types to PlaidML types
template <typename T>
struct to_plaidml_datatype {};
template <>
struct to_plaidml_datatype<int8_t> {
static constexpr plaidml_datatype value = PLAIDML_DATA_INT8;
};
template <>
struct to_plaidml_datatype<int16_t> {
static constexpr plaidml_datatype value = PLAIDML_DATA_INT16;
};
template <>
struct to_plaidml_datatype<int32_t> {
static constexpr plaidml_datatype value = PLAIDML_DATA_INT32;
};
template <>
struct to_plaidml_datatype<int64_t> {
static constexpr plaidml_datatype value = PLAIDML_DATA_INT64;
};
template <>
struct to_plaidml_datatype<uint8_t> {
static constexpr plaidml_datatype value = PLAIDML_DATA_UINT8;
};
template <>
struct to_plaidml_datatype<uint16_t> {
static constexpr plaidml_datatype value = PLAIDML_DATA_UINT16;
};
template <>
struct to_plaidml_datatype<uint32_t> {
static constexpr plaidml_datatype value = PLAIDML_DATA_UINT32;
};
template <>
struct to_plaidml_datatype<uint64_t> {
static constexpr plaidml_datatype value = PLAIDML_DATA_UINT64;
};
template <>
struct to_plaidml_datatype<half> {
static constexpr plaidml_datatype value = PLAIDML_DATA_FLOAT16;
};
template <>
struct to_plaidml_datatype<float> {
static constexpr plaidml_datatype value = PLAIDML_DATA_FLOAT32;
};
template <>
struct to_plaidml_datatype<double> {
static constexpr plaidml_datatype value = PLAIDML_DATA_FLOAT64;
};
// Predeclare classes
class application;
class base_shape;
class base_tensor;
class buffer;
class compose;
class device;
class device_config;
class function;
class gradient;
class invoker;
template <typename T>
class mapping;
class placeholder;
template <typename T>
class shape;
template <typename T>
class tensor;
class variable;
// A dimension containing both size + stride
struct dimension {
uint64_t size;
int64_t stride;
};
inline std::vector<device_config> _enumerate_devices(const std::shared_ptr<ctx>& ctx,
std::shared_ptr<plaidml_device_enumerator> dev_enum);
inline std::vector<device_config> enumerate_devices(const std::shared_ptr<ctx>& ctx);
inline std::vector<device_config> enumerate_devices(const std::shared_ptr<ctx>& ctx, const std::string& config);
// Untyped wrapper over a plaidml_shape: datatype plus per-dimension
// size/stride, bound to a context.
class base_shape {
  friend class base_tensor;
  friend class invoker;

 public:
  // Construct an empty shape with a specific data type
  explicit base_shape(const std::shared_ptr<ctx>& ctx, datatype dtype = PLAIDML_DATA_FLOAT32)
      : ctx_{ctx}, ptr_(plaidml_alloc_shape(ctx->get_ctx(), dtype), plaidml_free_shape) {
    vai_exception::check_and_throw(ptr_);
  }

  // Simple passthrough of C api for setup
  void add_dimension(size_t size, ptrdiff_t stride) {
    bool r = plaidml_add_dimension(ctx_->get_ctx(), ptr_.get(), size, stride);
    vai_exception::check_and_throw(r);
  }
  void set_offset(size_t offset) {
    bool r = plaidml_set_shape_offset(ctx_->get_ctx(), ptr_.get(), offset);
    vai_exception::check_and_throw(r);
  }

  // Add a dimension
  void push_back(const dimension& d) { add_dimension(d.size, d.stride); }

  // Add multiple dimensions with packed strides: earlier dimensions get
  // larger strides (row-major layout), scaled by initial_stride.
  template <typename L>
  void add_dimensions(const L& dims, ptrdiff_t initial_stride = 1) {
    ptrdiff_t stride = initial_stride;
    // First pass: stride starts as the product of all sizes.
    for (const auto& sz : dims) {
      stride *= sz;
    }
    // Second pass: divide out each size before adding, so the last
    // dimension ends up with stride == initial_stride.
    for (const auto& sz : dims) {
      stride /= sz;
      add_dimension(sz, stride);
    }
  }

  // Make a simple shape with packed strides, last dimension lowest stride.
  // NOTE(review): `offset` is accepted but never used (set_offset is not
  // called here) — confirm whether callers expect it to take effect.
  base_shape(const std::shared_ptr<ctx>& ctx, datatype dtype, const std::initializer_list<size_t>& il,
             uint64_t offset = 0)
      : base_shape(ctx, dtype) {
    add_dimensions(il);
  }

  // Get information about a shape
  datatype type() const { return plaidml_get_shape_type(ptr_.get()); }
  size_t dimensions() const { return plaidml_get_shape_dimension_count(ptr_.get()); }
  dimension operator[](size_t i) const { return dimension{size(i), stride(i)}; }
  uint64_t size(size_t i) const { return plaidml_get_shape_dimension_size(ptr_.get(), i); }
  int64_t stride(size_t i) const { return plaidml_get_shape_dimension_stride(ptr_.get(), i); }
  uint64_t buffer_size() const { return plaidml_get_shape_buffer_size(ptr_.get()); }
  const std::shared_ptr<ctx>& get_context() const { return ctx_; }

 protected:
  // Adopt an already-allocated C shape object (used by base_tensor).
  explicit base_shape(const std::shared_ptr<ctx>& ctx, const std::shared_ptr<plaidml_shape>& ptr)
      : ctx_{ctx}, ptr_{ptr} {}

  std::shared_ptr<ctx> ctx_;
  std::shared_ptr<plaidml_shape> ptr_;
};
// Typed shape: binds the C++ element type T to its PlaidML datatype via
// to_plaidml_datatype<T>.
template <typename T>
class shape : public base_shape {
 public:
  explicit shape(const std::shared_ptr<ctx>& ctx, datatype dt = to_plaidml_datatype<T>::value) : base_shape(ctx, dt) {}

  // Wrap an untyped shape, verifying its datatype matches T.
  explicit shape(const base_shape& base, datatype dt = to_plaidml_datatype<T>::value) : base_shape(base) {
    if (dt != base.type()) {
      throw vai_exception(VAI_STATUS_INVALID_ARGUMENT, "Mismatched shape");
    }
  }

  shape(const std::shared_ptr<ctx>& ctx, const std::initializer_list<size_t>& il, uint64_t offset = 0)
      : base_shape(ctx, to_plaidml_datatype<T>::value, il, offset) {}
};
// Handle to a device buffer; copy_into/copy_from move whole-buffer contents
// between device and host memory via temporary mappings.
class buffer {
  friend class device;
  friend class base_tensor;

 public:
  buffer() {}

  // Copy the buffer's entire contents into host memory at `dst` (which must
  // be at least the mapping's size).
  // NOTE(review): this maps with plaidml_map_buffer_discard, whose name
  // suggests the mapping's contents may be undefined; for a read-out path
  // plaidml_map_buffer_current (as used in copy_from and tensor::map for
  // reads) looks more appropriate — confirm against the PlaidML C API.
  void copy_into(const std::shared_ptr<ctx>& ctx, void* dst) {
    plaidml_mapping* mapping = plaidml_map_buffer_discard(ctx->get_ctx(), ptr_.get());
    char* src = plaidml_get_mapping_base(ctx->get_ctx(), mapping);
    size_t size = plaidml_get_mapping_size(ctx->get_ctx(), mapping);
    memcpy(dst, src, size);
    plaidml_free_mapping(mapping);
  }

  // Copy host memory at `src` over the buffer's entire contents and write
  // the mapping back to the device.
  void copy_from(const std::shared_ptr<ctx>& ctx, const void* src) {
    plaidml_mapping* mapping = plaidml_map_buffer_current(ptr_.get(), nullptr, nullptr);
    char* dst = plaidml_get_mapping_base(ctx->get_ctx(), mapping);
    size_t size = plaidml_get_mapping_size(ctx->get_ctx(), mapping);
    memcpy(dst, src, size);
    plaidml_writeback_mapping(ctx->get_ctx(), mapping);
    plaidml_free_mapping(mapping);
  }

 private:
  std::shared_ptr<plaidml_buffer> ptr_;

  explicit buffer(const std::shared_ptr<plaidml_buffer>& ptr) : ptr_(ptr) {}
};
// An untyped (buffer, shape) pair bound to a context; the shared_ptr members
// keep the underlying C objects alive while any accessor result is in use.
class base_tensor {
  friend class variable;

 public:
  base_tensor() {}
  base_tensor(const std::shared_ptr<ctx>& ctx, const buffer& buf, const base_shape& shape)
      : ctx_(ctx), buf_(buf.ptr_), shape_(shape.ptr_) {}

  base_shape get_shape() { return base_shape(ctx_, shape_); }
  buffer get_buffer() { return buffer(buf_); }
  std::shared_ptr<ctx> get_context() { return ctx_; }

 protected:
  std::shared_ptr<ctx> ctx_;
  std::shared_ptr<plaidml_buffer> buf_;
  std::shared_ptr<plaidml_shape> shape_;
};
// Indicates that the mapping will be used for reading the buffer. The mapping
// will reflect the buffer's current contents. By default, the implementation
// is free to either discard the mapping's contents or to write them back to
// the underlying buffer.
struct map_for_read_t {};
static constexpr map_for_read_t map_for_read = {};
// Indicates that the mapping will be used for writing to the buffer. The
// mapping may not reflect the buffer's current contents; the implementation is
// free to construct the mapping with garbage data. By default, the mapping's
// contents will be written back to the buffer when the mapping is deleted
// unless the deletion is due to an exceptional condition.
struct map_for_write_t {};
static constexpr map_for_write_t map_for_write = {};
// Indicates that the mapping will be used for read-write access to the buffer.
// The mapping will reflect the buffer's current contents. By default, the
// mapping's contents will be written back to the buffer when the mapping is
// deleted unless the deletion is due to an exceptional condition.
struct map_for_update_t {};
static constexpr map_for_update_t map_for_update = {};
enum class mapping_destructor_behavior {
writeback_if_normal, // Write the contents on normal exits
writeback_always, // Always write the contents (including on exceptions)
discard // The implementation may discard the contents
};
// A typed host-memory view of a mapped buffer. Move-only; on destruction the
// mapped contents are written back or discarded according to `behavior_`
// (see mapping_destructor_behavior above).
template <typename T>
class mapping {
 public:
  // Construct an uninitialized mapping.
  mapping() {}
  ~mapping() { release(); }

  // Disallow copy + default construction
  mapping(const mapping& rhs) = delete;
  mapping& operator=(const mapping& rhs) = delete;

  // Allow moves; the moved-from object is left with mapped_ == nullptr so
  // its destructor performs no writeback.
  mapping(mapping&& rhs)
      : ctx_{std::move(rhs.ctx_)},
        buf_{std::move(rhs.buf_)},
        sizes_{std::move(rhs.sizes_)},
        strides_{std::move(rhs.strides_)},
        map_{std::move(rhs.map_)},
        behavior_{std::move(rhs.behavior_)},
        mapped_{rhs.mapped_} {
    rhs.mapped_ = nullptr;
  }
  mapping& operator=(mapping&& rhs) {
    release();  // finalize whatever this object currently maps
    ctx_ = std::move(rhs.ctx_);
    buf_ = std::move(rhs.buf_);
    sizes_ = std::move(rhs.sizes_);
    strides_ = std::move(rhs.strides_);
    map_ = std::move(rhs.map_);
    behavior_ = std::move(rhs.behavior_);
    mapped_ = rhs.mapped_;
    rhs.mapped_ = nullptr;
    return *this;
  }

  // Provide access to raw buffer
  T* raw() { return mapped_; }

  // Explicitly set the destruction behavior.
  void set_destructor_behavior(mapping_destructor_behavior behavior) { behavior_ = behavior; }

  // Compute location of index, also do bounds check. Note: this is a
  // convenience function; it is not designed to be performant.
  T& at(const std::initializer_list<size_t>& idx) {
    if (idx.size() != sizes_.size()) {
      throw vai_exception(VAI_STATUS_OUT_OF_RANGE, "Invalid number of indexes in mapping access");
    }
    ptrdiff_t off = 0;
    for (size_t i = 0; i < sizes_.size(); i++) {
      if (*(idx.begin() + i) >= sizes_[i]) {
        throw vai_exception(VAI_STATUS_OUT_OF_RANGE, "Index out of bound on mapping access");
      }
      off += strides_[i] * *(idx.begin() + i);
    }
    return mapped_[off];
  }

  // Syntactic sugar
  template <typename... Args>
  T& operator()(Args... args) {
    return at({args...});
  }

 private:
  friend class tensor<T>;

  // Built by tensor<T>::map(): caches the shape's sizes/strides and the
  // mapping's host base pointer.
  mapping(std::shared_ptr<ctx> ctx, std::shared_ptr<plaidml_buffer> buf, const std::shared_ptr<plaidml_shape>& shape,
          std::unique_ptr<plaidml_mapping> map, mapping_destructor_behavior behavior)
      : ctx_{std::move(ctx)}, buf_{std::move(buf)}, map_{std::move(map)}, behavior_{behavior} {
    sizes_.resize(plaidml_get_shape_dimension_count(shape.get()));
    strides_.resize(plaidml_get_shape_dimension_count(shape.get()));
    for (size_t i = 0; i < strides_.size(); i++) {
      sizes_[i] = plaidml_get_shape_dimension_size(shape.get(), i);
      strides_[i] = plaidml_get_shape_dimension_stride(shape.get(), i);
    }
    mapped_ = reinterpret_cast<T*>(plaidml_get_mapping_base(ctx_->get_ctx(), map_.get()));
    vai_exception::check_and_throw(mapped_);
  }

  // Write back or discard the mapped contents, then mark this object unmapped.
  void release() {
    if (!mapped_) {
      return;
    }
    switch (behavior_) {
      case mapping_destructor_behavior::writeback_if_normal:
        // Skip writeback when unwinding due to an exception.
#ifdef __cpp_lib_uncaught_exceptions
        if (std::uncaught_exceptions()) {
          break;
        }
#else
        if (std::uncaught_exception()) {
          break;
        }
#endif
        // fallthrough
      case mapping_destructor_behavior::writeback_always:
        plaidml_writeback_mapping(ctx_->get_ctx(), map_.get());
        break;
      case mapping_destructor_behavior::discard:
        break;
    }
    mapped_ = nullptr;
  }

  std::shared_ptr<ctx> ctx_;
  std::shared_ptr<plaidml_buffer> buf_;
  std::vector<size_t> sizes_;
  std::vector<ptrdiff_t> strides_;
  std::unique_ptr<plaidml_mapping> map_;
  mapping_destructor_behavior behavior_;
  T* mapped_ = nullptr;
};
// A typed tensor: a buffer plus a typed shape, with typed mapping support.
template <typename T>
class tensor : public base_tensor {
  friend class mapping<T>;

 public:
  tensor() {}
  tensor(const std::shared_ptr<ctx>& ctx, const buffer& buf, const shape<T>& shape) : base_tensor(ctx, buf, shape) {}
  // Synchronously maps the buffer's current contents for reading; the
  // mapping's contents are discarded (not written back) on destruction.
  mapping<T> map(map_for_read_t) const {
    std::unique_ptr<plaidml_mapping> m{plaidml_map_buffer_current(buf_.get(), NULL, NULL)};
    return mapping<T>{ctx_, buf_, shape_, std::move(m), mapping_destructor_behavior::discard};
  }
  // Asynchronously creates a readable mapping. The completion function should take a std::future<mapping<T>>,
  // which will be a ready future for the result of the mapping call.
  // NOTE(review): the vai_ctx* parameter appears unused in this overload —
  // confirm intent against callers.
  template <typename C>
  void map(map_for_read_t, vai_ctx* ctx, C&& on_complete) const {
    // The completion object owns the callback state; ownership is handed to
    // the C callback via comp.release() and reclaimed inside OnMapped.
    std::unique_ptr<completion> comp{
        static_cast<completion*>(new typed_completion<C>(ctx_, buf_, shape_, std::forward<C>(on_complete)))};
    plaidml_map_buffer_current(buf_.get(), &OnMapped, comp.release());
  }
  // Maps the buffer for writing; current contents may be discarded by the
  // implementation. Contents are written back on normal (non-exceptional)
  // destruction of the mapping.
  mapping<T> map(map_for_write_t) {
    std::unique_ptr<plaidml_mapping> m{plaidml_map_buffer_discard(ctx_->get_ctx(), buf_.get())};
    return mapping<T>{ctx_, buf_, shape_, std::move(m), mapping_destructor_behavior::writeback_if_normal};
  }
  // Maps the buffer's current contents for read/modify/write access.
  // Contents are written back on normal (non-exceptional) destruction.
  mapping<T> map(map_for_update_t) {
    std::unique_ptr<plaidml_mapping> m{plaidml_map_buffer_current(buf_.get(), NULL, NULL)};
    return mapping<T>{ctx_, buf_, shape_, std::move(m), mapping_destructor_behavior::writeback_if_normal};
  }

 private:
  // Type-erased completion state for the asynchronous map() overload.
  class completion {
   public:
    virtual ~completion() {}
    virtual void complete(plaidml_mapping* result) = 0;
  };
  // Concrete completion holding the user callback; fulfills a promise with
  // either the mapping or the current thread's error, then invokes the
  // callback with the ready future.
  template <typename C>
  class typed_completion final : public completion {
   public:
    typed_completion(std::shared_ptr<ctx> ctx, std::shared_ptr<plaidml_buffer> buf,
                     std::shared_ptr<plaidml_shape> shape, C&& on_complete)
        : ctx_{std::move(ctx)},
          buf_{std::move(buf)},
          shape_{std::move(shape)},
          on_complete_{std::forward<C>(on_complete)} {}
    void complete(plaidml_mapping* result) final {
      if (!result) {
        // A NULL result indicates failure; propagate via the future.
        prom_.set_exception(vai_exception::current());
      } else {
        std::unique_ptr<plaidml_mapping> mp{result};
        prom_.set_value(mapping<T>{ctx_, std::move(buf_), shape_, std::move(mp), mapping_destructor_behavior::discard});
      }
      on_complete_(prom_.get_future());
    }

   private:
    std::shared_ptr<ctx> ctx_;
    std::shared_ptr<plaidml_buffer> buf_;
    std::shared_ptr<plaidml_shape> shape_;
    std::promise<mapping<T>> prom_;
    C on_complete_;
  };
  // C-style trampoline passed to plaidml_map_buffer_current; reclaims the
  // completion released in map() and invokes it exactly once.
  static void OnMapped(void* arg, plaidml_mapping* result) noexcept {
    std::unique_ptr<completion> comp{static_cast<completion*>(arg)};
    comp->complete(result);
  }
};
class placeholder {
friend class variable;
friend class compose;
public:
placeholder() {}
explicit placeholder(size_t ndims) : ptr_(plaidml_alloc_placeholder(ndims), plaidml_free_var) {
vai_exception::check_and_throw(ptr_);
}
private:
std::shared_ptr<plaidml_var> ptr_;
};
// A variable: an input to or output from a PlaidML function. Implicitly
// constructible from integer/real constants, placeholders, and tensors
// (the implicit conversions are intentional; see the NOLINT markers).
class variable {
  friend class application;
  friend class compose;
  friend class function;
  friend class gradient;
  friend class invoker;

 public:
  variable() {}
  // Wraps a signed integer constant.
  variable(const int64_t& val) : ptr_(plaidml_alloc_int64(val), plaidml_free_var) {  // NOLINT(runtime/explicit)
    vai_exception::check_and_throw(ptr_);
  }
  // Wraps a floating-point constant.
  variable(const double& val) : ptr_(plaidml_alloc_real(val), plaidml_free_var) {  // NOLINT(runtime/explicit)
    vai_exception::check_and_throw(ptr_);
  }
  // Wraps a placeholder (shares the placeholder's underlying var).
  variable(const placeholder& val) : ptr_(val.ptr_) {}  // NOLINT(runtime/explicit)
  // Wraps a tensor, binding its buffer and shape to a new var.
  variable(const base_tensor& val)  // NOLINT(runtime/explicit)
      : ptr_(plaidml_alloc_tensor(val.ctx_->get_ctx(), val.buf_.get(), val.shape_.get()), plaidml_free_var) {
    vai_exception::check_and_throw(ptr_);
  }

 private:
  std::shared_ptr<plaidml_var> ptr_;
};
// The application of a function to a particular set of inputs, from which
// output variables may be retrieved by index or by name.
class application {
  friend class function;
  friend class compose;

 public:
  application() {}
  // Implicit conversion to the single output of the applied function.
  // Throws std::runtime_error if the function does not have exactly one output.
  operator variable() {
    if (plaidml_get_function_output_count(func_.get()) != 1) {
      throw std::runtime_error("Function application with non-unique return used in variable context");
    }
    return get_output(0);
  }
  // Returns the i'th output of the applied function.
  // Throws std::runtime_error if the index is out of range.
  variable get_output(size_t i) {
    if (i >= plaidml_get_function_output_count(func_.get())) {
      throw std::runtime_error("Attempting to get an invalid output index");
    }
    // Guard against a NULL name (consistent with function::output_name);
    // constructing a std::string from NULL is undefined behavior.
    const char* oname = plaidml_get_function_output(func_.get(), i);
    if (oname == NULL) {
      throw std::runtime_error("Attempting to get an invalid output index");
    }
    return get_output(std::string(oname));
  }
  // Returns the named output of the applied function.
  variable get_output(const std::string& name) {
    variable r;
    std::shared_ptr<plaidml_var> out(plaidml_apply_alloc_output(ptr_.get(), name.c_str()), plaidml_free_var);
    vai_exception::check_and_throw(out);
    r.ptr_ = out;
    return r;
  }

 private:
  std::shared_ptr<plaidml_function> func_;
  std::shared_ptr<plaidml_applier> ptr_;
  application(const std::shared_ptr<plaidml_function> func, const std::shared_ptr<plaidml_applier>& ptr)
      : func_(func), ptr_(ptr) {}
};
// A PlaidML function: a transformation from a set of input variables to a
// set of output variables, built from code or loaded from a file.
class function {
  friend class compose;
  friend class invoker;

 public:
  typedef std::vector<std::pair<std::string, variable>> parameters_t;
  typedef std::vector<variable> positional_t;
  // Invalid function
  function() {}
  // Make a function from code written in the PlaidML operation language.
  // If 'id' is non-empty, it is attached for tracking purposes.
  explicit function(const std::string& str, const std::string& id = "")
      : ptr_(plaidml_build_coded_function(str.c_str(), id.c_str()), plaidml_free_function) {
    vai_exception::check_and_throw(ptr_);
  }
  // Load and save function
  inline void load(const std::shared_ptr<ctx>& ctx, const device& dev,
                   const std::string& file);  // Later, after dev is defined
  void save(const std::string& file) {
    vai_exception::check_and_throw(plaidml_save_function(ptr_.get(), file.c_str()));
  }
  // Get information
  size_t num_inputs() { return plaidml_get_function_input_count(ptr_.get()); }
  size_t num_outputs() { return plaidml_get_function_output_count(ptr_.get()); }
  // Returns the name of input i, or "" if unavailable.
  std::string input_name(size_t i) {
    const char* name = plaidml_get_function_input(ptr_.get(), i);
    return (name == NULL ? "" : name);
  }
  // Returns the name of output i, or "" if unavailable.
  std::string output_name(size_t i) {
    const char* name = plaidml_get_function_output(ptr_.get(), i);
    return (name == NULL ? "" : name);
  }
  // Apply a function to values, produce new values, named parameters.
  // 'prev' lists prior applications this one must run after.
  // (Taken by const reference to avoid copying the vector per call.)
  application apply(const parameters_t& inputs, const std::vector<application>& prev = {}) {
    std::shared_ptr<plaidml_applier> app(plaidml_alloc_applier(ptr_.get()), plaidml_free_applier);
    vai_exception::check_and_throw(app);
    for (const auto& papp : prev) {
      bool r = plaidml_apply_add_dependency(app.get(), papp.ptr_.get());
      vai_exception::check_and_throw(r);
    }
    for (const auto& arg : inputs) {
      bool r = plaidml_apply_add_input(app.get(), arg.first.c_str(), arg.second.ptr_.get());
      vai_exception::check_and_throw(r);
    }
    return application(ptr_, app);
  }
  // Apply a function to positional values; the input count must match.
  // Throws std::runtime_error on a count mismatch.
  application apply(const positional_t& inputs, const std::vector<application>& prev = {}) {
    if (inputs.size() != num_inputs()) {
      throw std::runtime_error("Mismatched number of input in application: " + std::to_string(inputs.size()) + " vs " +
                               std::to_string(num_inputs()));
    }
    std::shared_ptr<plaidml_applier> app(plaidml_alloc_applier(ptr_.get()), plaidml_free_applier);
    vai_exception::check_and_throw(app);
    for (const auto& papp : prev) {
      bool r = plaidml_apply_add_dependency(app.get(), papp.ptr_.get());
      vai_exception::check_and_throw(r);
    }
    for (size_t idx = 0; idx < inputs.size(); ++idx) {
      bool r = plaidml_apply_add_input(app.get(), input_name(idx).c_str(), inputs[idx].ptr_.get());
      vai_exception::check_and_throw(r);
    }
    return application(ptr_, app);
  }
  // Operator() for ease of use in apply
  template <typename... Params>
  application operator()(Params... params) {
    return apply(std::vector<variable>{params...});
  }

 private:
  std::shared_ptr<plaidml_function> ptr_;
  explicit function(const std::shared_ptr<plaidml_function>& ptr) : ptr_(ptr) {}
};
// Builds a new function out of placeholders (inputs), computed variables
// (outputs), tensor updates, and application dependencies.
class compose {
 public:
  // NOTE(review): 'name' is currently unused — plaidml_alloc_composer()
  // takes no name. Kept for interface compatibility; taken by const
  // reference to avoid a copy per call.
  explicit compose(const std::string& name = "") : ptr_(plaidml_alloc_composer(), plaidml_free_composer) {
    vai_exception::check_and_throw(ptr_);
  }
  // Binds a placeholder to a named input of the composed function.
  compose& input(const std::string& name, const placeholder& p) {
    bool r = plaidml_add_composer_input(ptr_.get(), name.c_str(), p.ptr_.get());
    vai_exception::check_and_throw(r);
    return *this;
  }
  // Binds a computed value to a named output of the composed function.
  compose& output(const std::string& name, const variable& p) {
    bool r = plaidml_add_composer_output(ptr_.get(), name.c_str(), p.ptr_.get());
    vai_exception::check_and_throw(r);
    return *this;
  }
  // Adds an application dependency to the composed function.
  compose& dependency(const application& prev) {
    bool r = plaidml_add_composer_dependency(ptr_.get(), prev.ptr_.get());
    vai_exception::check_and_throw(r);
    return *this;
  }
  // Records an update of 'lhs' with the value of 'rhs' in the composed function.
  compose& update(const base_tensor& lhs, const variable& rhs) {
    variable tvar = lhs;
    bool r = plaidml_add_composer_update(ptr_.get(), tvar.ptr_.get(), rhs.ptr_.get());
    vai_exception::check_and_throw(r);
    return *this;
  }
  // Finalizes the composition into a function.
  operator function() {
    std::shared_ptr<plaidml_function> func(plaidml_build_composed_function(ptr_.get()), plaidml_free_function);
    vai_exception::check_and_throw(func);
    return function(func);
  }

 private:
  std::shared_ptr<plaidml_composer> ptr_;
};
// Binds a function's inputs and outputs to variables and schedules
// invocations of the function on a context.
class invoker {
 public:
  invoker() {}
  invoker(const invoker&) = delete;
  invoker(invoker&&) = default;
  invoker& operator=(const invoker&) = delete;
  invoker& operator=(invoker&&) = default;
  invoker(const std::shared_ptr<ctx>& ctx, const function& func)
      : ctx_{ctx}, invoker_{plaidml_alloc_invoker(ctx_->get_ctx(), func.ptr_.get())} {
    vai_exception::check_and_throw(invoker_);
  }
  // Binds the named function input to a variable.
  invoker& set_input(const std::string& name, const variable& var) {
    auto r = plaidml_set_invoker_input(invoker_.get(), name.c_str(), var.ptr_.get());
    vai_exception::check_and_throw(r);
    return *this;
  }
  // Binds the named function output to a variable.
  invoker& set_output(const std::string& name, const variable& var) {
    auto r = plaidml_set_invoker_output(invoker_.get(), name.c_str(), var.ptr_.get());
    vai_exception::check_and_throw(r);
    return *this;
  }
  // Returns the shape the named output will have under the current bindings.
  base_shape output_shape(const std::string& name) {
    std::shared_ptr<plaidml_shape> shp{plaidml_alloc_invoker_output_shape(invoker_.get(), name.c_str()),
                                       plaidml_free_shape};
    vai_exception::check_and_throw(shp);
    return base_shape{ctx_, std::move(shp)};
  }
  // Saves the invoker's function in the requested file format.
  void save(const std::string& file, plaidml_file_format format) {
    vai_exception::check_and_throw(plaidml_save_invoker(invoker_.get(), file.c_str(), format));
  }
  void set_const() { vai_exception::check_and_throw(plaidml_set_invoker_const(invoker_.get())); }
  // Schedules an invocation on the invoker's own context.
  // (Delegates to the context-taking overload to avoid duplicated logic.)
  std::unique_ptr<plaidml_invocation> invoke() { return invoke(ctx_); }
  // Schedules an invocation on the supplied context.
  std::unique_ptr<plaidml_invocation> invoke(const std::shared_ptr<ctx>& ctx) {
    std::unique_ptr<plaidml_invocation> invocation{plaidml_schedule_invocation(ctx->get_ctx(), invoker_.get())};
    vai_exception::check_and_throw(invocation);
    return invocation;
  }

 private:
  std::shared_ptr<ctx> ctx_;
  std::unique_ptr<plaidml_invoker> invoker_;
};
// TODO: Fix this!
// Computes gradients of a scalar value with respect to other variables.
class gradient {
 public:
  explicit gradient(const variable& var) : ptr_(plaidml_alloc_gradient(var.ptr_.get()), plaidml_free_gradient) {
    vai_exception::check_and_throw(ptr_);
  }
  // Returns the gradient with respect to the supplied variable.
  variable operator()(const variable& v) {
    plaidml_var* raw = plaidml_compute_grad_wrt(ptr_.get(), v.ptr_.get());
    vai_exception::check_and_throw(raw);
    variable result;
    result.ptr_.reset(raw, plaidml_free_var);
    return result;
  }

 private:
  std::shared_ptr<plaidml_gradient> ptr_;
};
// An open compute device, capable of allocating buffers and tensors.
class device {
  friend class function;
  friend class device_config;

 public:
  device() = default;
  // True when the device has not been opened.
  bool operator!() const { return !ptr_; }
  // Allocates a raw buffer of 'size' bytes on this device.
  buffer allocate(uint64_t size) const {
    buffer result;
    result.ptr_.reset(plaidml_alloc_buffer(ctx_->get_ctx(), ptr_.get(), size), plaidml_free_buffer);
    vai_exception::check_and_throw(result.ptr_);
    return result;
  }
  // Allocates an untyped tensor sized for the given shape.
  base_tensor allocate(const base_shape& s) const { return base_tensor(s.get_context(), allocate(s.buffer_size()), s); }
  // Allocates a typed tensor sized for the given shape.
  template <class T>
  tensor<T> allocate(const shape<T>& s) const {
    return tensor<T>(s.get_context(), allocate(s.buffer_size()), s);
  }

 private:
  explicit device(const std::shared_ptr<ctx>& ctx, plaidml_device* raw) : ctx_{ctx}, ptr_(raw, plaidml_close_device) {}
  std::shared_ptr<ctx> ctx_;
  std::shared_ptr<plaidml_device> ptr_;
  const std::shared_ptr<ctx>& get_context() const { return ctx_; }
};
// One enumerated device configuration; exposes its properties and can open
// the corresponding device.
class device_config {
  friend std::vector<device_config> _enumerate_devices(const std::shared_ptr<ctx>& ctx,
                                                       std::shared_ptr<plaidml_device_enumerator> dev_enum);

 public:
  // Get any string based property.
  // Queries the required size first, then fills a buffer of that size.
  std::string get_string_prop(plaidml_device_property prop) const {
    size_t out_size;
    bool r = plaidml_query_devconf(ctx_->get_ctx(), config_, prop, NULL, 0, &out_size);
    vai_exception::check_and_throw(r);
    std::string str(out_size, '\0');
    r = plaidml_query_devconf(ctx_->get_ctx(), config_, prop, &str[0], str.size(), NULL);
    // Check the query result before touching the string: the original code
    // popped first, and pop_back() on an empty string (out_size == 0) is
    // undefined behavior.
    vai_exception::check_and_throw(r);
    if (!str.empty()) {
      str.pop_back();  // Drop the trailing NUL included in out_size.
    }
    return str;
  }
  // Convenience functions for current properties
  std::string id() const { return get_string_prop(PLAIDML_DEVICE_ID); }
  std::string config() const { return get_string_prop(PLAIDML_DEVICE_CONFIG); }
  std::string description() const { return get_string_prop(PLAIDML_DEVICE_DESCRIPTION); }
  std::string details() const { return get_string_prop(PLAIDML_DEVICE_DETAILS); }
  // Open the device
  device open() const {
    device dev(ctx_, plaidml_open_device(ctx_->get_ctx(), config_));
    vai_exception::check_and_throw(dev.ptr_);
    return dev;
  }

 private:
  device_config(const std::shared_ptr<ctx>& ctx, const std::shared_ptr<plaidml_device_enumerator>& dev_enum,
                plaidml_devconf* config)
      : ctx_(ctx), dev_enum_(dev_enum), config_(config) {}
  std::shared_ptr<ctx> ctx_;
  std::shared_ptr<plaidml_device_enumerator> dev_enum_;
  plaidml_devconf* config_;
};
std::vector<device_config> _enumerate_devices(const std::shared_ptr<ctx>& ctx,
std::shared_ptr<plaidml_device_enumerator> dev_enum) {
std::vector<device_config> out;
size_t i = 0;
while (true) {
plaidml_devconf* conf = plaidml_get_devconf(ctx->get_ctx(), dev_enum.get(), i);
if (conf == NULL) break;
i++;
out.push_back(device_config(ctx, dev_enum, conf));
}
vai_clear_status(); // Since we always walk off the list, clear errors
return out;
}
std::vector<device_config> enumerate_devices(const std::shared_ptr<ctx>& ctx) {
std::shared_ptr<plaidml_device_enumerator> dev_enum(plaidml_alloc_device_enumerator(ctx->get_ctx(), NULL, NULL),
plaidml_free_device_enumerator);
vai_exception::check_and_throw(dev_enum);
return _enumerate_devices(ctx, dev_enum);
}
// Enumerates the devices visible under the supplied configuration string.
// (Removed an unused local vector that was never populated or returned.)
std::vector<device_config> enumerate_devices(const std::shared_ptr<ctx>& ctx, const std::string& config) {
  std::shared_ptr<plaidml_device_enumerator> dev_enum(
      plaidml_alloc_device_enumerator_with_config(ctx->get_ctx(), config.c_str(), NULL, NULL),
      plaidml_free_device_enumerator);
  vai_exception::check_and_throw(dev_enum);
  return _enumerate_devices(ctx, dev_enum);
}
// Actually needs definitions of both classes
void function::load(const std::shared_ptr<ctx>& ctx, const device& dev, const std::string& file) {
ptr_ = std::shared_ptr<plaidml_function>(plaidml_load_function(ctx->get_ctx(), dev.ptr_.get(), file.c_str()),
plaidml_free_function);
vai_exception::check_and_throw(ptr_);
}
} // namespace plaidml
} // namespace vertexai
+637
View File
@@ -0,0 +1,637 @@
// Copyright 2018 Intel Corporation.
//
// This is the PlaidML library interface, used to construct and manipulate
// programs defined as a sequence of operations over tensors.
#pragma once
#ifdef __cplusplus
#include <cstddef>
#include <memory>
#else
#include <stdbool.h>
#include <stddef.h>
#endif // __cplusplus
#include "plaidml/base/base.h"
#define PLAIDML_API VAI_API
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
// PlaidML devconf objects represent device configurations. Devconf objects are
// contained by PlaidML device enumerators, examined and potentially modified by
// PlaidML library consumers, and then used to open PlaidML devices for compute
// access.
#ifdef __cplusplus
struct plaidml_devconf;
#else
typedef struct plaidml_devconf plaidml_devconf;
#endif // __cplusplus
// Platform configuration properties. This set may be extended in the future.
typedef enum {
  // Platform names are NUL-terminated strings.
  PLAIDML_DEVICE_ID = 1,
  // Platform configs are NUL-terminated prototxt.
  PLAIDML_DEVICE_CONFIG = 2,
  // Platform descriptions are NUL-terminated prototxt.
  PLAIDML_DEVICE_DESCRIPTION = 3,
  // Platform details are NUL-terminated prototxt.
  PLAIDML_DEVICE_DETAILS = 4
} plaidml_device_property;
// Returns the version of plaidml
PLAIDML_API const char* plaidml_get_version();
// Queries the supplied device configuration property.
//
// The supplied output buffer pointer should point to a property-specific value
// to be filled in with the requested information; the output size is used for
// property versioning and buffer overflow protection. Fields in the output
// buffer not supported by the property implementation will be zero-filled
// (i.e. zero is the default value for all properties, including unsupported
// properties).
//
// The value pointed to by output_buffer_size_required, if provided, will be
// filled in with the size required for the output buffer. This is most useful
// with queries that return string values or arrays of values; a common pattern
// is to make a call with a NULL output buffer and zero size, allocate a buffer
// of the indicated output size required, and then to re-issue the query.
PLAIDML_API bool plaidml_query_devconf(vai_ctx* ctx, plaidml_devconf* devconf, plaidml_device_property property,
void* output_buffer, size_t output_buffer_size,
size_t* output_buffer_size_required);
// PlaidML devices are used to supply resources that might be backed by a
// variety of hardware.
#ifdef __cplusplus
struct plaidml_device;
#else
typedef struct plaidml_device plaidml_device;
#endif // __cplusplus
// Opens a device, using the supplied device configuration. A NULL
// configuration is permitted; this will use the system default PlaidML compute
// device.
PLAIDML_API plaidml_device* plaidml_open_device(vai_ctx* ctx, plaidml_devconf* devconf);
// Closes a device. After this call, the device should not be used for any
// subsequent calls. The device may share resources with other objects (such
// as buffers and functions); those resources will only be released when they
// are no longer needed by those other objects. Closing a NULL device is a
// no-op.
PLAIDML_API void plaidml_close_device(plaidml_device* device);
// PlaidML device enumerators offer access to sets of compute devices. Enumerators may
// supply devices that are in-process, machine-local, or cross-network, depending on
// the deployment configuration.
#ifdef __cplusplus
struct plaidml_device_enumerator;
#else
typedef struct plaidml_device_enumerator plaidml_device_enumerator;
#endif // __cplusplus
// Allocates a device enumerator and initializes it using automatic configuration.
//
// If the supplied callback is NULL, the call will block until the enumerator
// is fully initialized, and then will return a pointer to the enumerator
// (unless an error occurs).
//
// If the supplied callback is non-NULL, the call will immediately return NULL,
// and will arrange for the callback to be invoked with the enumerator once the
// enumerator becomes available (calling it with a NULL enumerator on error). The
// library guarantees to invoke the callback exactly once.
//
// If the library returns an enumerator, there will be at least one
// configured device available; otherwise, the call will fail with
// VAI_STATUS_NOT_FOUND.
//
// The library may invoke the callback synchronously if the enumerator is
// immediately available or in error conditions.
PLAIDML_API plaidml_device_enumerator* plaidml_alloc_device_enumerator(
vai_ctx* ctx, void (*callback)(void* arg, plaidml_device_enumerator* enumerator), void* arg);
// Allocates a device enumerator and initializes it using the supplied
// configuration. Otherwise identical to the version that uses system
// config.
PLAIDML_API plaidml_device_enumerator* plaidml_alloc_device_enumerator_with_config(
vai_ctx* ctx, const char* configuration, void (*callback)(void* arg, plaidml_device_enumerator* enumerator),
void* arg);
// Frees a device enumerator. After this call, the enumerator should not be
// used for any subsequent calls. The enumerator may share resources with
// other objects (such as devices, buffers, and functions); those resources
// will only be released when they are no longer needed by those other objects.
// Freeing a NULL enumerator is a no-op.
PLAIDML_API void plaidml_free_device_enumerator(plaidml_device_enumerator* enumerator);
// Gets the configuration file that was used to initialize devices.
PLAIDML_API const char* plaidml_get_enumerator_config_source(plaidml_device_enumerator* enumerator);
// Gets the number of valid or invalid device configurations available.
PLAIDML_API size_t plaidml_get_devconf_count(vai_ctx* ctx, plaidml_device_enumerator* enumerator, bool valid_devices);
// Gets one device configuration from a device enumerator. The lifetime of the
// device configuration is bounded by the device enumerator; there's no need to
// separately free the configuration. If the requested configuration index is
// out of range, or if the enumerator is NULL, this call will return NULL.
PLAIDML_API plaidml_devconf* plaidml_get_devconf(vai_ctx* ctx, plaidml_device_enumerator* enumerator, size_t index);
// Same as above, only returns invalid devices
PLAIDML_API plaidml_devconf* plaidml_get_invalid_devconf(vai_ctx* ctx, plaidml_device_enumerator* enumerator,
size_t index);
// PlaidML buffers are used to create bindings between actual data and the data
// elements in PlaidML programs.
#ifdef __cplusplus
struct plaidml_buffer;
#else
typedef struct plaidml_buffer plaidml_buffer;
#endif // __cplusplus
// PlaidML mappings are used to view and manipulate the contents of buffers.
#ifdef __cplusplus
struct plaidml_mapping;
#else
typedef struct plaidml_mapping plaidml_mapping;
#endif // __cplusplus
// Allocates a buffer of the supplied raw memory size, or returns NULL if the
// library cannot allocate sufficient memory, or if the supplied device is
// NULL.
PLAIDML_API plaidml_buffer* plaidml_alloc_buffer(vai_ctx* ctx, plaidml_device* device, uint64_t size);
// Frees a buffer. After this call, the buffer should not be used for any
// subsequent calls. Freeing a NULL buffer is a no-op.
PLAIDML_API void plaidml_free_buffer(plaidml_buffer* buffer);
// Maps a buffer's current contents into memory.
//
// If the supplied callback is NULL, the call will block until the buffer is
// available, and then will return a pointer to a mapping for the buffer
// (unless an error occurs).
//
// If the supplied callback is non-NULL, the call will immediately return NULL,
// and will arrange for the callback to be invoked with the mapping once the
// buffer's data becomes available (calling it with a NULL mapping on error).
//
// The library may invoke the callback synchronously if the buffer's data is
// already available or in error conditions.
//
// A NULL buffer may be supplied; this will always result in a NULL address,
// and an out-of-memory error in the current thread's thread-local storage.
PLAIDML_API plaidml_mapping* plaidml_map_buffer_current(plaidml_buffer* buffer,
void (*callback)(void* arg, plaidml_mapping* mapping),
void* arg);
// Maps a buffer into memory, possibly discarding its current contents.
//
// The implementation may preserve or discard the buffer's contents when
// constructing the mapping; callers should not assume that the buffer has been
// initialized.
//
// The implementation may construct a non-coherent mapping: reads from the
// buffer may return arbitrary values, even after writing the same memory from
// the same processor.
//
// A NULL buffer may be supplied; this will always result in a NULL address,
// and an out-of-memory error in the current thread's thread-local storage.
PLAIDML_API plaidml_mapping* plaidml_map_buffer_discard(vai_ctx* ctx, plaidml_buffer* buffer);
// Gets the base address of a mapping's mapped memory region. If the mapping
// has been written back to the buffer, this call will return NULL. A NULL
// mapping may be supplied; this will always return NULL.
PLAIDML_API char* plaidml_get_mapping_base(vai_ctx* ctx, plaidml_mapping* mapping);
// Gets the size of a mapping's mapped memory region. If the mapping has been
// written back to the buffer, this call will return 0. A NULL mapping may be
// supplied; this will always return 0.
PLAIDML_API size_t plaidml_get_mapping_size(vai_ctx* ctx, plaidml_mapping* mapping);
// Synchronizes a mapping with its backing store (if required by the underlying
// device implementation), possibly removing the mapping from the current
// virtual address space.
//
// After this call, callers must not access the mapping's previous virtual
// memory region.
//
// Callers are allowed to free the mapping and use the buffer as a program input
// immediately after this call.
//
// A NULL mapping may be supplied; this will always result in a false result
// and an out-of-memory error in the current thread's thread-local storage.
PLAIDML_API bool plaidml_writeback_mapping(vai_ctx* ctx, plaidml_mapping* mapping);
// Removes a mapping that's no longer required by the caller.
//
// After this call, callers must not access the mapping's previous virtual
// memory region.
//
// Freeing a NULL mapping is a no-op.
PLAIDML_API void plaidml_free_mapping(plaidml_mapping* mapping);
// PlaidML shapes describe the layout of the data within a buffer as observed by a
// program.
#ifdef __cplusplus
struct plaidml_shape;
#else
typedef struct plaidml_shape plaidml_shape;
#endif // __cplusplus
// Set the default datatype for floating-point computations.
PLAIDML_API void plaidml_set_floatx(plaidml_datatype datatype);
// Allocates a shape, or returns NULL if the library cannot allocate sufficient
// memory. Note that shapes must have dimensions added before use.
PLAIDML_API plaidml_shape* plaidml_alloc_shape(vai_ctx* ctx, plaidml_datatype datatype);
// Frees a shape. After this call, the shape should not be used for any
// subsequent calls. Freeing a NULL shape is a no-op.
PLAIDML_API void plaidml_free_shape(plaidml_shape* shape);
// Sets a shape's offset, in elements, from the beginning of the data.
PLAIDML_API bool plaidml_set_shape_offset(vai_ctx* ctx, plaidml_shape* shape, uint64_t offset_in_elements);
// Set a shape's layout
PLAIDML_API bool plaidml_shape_set_layout(vai_ctx* ctx, plaidml_shape* shape, const char* layout);
// Adds a dimension to a shape. Dimension sizes and strides are measured in
// elements of the shape's datatype, not by local buffer byte counts.
PLAIDML_API bool plaidml_add_dimension(vai_ctx* ctx, plaidml_shape* shape, uint64_t size_in_elements,
int64_t stride_in_elements);
// Gets a shape's type.
PLAIDML_API plaidml_datatype plaidml_get_shape_type(plaidml_shape* shape);
// Gets a shape's offset.
PLAIDML_API uint64_t plaidml_get_shape_offset(plaidml_shape* shape);
// Get the number of dimensions for a shape.
PLAIDML_API size_t plaidml_get_shape_dimension_count(plaidml_shape* shape);
// Gets the size in elements for a given shape dimension.
// If the dimension is out of range, zero will be returned.
PLAIDML_API uint64_t plaidml_get_shape_dimension_size(plaidml_shape* shape, size_t dim);
// Gets the stride in elements for a given shape dimension.
// If the dimension is out of range, zero will be returned.
PLAIDML_API int64_t plaidml_get_shape_dimension_stride(plaidml_shape* shape, size_t dim);
// Gets the byte size required for a buffer to hold the given shape.
PLAIDML_API uint64_t plaidml_get_shape_buffer_size(plaidml_shape* shape);
// Gets the underlying element count described by the given shape.
PLAIDML_API uint64_t plaidml_get_shape_element_count(plaidml_shape* shape);
// A PlaidML function defines a transformation from some set of inputs to
// some set of outputs.
#ifdef __cplusplus
struct plaidml_function;
#else
typedef struct plaidml_function plaidml_function;
#endif // __cplusplus
// Frees a function. After this call, the function should not be used for any
// subsequent calls. Freeing a NULL function is a no-op.
PLAIDML_API void plaidml_free_function(plaidml_function* function);
// Return the number of inputs to a function, 0 if function is NULL
PLAIDML_API size_t plaidml_get_function_input_count(plaidml_function* function);
// Return the name of input i for a function, or NULL if function is NULL or out of bounds
PLAIDML_API const char* plaidml_get_function_input(plaidml_function* function, size_t i);
// Return the number of outputs from a function, 0 if function is NULL
PLAIDML_API size_t plaidml_get_function_output_count(plaidml_function* function);
// Return the name of output i for a function, or NULL if function is NULL or out of bounds
PLAIDML_API const char* plaidml_get_function_output(plaidml_function* function, size_t i);
// A PlaidML var is an input to or an output from a PlaidML function.
#ifdef __cplusplus
struct plaidml_var;
#else
typedef struct plaidml_var plaidml_var;
#endif // __cplusplus
// Frees a var. After this call, the var should not be used for any
// subsequent calls. Freeing a NULL var is a no-op.
PLAIDML_API void plaidml_free_var(plaidml_var* var);
// Allocate a placeholder var.
//
// A placeholder can be used during function application: used as the output of
// one function application and the input of another function application,
// the placeholder defines an information flow between the functions. A
// placeholder can also be used during function composition: the placeholder
// can be bound to the inputs or outputs of the composed function.
//
// num_dimensions specifies the placeholder dimension count: PlaidML functions are
// polymorphic with respect to tensor sizes and datatypes, but not to the
// actual dimension count of their inputs and outputs, so placeholders need to
// indicate the number of dimensions of the variables they will eventually be
// bound to. For scalar placeholders, specify zero for the dimension count.
PLAIDML_API plaidml_var* plaidml_alloc_placeholder(size_t num_dimensions);
// Allocates a var representing a signed integer constant.
PLAIDML_API plaidml_var* plaidml_alloc_int64(int64_t value);
// Allocates a var representing a floating point constant.
PLAIDML_API plaidml_var* plaidml_alloc_real(double value);
// Allocates a var representing a tensor, bound to the given shape and buffer.
PLAIDML_API plaidml_var* plaidml_alloc_tensor(vai_ctx* ctx, plaidml_buffer* buffer, plaidml_shape* shape);
// Attaches quantization parameters to a weights tensor
PLAIDML_API bool plaidml_tensor_attach_qparams(plaidml_var* tensor, plaidml_var* qparams);
// Builds a function from the supplied code written in the PlaidML operation
// description language. If 'id' is not NULL, attach the id to the function
// for tracking purposes.
PLAIDML_API plaidml_function* plaidml_build_coded_function(const char* code, const char* id);
// TODO: Make more general method to serialize things.
// Load a function (possibly with bound tensors) from a file
PLAIDML_API plaidml_function* plaidml_load_function(vai_ctx* ctx, plaidml_device* dev, const char* filename);
// Store a function (possibly with bound tensors) to a file
PLAIDML_API bool plaidml_save_function(plaidml_function* func, const char* filename);
// Predeclare applier
// A PlaidML applier describes the application of a PlaidML function to some
// particular set of inputs, yielding some particular set of outputs. (For
// example, you can think of "+" as a function; applying it to "2" and "3"
// yields a particular output, "5".)
#ifdef __cplusplus
struct plaidml_applier;
#else
typedef struct plaidml_applier plaidml_applier;
#endif // __cplusplus
// A PlaidML composer builds a new function out of a set of vars, where the values
// of the output vars have been previously defined (by using an applier),
// either in terms of placeholders (which become the new function inputs), or
// in terms of mutable tensors (which will be mutated each time the function is
// run).
#ifdef __cplusplus
struct plaidml_composer;
#else
typedef struct plaidml_composer plaidml_composer;
#endif // __cplusplus
// Allocates a composer, or returns NULL if the library cannot allocate sufficient memory.
PLAIDML_API plaidml_composer* plaidml_alloc_composer();
// Binds a placeholder var to a named input of a composed function.
PLAIDML_API bool plaidml_add_composer_input(plaidml_composer* composer, const char* name, plaidml_var* var);
// Binds a computed value var to a named output of a composed function.
PLAIDML_API bool plaidml_add_composer_output(plaidml_composer* composer, const char* name, plaidml_var* var);
// Adds a dependency to the composed function. Any updates induced by the function
// application will be updates of the newly generated function (in addition to any
// explicit updates, which will supersede them).
PLAIDML_API bool plaidml_add_composer_dependency(plaidml_composer* composer, plaidml_applier* must_run_before);
// Adds a tensor update to a composed function. This allows the composed
// function to have externally visible side effects when run: the source tensor
// (which should be a placeholder bound to an output of some function) will be
// assigned to the destination tensor (either a placeholder or a tensor var)
// each time the composed function is run.
PLAIDML_API bool plaidml_add_composer_update(plaidml_composer* composer, plaidml_var* dest_tensor,
plaidml_var* src_tensor);
// Builds the function described by the composer. This should be called at
// most once per composer; after this call, the only valid operation on the
// composer is plaidml_free_composer().
PLAIDML_API plaidml_function* plaidml_build_composed_function(plaidml_composer* composer);
// Frees a composer. After this call, the composer should not be used for any
// subsequent calls. Freeing a NULL composer is a no-op.
PLAIDML_API void plaidml_free_composer(plaidml_composer* composer);
// Allocates an applier describing the application of the given function to some
// number of inputs, or returns NULL if the library cannot allocate sufficient memory.
PLAIDML_API plaidml_applier* plaidml_alloc_applier(plaidml_function* function);
// Adds a dependency to the applied function. This is used to sequence tensor
// updates: if a sub-function of the applied function uses a mutable tensor as
// an input, the value it will observe for that tensor will be the value after
// the indicated function has run (presumably updating the tensor). In addition
// the new function application will carry the updates forward.
PLAIDML_API bool plaidml_apply_add_dependency(plaidml_applier* applier, plaidml_applier* must_run_before);
// Adds a named input to a function application. Note that the input variable
// is not consumed; the caller remains responsible for calling plaidml_free_var()
// when the supplied var is no longer needed.
PLAIDML_API bool plaidml_apply_add_input(plaidml_applier* applier, const char* name, plaidml_var* var);
// Allocates a var corresponding to the output of a function application. The
// caller is responsible for calling plaidml_free_var() on the result when the
// variable is no longer needed.
//
// At the time when the output is allocated, all inputs to the function
// application must already be added (either as concrete values or as
// placeholders).
PLAIDML_API plaidml_var* plaidml_apply_alloc_output(plaidml_applier* applier, const char* name);
// Frees an applier. After this call, the applier should not be used for any
// subsequent calls. Freeing a NULL applier is a no-op.
PLAIDML_API void plaidml_free_applier(plaidml_applier* applier);
// A PlaidML invoker provides a mechanism for scheduling runs of a
// PlaidML function.
//
// The function need not be completely bound when supplied to the
// invoker; the invoker may be mutated to set the function inputs and
// outputs. The input and output bindings must be fully specified,
// and must be dimensionally consistent with each other, at the time
// the invoker is used to invoke the supplied function.
//
// Invokers are not threadsafe, and they are stateful; callers are
// advised to synchronize concurrent access from the time the
// invoker's inputs are set through to when the function has completed
// running.
#ifdef __cplusplus
struct plaidml_invoker;
#else
typedef struct plaidml_invoker plaidml_invoker;
#endif // __cplusplus
// Allocates an invoker for the supplied function, or returns NULL if
// the library cannot allocate sufficient memory, or if the supplied
// context or function is NULL.
PLAIDML_API plaidml_invoker* plaidml_alloc_invoker(vai_ctx* ctx, plaidml_function* function);
// Frees an invoker. After this call, the invoker should not be used for any
// subsequent calls. Freeing a NULL invoker is a no-op.
PLAIDML_API void plaidml_free_invoker(plaidml_invoker* invoker);
// Sets a named input for an invocation. The variable must be NULL or
// a concrete object; placeholders are not permitted. Note that the
// input variable is not consumed; the caller remains responsible for
// calling plaidml_free_var() when the supplied var is no longer
// needed.
PLAIDML_API bool plaidml_set_invoker_input(plaidml_invoker* invoker, const char* name, plaidml_var* var);
// Allocates a shape corresponding to the output of an invocation.
// The caller is responsible for calling plaidml_free_shape() on the
// result when the shape is no longer needed.
//
// At the time when the shape is allocated, all inputs to the
// invocation must already be set to concrete values that are
// consistent in size.
PLAIDML_API plaidml_shape* plaidml_alloc_invoker_output_shape(plaidml_invoker* invoker, const char* name);
// Sets a named output for an invocation. The variable must be NULL
// or a concrete tensor; placeholders are not permitted. Note that
// the output variable is not consumed; the caller remains responsible
// for calling plaidml_free_var() when the supplied var is no longer
// needed.
PLAIDML_API bool plaidml_set_invoker_output(plaidml_invoker* invoker, const char* name, plaidml_var* var);
// PlaidML Stripe file formats.
typedef enum {
PLAIDML_FILE_FORMAT_TILE = 1,
PLAIDML_FILE_FORMAT_STRIPE_HUMAN = 2,
PLAIDML_FILE_FORMAT_STRIPE_PROTOTXT = 3,
PLAIDML_FILE_FORMAT_STRIPE_BINARY = 4,
} plaidml_file_format;
// Mark a function's inputs as 'const', that is, subject to constant folding
// and other operations that assume they will no longer be changed
PLAIDML_API bool plaidml_set_invoker_const(plaidml_invoker* invoker);
// Serializes an invoker to a file. All inputs to the invoker must
// already be set to concrete values that are consistent in size.
PLAIDML_API bool plaidml_save_invoker(plaidml_invoker* invoker, const char* filename, plaidml_file_format format);
// A PlaidML invocation describes one particular run of a function.
#ifdef __cplusplus
struct plaidml_invocation;
#else
typedef struct plaidml_invocation plaidml_invocation;
#endif // __cplusplus
// Schedules a run of an invoker's function with the invoker's current
// input and output bindings.
//
// The invocation must be fully specified: all function inputs and
// outputs must have corresponding inputs and outputs set in the
// invoker. Furthermore, all inputs and outputs must be consistently
// sized relative to each other and the function.
//
// All buffers used to back tensors that will be updated by the
// invocation should already be unmapped. When this call returns,
// those buffers logically contain the updated values, and may be
// remapped; remapping requests will complete once the underlying
// invocation is complete.
//
// Note that this call may return before the computation described by
// the function has actually completed; the computation is scheduled,
// not complete. Errors that occur asynchronously will be reported
// when the buffers updated by running the function are remapped.
//
// Once this call returns, the invoker's inputs and outputs may be set
// by the caller, and the invoker may be used for another run of the
// invoker's function, even if the first run has not yet completed.
PLAIDML_API plaidml_invocation* plaidml_schedule_invocation(vai_ctx* ctx, plaidml_invoker* invoker);
// Frees an invocation. After this call, the invocation should not be
// used for any subsequent calls. Freeing a NULL invocation is a no-op.
PLAIDML_API void plaidml_free_invocation(plaidml_invocation* invocation);
// A PlaidML gradient computes gradient data for a given scalar.
#ifdef __cplusplus
struct plaidml_gradient;
#else
typedef struct plaidml_gradient plaidml_gradient;
#endif // __cplusplus
// Allocates and returns a gradient computer for a given scalar, or NULL if an error occurs
PLAIDML_API plaidml_gradient* plaidml_alloc_gradient(plaidml_var* var);
// Frees a gradient computer. After this call, the gradient computer should not
// be used for any subsequent calls. Freeing a NULL gradient computer is a no-op.
PLAIDML_API void plaidml_free_gradient(plaidml_gradient* grad);
// Determines the gradient of the scalar with respect to some value
PLAIDML_API plaidml_var* plaidml_compute_grad_wrt(plaidml_gradient* grad, plaidml_var* wrt);
#ifdef __cplusplus
} // extern "C"
namespace std {
// Specializations of std::default_delete for the opaque PlaidML handle
// types, so that a std::unique_ptr<T> owning one of these handles releases
// it through the matching plaidml_free_* / plaidml_close_* C API call
// instead of calling `delete` on an incomplete type.
template <>
struct default_delete<::plaidml_device> {
  void operator()(::plaidml_device* device) const noexcept { ::plaidml_close_device(device); }
};
template <>
struct default_delete<::plaidml_device_enumerator> {
  void operator()(::plaidml_device_enumerator* enumerator) const noexcept {
    ::plaidml_free_device_enumerator(enumerator);
  }
};
template <>
struct default_delete<::plaidml_buffer> {
  void operator()(::plaidml_buffer* buffer) const noexcept { ::plaidml_free_buffer(buffer); }
};
template <>
struct default_delete<::plaidml_mapping> {
  void operator()(::plaidml_mapping* mapping) const noexcept { ::plaidml_free_mapping(mapping); }
};
template <>
struct default_delete<::plaidml_shape> {
  void operator()(::plaidml_shape* shape) const noexcept { ::plaidml_free_shape(shape); }
};
template <>
struct default_delete<::plaidml_function> {
  void operator()(::plaidml_function* function) const noexcept { ::plaidml_free_function(function); }
};
template <>
struct default_delete<::plaidml_var> {
  void operator()(::plaidml_var* var) const noexcept { ::plaidml_free_var(var); }
};
template <>
struct default_delete<::plaidml_composer> {
  void operator()(::plaidml_composer* composer) const noexcept { ::plaidml_free_composer(composer); }
};
template <>
struct default_delete<::plaidml_applier> {
  void operator()(::plaidml_applier* applier) const noexcept { ::plaidml_free_applier(applier); }
};
template <>
struct default_delete<::plaidml_invoker> {
  void operator()(::plaidml_invoker* invoker) const noexcept { ::plaidml_free_invoker(invoker); }
};
template <>
struct default_delete<::plaidml_invocation> {
  void operator()(::plaidml_invocation* invocation) const noexcept { ::plaidml_free_invocation(invocation); }
};
template <>
struct default_delete<::plaidml_gradient> {
  void operator()(::plaidml_gradient* gradient) const noexcept { ::plaidml_free_gradient(gradient); }
};
}  // namespace std
#endif // __cplusplus
+3
View File
@@ -0,0 +1,3 @@
home = /usr/local/Cellar/python@3.8/3.8.12_1/bin
include-system-site-packages = false
version = 3.8.12
@@ -0,0 +1,37 @@
NetworkX is distributed with the 3-clause BSD license.
::
Copyright (C) 2004-2021, NetworkX Developers
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the NetworkX Developers nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,2 @@
3D Drawing
----------
@@ -0,0 +1,43 @@
"""
=======
Mayavi2
=======
"""
import networkx as nx
import numpy as np
from mayavi import mlab
# some graphs to try
# H=nx.krackhardt_kite_graph()
# H=nx.Graph();H.add_edge('a','b');H.add_edge('a','c');H.add_edge('a','d')
# H=nx.grid_2d_graph(4,5)
H = nx.cycle_graph(20)
# reorder nodes from 0,len(G)-1
G = nx.convert_node_labels_to_integers(H)
# 3d spring layout
pos = nx.spring_layout(G, dim=3, seed=1001)
# numpy array of x,y,z positions in sorted node order
xyz = np.array([pos[v] for v in sorted(G)])
# scalar colors
scalars = np.array(list(G.nodes())) + 5
mlab.figure()
pts = mlab.points3d(
xyz[:, 0],
xyz[:, 1],
xyz[:, 2],
scalars,
scale_factor=0.1,
scale_mode="none",
colormap="Blues",
resolution=20,
)
pts.mlab_source.dataset.lines = np.array(list(G.edges()))
tube = mlab.pipeline.tube(pts, tube_radius=0.01)
mlab.pipeline.surface(tube, color=(0.8, 0.8, 0.8))
mlab.orientation_axes()
@@ -0,0 +1,51 @@
"""
================
Basic matplotlib
================
A basic example of 3D Graph visualization using `mpl_toolkits.mplot_3d`.
"""
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# The graph to visualize
G = nx.cycle_graph(20)
# 3d spring layout
pos = nx.spring_layout(G, dim=3, seed=779)
# Extract node and edge positions from the layout
node_xyz = np.array([pos[v] for v in sorted(G)])
edge_xyz = np.array([(pos[u], pos[v]) for u, v in G.edges()])
# Create the 3D figure
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
# Plot the nodes - alpha is scaled by "depth" automatically
ax.scatter(*node_xyz.T, s=100, ec="w")
# Plot the edges
for vizedge in edge_xyz:
ax.plot(*vizedge.T, color="tab:gray")
def _format_axes(ax):
"""Visualization options for the 3D axes."""
# Turn gridlines off
ax.grid(False)
# Suppress tick labels
for dim in (ax.xaxis, ax.yaxis, ax.zaxis):
dim.set_ticks([])
# Set axes labels
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
_format_axes(ax)
fig.tight_layout()
plt.show()
@@ -0,0 +1,8 @@
.. _examples_gallery:
Gallery
=======
General-purpose and introductory examples for NetworkX.
The `tutorial <../tutorial.html>`_ introduces conventions and basic graph
manipulations.
@@ -0,0 +1,2 @@
Algorithms
----------
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,338 @@
# source target
1 2
1 10
2 1
2 10
3 7
4 7
4 209
5 132
6 150
7 3
7 4
7 9
8 106
8 115
9 1
9 2
9 7
10 1
10 2
11 133
11 218
12 88
13 214
14 24
14 52
16 10
16 19
17 64
17 78
18 55
18 103
18 163
19 18
20 64
20 180
21 16
21 22
22 21
22 64
22 106
23 20
23 22
23 64
24 14
24 31
24 122
27 115
28 29
29 28
30 19
31 24
31 32
31 122
31 147
31 233
32 31
32 86
34 35
34 37
35 34
35 43
36 132
36 187
37 38
37 90
37 282
38 42
38 43
38 210
40 20
42 15
42 38
43 34
43 35
43 38
45 107
46 61
46 72
48 23
49 30
49 64
49 108
49 115
49 243
50 30
50 47
50 55
50 125
50 163
52 218
52 224
54 111
54 210
55 65
55 67
55 105
55 108
55 222
56 18
56 64
57 65
57 125
58 20
58 30
58 50
58 103
58 180
59 164
63 125
64 8
64 50
64 70
64 256
66 20
66 84
66 106
66 125
67 22
67 50
67 113
68 50
70 50
70 64
71 72
74 29
74 75
74 215
75 74
75 215
76 58
76 104
77 103
78 64
78 68
80 207
80 210
82 8
82 77
82 83
82 97
82 163
83 82
83 226
83 243
84 29
84 154
87 101
87 189
89 90
90 89
90 94
91 86
92 19
92 30
92 106
94 72
94 89
94 90
95 30
96 75
96 256
97 80
97 128
98 86
100 86
101 87
103 77
103 104
104 58
104 77
104 103
106 22
107 38
107 114
107 122
108 49
108 55
111 121
111 128
111 210
113 253
114 107
116 30
116 140
118 129
118 138
120 88
121 128
122 31
123 32
124 244
125 132
126 163
126 180
128 38
128 111
129 118
132 29
132 30
133 30
134 135
134 150
135 134
137 144
138 118
138 129
139 142
141 157
141 163
142 139
143 2
144 137
145 151
146 137
146 165
146 169
146 171
147 31
147 128
148 146
148 169
148 171
148 282
149 128
149 148
149 172
150 86
151 145
152 4
153 134
154 155
156 161
157 141
161 156
165 144
165 148
167 149
169 15
169 148
169 171
170 115
170 173
170 183
170 202
171 72
171 148
171 169
173 170
175 100
176 10
178 181
181 178
182 38
182 171
183 96
185 50
186 127
187 50
187 65
188 30
188 50
189 87
189 89
190 35
190 38
190 122
190 182
191 54
191 118
191 129
191 172
192 149
192 167
195 75
197 50
197 188
198 218
198 221
198 222
200 65
200 220
201 113
202 156
203 232
204 194
207 38
207 122
207 124
208 30
208 50
210 38
210 207
211 37
213 35
213 38
214 13
214 14
214 171
214 213
215 75
217 39
218 68
218 222
221 198
222 198
222 218
223 39
225 3
226 22
229 65
230 68
231 43
232 95
232 203
233 99
234 68
234 230
237 244
238 145
242 3
242 113
244 237
249 96
250 156
252 65
254 65
258 113
268 4
270 183
272 6
275 96
280 183
280 206
282 37
285 75
290 285
293 290
@@ -0,0 +1,112 @@
"""
===========
Beam Search
===========
Beam search with dynamic beam width.
The progressive widening beam search repeatedly executes a beam search
with increasing beam width until the target node is found.
"""
import math
import matplotlib.pyplot as plt
import networkx as nx
def progressive_widening_search(G, source, value, condition, initial_width=1):
    """Progressive widening beam search to find a node.

    Runs repeated beam searches from `source` over the graph `G`,
    doubling the beam width after every unsuccessful pass, and returns
    the first node found that satisfies `condition`.

    `value` scores the neighbors encountered during the underlying
    breadth-first search; only the best nodes within the current beam
    width are enqueued at each step.

    `condition` takes a node and returns a Boolean indicating whether it
    is the target. If no node in the (weakly) connected component of
    `source` satisfies it, this function raises :exc:`NodeNotFound`.

    `initial_width` is the beam width of the first pass (the default is
    one). Each restart doubles the width, so the width grows
    exponentially, and the search terminates after the width exceeds the
    number of nodes in the graph.
    """
    # The source itself may already satisfy the termination condition.
    if condition(source):
        return source
    # Doubling the width ceil(log2(len(G))) times guarantees the final
    # pass has width >= len(G), i.e. it degenerates into a plain
    # breadth-first search, so every reachable node is eventually seen.
    for i in range(math.ceil(math.log2(len(G)))):
        # Every pass restarts from `source`, so nodes may be revisited
        # across passes (depending on the `value` function).
        for _, candidate in nx.bfs_beam_edges(G, source, value, initial_width * 2**i):
            if condition(candidate):
                return candidate
    # All nodes were visited on the final pass and none matched.
    raise nx.NodeNotFound("no node satisfied the termination condition")
###############################################################################
# Search for a node with high centrality.
# ---------------------------------------
#
# We generate a random graph, compute the centrality of each node, then perform
# the progressive widening search in order to find a node of high centrality.

# Seed the random number generation so the example is reproducible.
seed = 89
G = nx.gnp_random_graph(100, 0.5, seed=seed)
centrality = nx.eigenvector_centrality(G)
avg_centrality = sum(centrality.values()) / len(G)


def has_high_centrality(v):
    """Return True when v's centrality is at least the graph average."""
    return centrality[v] >= avg_centrality


# Start at node 0, score neighbors by centrality, stop at an above-average node.
found_node = progressive_widening_search(G, 0, centrality.get, has_high_centrality)
c = centrality[found_node]
print(f"found node {found_node} with centrality {c}")

# Draw graph
pos = nx.spring_layout(G, seed=seed)
options = {
    "node_color": "blue",
    "node_size": 20,
    "edge_color": "grey",
    "linewidths": 0,
    "width": 0.1,
}
nx.draw(G, pos, **options)
# Highlight the found high-centrality node: larger and red.
nx.draw_networkx_nodes(G, pos, nodelist=[found_node], node_size=100, node_color="r")
plt.show()
@@ -0,0 +1,83 @@
"""
=====================
Betweeness Centrality
=====================
Betweenness centrality measures of positive gene functional associations
using WormNet v.3-GS.
Data from: https://www.inetbio.org/wormnet/downloadnetwork.php
"""
from random import sample
import networkx as nx
import matplotlib.pyplot as plt
# Gold standard data of positive gene functional associations
# from https://www.inetbio.org/wormnet/downloadnetwork.php
G = nx.read_edgelist("WormNet.v3.benchmark.txt")

# Randomly drop two thirds of the nodes so the example runs quickly.
num_to_remove = int(len(G) / 1.5)
G.remove_nodes_from(sample(list(G.nodes), num_to_remove))

# Discard nodes whose degree is below 10.
low_degree = [node for node, degree in G.degree() if degree < 10]
G.remove_nodes_from(low_degree)

# Keep only the largest connected component.
largest_component = max(nx.connected_components(G), key=len)
H = G.subgraph(largest_component)

# Approximate betweenness centrality, sampled with k=10 pivot nodes.
centrality = nx.betweenness_centrality(H, k=10, endpoints=True)

# Community structure via label propagation; map each node to its community.
lpc = nx.community.label_propagation_communities(H)
community_index = {n: i for i, com in enumerate(lpc) for n in com}

#### draw graph ####
fig, ax = plt.subplots(figsize=(20, 15))
pos = nx.spring_layout(H, k=0.15, seed=4572321)
# Color by community, size by centrality.
node_color = [community_index[n] for n in H]
node_size = [v * 20000 for v in centrality.values()]
nx.draw_networkx(
    H,
    pos=pos,
    with_labels=False,
    node_color=node_color,
    node_size=node_size,
    edge_color="gainsboro",
    alpha=0.4,
)

# Title/legend
font = {"color": "k", "fontweight": "bold", "fontsize": 20}
ax.set_title("Gene functional association network (C. elegans)", font)
# Switch to a red font for the legend text.
font["color"] = "r"
ax.text(
    0.80,
    0.10,
    "node color = community structure",
    horizontalalignment="center",
    transform=ax.transAxes,
    fontdict=font,
)
ax.text(
    0.80,
    0.06,
    "node size = betweeness centrality",
    horizontalalignment="center",
    transform=ax.transAxes,
    fontdict=font,
)
# Resize figure for label readability
ax.margins(0.1, 0.05)
fig.tight_layout()
plt.axis("off")
plt.show()
@@ -0,0 +1,79 @@
"""
==========
Blockmodel
==========
Example of creating a block model using the quotient_graph function in NX. Data
used is the Hartford, CT drug users network::
@article{weeks2002social,
title={Social networks of drug users in high-risk sites: Finding the connections},
url = {https://doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
author={Weeks, Margaret R and Clair, Scott and Borgatti, Stephen P and Radda, Kim and Schensul, Jean J},
journal={{AIDS and Behavior}},
volume={6},
number={2},
pages={193--206},
year={2002},
publisher={Springer}
}
"""
from collections import defaultdict
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from scipy.cluster import hierarchy
from scipy.spatial import distance
def create_hc(G):
    """Creates hierarchical cluster of graph G from distance matrix"""
    # Pairwise shortest-path distances, written into a dense matrix.
    distances = np.zeros((len(G), len(G)))
    for src, lengths in nx.all_pairs_shortest_path_length(G):
        for dst, hops in lengths.items():
            distances[src][dst] = hops
    # Condense the matrix and cluster with farthest-point (complete) linkage.
    Z = hierarchy.complete(distance.squareform(distances))
    # The cut height t=1.15 is arbitrary, chosen for illustrative purposes.
    membership = list(hierarchy.fcluster(Z, t=1.15))
    # Group the node indices by their assigned cluster id.
    partition = defaultdict(list)
    for node, cluster in zip(range(len(G)), membership):
        partition[cluster].append(node)
    return list(partition.values())
G = nx.read_edgelist("hartford_drug.edgelist")

# Work on the largest connected component only.
H = G.subgraph(next(nx.connected_components(G)))
# Relabel to consecutive integer nodes for easier indexing.
H = nx.convert_node_labels_to_integers(H)

# Partition the nodes with hierarchical clustering.
partitions = create_hc(H)

# Build the block model graph from the partition.
BM = nx.quotient_graph(H, partitions, relabel=True)

# Draw original graph
pos = nx.spring_layout(H, iterations=100, seed=83)  # Seed for reproducibility
plt.subplot(211)
nx.draw(H, pos, with_labels=False, node_size=10)

# Block model: node size reflects how many original nodes the block contains,
# edge width reflects the aggregated edge weight.
node_size = [BM.nodes[x]["nnodes"] * 10 for x in BM.nodes()]
edge_width = [2 * data["weight"] for _, _, data in BM.edges(data=True)]
# Place each block at the mean position of its member nodes.
posBM = {}
for block in BM:
    member_xy = np.array([pos[u] for u in BM.nodes[block]["graph"]])
    posBM[block] = member_xy.mean(axis=0)
plt.subplot(212)
nx.draw(BM, posBM, node_size=node_size, width=edge_width, with_labels=False)
plt.axis("off")
plt.show()
@@ -0,0 +1,103 @@
"""
========
Circuits
========
Convert a Boolean circuit to an equivalent Boolean formula.
A Boolean circuit can be exponentially more expressive than an
equivalent formula in the worst case, since the circuit can reuse
subcircuits multiple times, whereas a formula cannot reuse subformulas
more than once. Thus creating a Boolean formula from a Boolean circuit
in this way may be infeasible if the circuit is large.
"""
import matplotlib.pyplot as plt
import networkx as nx
def circuit_to_formula(circuit):
    """Convert a Boolean circuit (a DAG) into an equivalent formula tree."""
    formula = nx.dag_to_branching(circuit)
    # Copy each formula node's operator/variable label from the circuit
    # node it was expanded from.
    for node in formula:
        origin = formula.nodes[node]["source"]
        formula.nodes[node]["label"] = circuit.nodes[origin]["label"]
    return formula
def formula_to_string(formula):
    """Render a formula tree as a parenthesized Boolean expression string."""

    def _to_string(formula, root):
        label = formula.nodes[root]["label"]
        # A node with no successors is a leaf: its label is a variable name.
        if not formula[root]:
            return label
        # Otherwise the label is an operator applied to the children.
        children = formula[root]
        # A single child means the label must be a NOT operator.
        if len(children) == 1:
            child = nx.utils.arbitrary_element(children)
            return f"{label}({_to_string(formula, child)})"
        # NB "left" and "right" here are a little misleading: the children
        # of a node are unordered. That is fine because Boolean AND and OR
        # are symmetric, but it does mean the operand order is not
        # predictable and may differ between invocations.
        left, right = formula[root]
        left_subformula = _to_string(formula, left)
        right_subformula = _to_string(formula, right)
        return f"({left_subformula} {label} {right_subformula})"

    # The unique node with no incoming edges is the formula's root.
    root = next(v for v, d in formula.in_degree() if d == 0)
    return _to_string(formula, root)
###############################################################################
# Create an example Boolean circuit.
# ----------------------------------
#
# This circuit has a ∧ at the output and two ∨s at the next layer.
# The third layer has a variable x that appears in the left ∨, a
# variable y that appears in both the left and right ∨s, and a
# negation for the variable z that appears as the sole node in the
# fourth layer.
circuit = nx.DiGraph()
# Layer 0: the output AND gate.
# NOTE(review): the operator labels below were empty strings ("") in this
# revision — the ∧/∨ characters were evidently stripped by a mis-encoding
# (the surviving comment above still mentions "a ∧ at the output").
# They are restored here so the printed formula shows its operators.
circuit.add_node(0, label="∧", layer=0)
# Layer 1: two OR gates feeding the output.
circuit.add_node(1, label="∨", layer=1)
circuit.add_node(2, label="∨", layer=1)
circuit.add_edge(0, 1)
circuit.add_edge(0, 2)
# Layer 2: variables x, y and a NOT gate.
circuit.add_node(3, label="x", layer=2)
circuit.add_node(4, label="y", layer=2)
circuit.add_node(5, label="¬", layer=2)
circuit.add_edge(1, 3)
circuit.add_edge(1, 4)
circuit.add_edge(2, 4)
circuit.add_edge(2, 5)
# Layer 3: the variable z under the negation.
circuit.add_node(6, label="z", layer=3)
circuit.add_edge(5, 6)
# Convert the circuit to an equivalent formula and print it.
formula = circuit_to_formula(circuit)
print(formula_to_string(formula))
labels = nx.get_node_attributes(circuit, "label")
options = {
    "node_size": 600,
    "alpha": 0.5,
    "node_color": "blue",
    "labels": labels,
    "font_size": 22,
}
plt.figure(figsize=(8, 8))
# Lay nodes out in columns by circuit layer.
pos = nx.multipartite_layout(circuit, subset_key="layer")
nx.draw_networkx(circuit, pos, **options)
plt.title(formula_to_string(formula))
plt.axis("equal")
plt.show()
@@ -0,0 +1,43 @@
"""
==========
Davis Club
==========
Davis Southern Club Women
Shows how to make unipartite projections of the graph and compute the
properties of those graphs.
These data were collected by Davis et al. in the 1930s.
They represent observed attendance at 14 social events by 18 Southern women.
The graph is bipartite (clubs, women).
"""
import matplotlib.pyplot as plt
import networkx as nx
import networkx.algorithms.bipartite as bipartite
G = nx.davis_southern_women_graph()
women = G.graph["top"]
clubs = G.graph["bottom"]

print("Biadjacency matrix")
print(bipartite.biadjacency_matrix(G, women, clubs))

# Unipartite projection of the bipartite graph onto the women nodes.
W = bipartite.projected_graph(G, women)
print()
print("#Friends, Member")
for w in women:
    print(f"{W.degree(w)} {w}")

# Weighted projection keeping the number of co-occurrences: each edge weight
# counts shared events, so the weighted degree is the total shared contacts.
W = bipartite.weighted_projected_graph(G, women)
print()
print("#Friend meetings, Member")
for w in women:
    print(f"{W.degree(w, weight='weight')} {w}")

pos = nx.spring_layout(G, seed=648)  # Seed layout for reproducible node positions
nx.draw(G, pos)
plt.show()
@@ -0,0 +1,92 @@
"""
===============
Dedensification
===============
Examples of dedensification of a graph. Dedensification retains the structural
pattern of the original graph and will only add compressor nodes when doing so
would result in fewer edges in the compressed graph.
"""
import matplotlib.pyplot as plt
import networkx as nx
# Side-by-side drawing of a small digraph before and after dedensification.
plt.suptitle("Dedensification")
original_graph = nx.DiGraph()
white_nodes = ["1", "2", "3", "4", "5", "6"]
red_nodes = ["A", "B", "C"]
node_sizes = [250 for node in white_nodes + red_nodes]
node_colors = ["white" for n in white_nodes] + ["red" for n in red_nodes]
original_graph.add_nodes_from(white_nodes + red_nodes)
original_graph.add_edges_from(
    [
        ("1", "C"),
        ("1", "B"),
        ("2", "C"),
        ("2", "B"),
        ("2", "A"),
        ("3", "B"),
        ("3", "A"),
        ("3", "6"),
        ("4", "C"),
        ("4", "B"),
        ("4", "A"),
        ("5", "B"),
        ("5", "A"),
        ("6", "5"),
        ("A", "6"),
    ]
)
base_options = dict(with_labels=True, edgecolors="black")
# Hand-placed positions for the original graph drawing.
pos = {
    "3": (0, 1),
    "2": (0, 2),
    "1": (0, 3),
    "6": (1, 0),
    "A": (1, 1),
    "B": (1, 2),
    "C": (1, 3),
    "4": (2, 3),
    "5": (2, 1),
}
ax1 = plt.subplot(1, 2, 1)
plt.title("Original (%s edges)" % original_graph.number_of_edges())
nx.draw_networkx(original_graph, pos=pos, node_color=node_colors, **base_options)
# copy=False allows dedensify to modify the graph in place.
nonexp_graph, compression_nodes = nx.summarization.dedensify(
    original_graph, threshold=2, copy=False
)
# Compressor nodes are drawn yellow and larger than the original nodes.
nonexp_node_colors = list(node_colors)
nonexp_node_sizes = list(node_sizes)
for node in compression_nodes:
    nonexp_node_colors.append("yellow")
    nonexp_node_sizes.append(600)
plt.subplot(1, 2, 2)
plt.title("Dedensified (%s edges)" % nonexp_graph.number_of_edges())
nonexp_pos = {
    "5": (0, 0),
    "B": (0, 2),
    "1": (0, 3),
    "6": (1, 0.75),
    "3": (1.5, 1.5),
    "A": (2, 0),
    "C": (2, 3),
    "4": (3, 1.5),
    "2": (3, 2.5),
}
# Stack the compressor nodes in a column at x = 2.
c_nodes = list(compression_nodes)
c_nodes.sort()
for spot, node in enumerate(c_nodes):
    nonexp_pos[node] = (2, spot + 2)
nx.draw_networkx(
    nonexp_graph,
    pos=nonexp_pos,
    node_color=nonexp_node_colors,
    node_size=nonexp_node_sizes,
    **base_options
)
plt.tight_layout()
plt.show()
@@ -0,0 +1,210 @@
"""
==========================
Iterated Dynamical Systems
==========================
Digraphs from Integer-valued Iterated Functions
Sums of cubes on 3N
-------------------
The number 153 has a curious property.
Let 3N={3,6,9,12,...} be the set of positive multiples of 3. Define an
iterative process f:3N->3N as follows: for a given n, take each digit
of n (in base 10), cube it and then sum the cubes to obtain f(n).
When this process is repeated, the resulting series n, f(n), f(f(n)),...
terminate in 153 after a finite number of iterations (the process ends
because 153 = 1**3 + 5**3 + 3**3).
In the language of discrete dynamical systems, 153 is the global
attractor for the iterated map f restricted to the set 3N.
For example: take the number 108
f(108) = 1**3 + 0**3 + 8**3 = 513
and
f(513) = 5**3 + 1**3 + 3**3 = 153
So, starting at 108 we reach 153 in two iterations,
represented as:
108->513->153
Computing all orbits of 3N up to 10**5 reveals that the attractor
153 is reached in a maximum of 14 iterations. In this code we
show that 13 cycles is the maximum required for all integers (in 3N)
less than 10,000.
The smallest number that requires 13 iterations to reach 153, is 177, i.e.,
177->687->1071->345->216->225->141->66->432->99->1458->702->351->153
The resulting large digraphs are useful for testing network software.
The general problem
-------------------
Given numbers n, a power p and base b, define F(n; p, b) as the sum of
the digits of n (in base b) raised to the power p. The above example
corresponds to f(n)=F(n; 3,10), and below F(n; p, b) is implemented as
the function powersum(n,p,b). The iterative dynamical system defined by
the mapping n:->f(n) above (over 3N) converges to a single fixed point;
153. Applying the map to all positive integers N, leads to a discrete
dynamical process with 5 fixed points: 1, 153, 370, 371, 407. Modulo 3
those numbers are 1, 0, 1, 2, 2. The function f above has the added
property that it maps a multiple of 3 to another multiple of 3; i.e. it
is invariant on the subset 3N.
The squaring of digits (in base 10) result in cycles and the
single fixed point 1. I.e., from a certain point on, the process
starts repeating itself.
keywords: "Recurring Digital Invariant", "Narcissistic Number",
"Happy Number"
The 3n+1 problem
----------------
There is a rich history of mathematical recreations
associated with discrete dynamical systems. The most famous
is the Collatz 3n+1 problem. See the function
collatz_problem_digraph below. The Collatz conjecture
--- that every orbit returns to the fixed point 1 in finite time
--- is still unproven. Even the great Paul Erdos said "Mathematics
is not yet ready for such problems", and offered $500
for its solution.
keywords: "3n+1", "3x+1", "Collatz problem", "Thwaite's conjecture"
"""
import networkx as nx
# Script-wide defaults (nmax is re-assigned before use further below).
nmax = 10000
p = 3
def digitsrep(n, b=10):
    """Return the list of base-``b`` digits of ``n``, most significant first.

    ``n`` must be a nonnegative integer; nonpositive input yields ``[0]``.
    """
    if n <= 0:
        return [0]
    digits = []
    while n > 0:
        # Collect least-significant digits, then reverse once at the end.
        digits.append(n % b)
        n //= b
    digits.reverse()
    return digits
def powersum(n, p, b=10):
    """Return sum of digits of n (in base b) raised to the power p.

    For example, ``powersum(108, 3) == 1**3 + 0**3 + 8**3 == 513``.
    Nonpositive ``n`` is treated as the single digit 0 (so the result is
    ``0 ** p``), matching the digit expansion used elsewhere in this module.
    """
    # Fixed: the original accumulated into a local named ``sum``, shadowing
    # the builtin; compute the digit-power sum directly instead.
    if n <= 0:
        return 0 ** p
    total = 0
    while n > 0:
        total += (n % b) ** p
        n //= b
    return total
def attractor153_graph(n, p, multiple=3, b=10):
    """Return digraph of iterations of powersum(n,3,10).

    Starting from every multiple of ``multiple`` up to ``n`` that has not
    already been visited, follow the digit-power-sum map, adding one
    directed edge per iteration step until a fixed point is reached.
    """
    G = nx.DiGraph()
    for k in range(1, n + 1):
        if k % multiple == 0 and k not in G:
            k1 = k
            knext = powersum(k1, p, b)
            while k1 != knext:  # stop once a fixed point is reached
                G.add_edge(k1, knext)
                k1 = knext
                knext = powersum(k1, p, b)
    return G
def squaring_cycle_graph_old(n, b=10):
    """Return digraph of iterations of powersum(n,2,10).

    Digit squaring can enter cycles (not just fixed points), so iteration
    also stops when it reaches a node that already has an outgoing edge.
    """
    G = nx.DiGraph()
    for k in range(1, n + 1):
        k1 = k
        G.add_node(k1)  # case k1==knext, at least add node
        knext = powersum(k1, 2, b)
        G.add_edge(k1, knext)
        while k1 != knext:  # stop if fixed point
            k1 = knext
            knext = powersum(k1, 2, b)
            G.add_edge(k1, knext)
            if G.out_degree(knext) >= 1:
                # knext has already been iterated in and out
                break
    return G
def sum_of_digits_graph(nmax, b=10):
    """Digraph of iterating the plain base-``b`` digit sum over 1..nmax."""
    return discrete_dynamics_digraph(nmax, lambda n: powersum(n, 1, b))
def squaring_cycle_digraph(nmax, b=10):
    """Digraph of iterating the base-``b`` digit-square sum over 1..nmax."""
    return discrete_dynamics_digraph(nmax, lambda n: powersum(n, 2, b))
def cubing_153_digraph(nmax):
    """Digraph of iterating the base-10 digit-cube sum over 1..nmax."""
    return discrete_dynamics_digraph(nmax, lambda n: powersum(n, 3, 10))
def discrete_dynamics_digraph(nmax, f, itermax=50000):
    """Return the digraph of orbits of ``f`` started from 1..nmax.

    Each iteration step ``k -> f(k)`` is recorded as a directed edge.
    Iteration stops at a fixed point, when the orbit merges into an
    already-explored node, or when the current value reaches ``itermax``.

    Bug fix: the loop guard read ``kold << itermax`` — a left shift, which
    is truthy for every positive ``kold`` — instead of ``kold < itermax``,
    so the safety bound never took effect.
    """
    G = nx.DiGraph()
    for k in range(1, nmax + 1):
        kold = k
        G.add_node(kold)
        knew = f(kold)
        G.add_edge(kold, knew)
        # iterate until fixed point reached or itermax is exceeded
        while kold != knew and kold < itermax:
            kold = knew
            knew = f(kold)
            G.add_edge(kold, knew)
            if G.out_degree(knew) >= 1:
                # knew has already been iterated in and out
                break
    return G
def collatz_problem_digraph(nmax):
    """Digraph of the Collatz (3n+1) map iterated from 1..nmax."""
    def collatz_step(n):
        # Halve even numbers; map odd n to 3n + 1.
        return n // 2 if n % 2 == 0 else 3 * n + 1
    return discrete_dynamics_digraph(nmax, collatz_step)
def fixed_points(G):
    """Return a list of fixed points for the discrete dynamical
    system represented by the digraph G.

    A fixed point is a node with no outgoing edges: applying the map
    leaves it unchanged.
    """
    result = []
    for node in G:
        if G.out_degree(node) == 0:
            result.append(node)
    return result
# Demo: build the digit-cubing digraph on 1..10000 and query it.
nmax = 10000
print(f"Building cubing_153_digraph({nmax})")
G = cubing_153_digraph(nmax)
print("Resulting digraph has", len(G), "nodes and", G.size(), " edges")
# 177 is the smallest start needing the most iterations to reach 153.
print("Shortest path from 177 to 153 is:")
print(nx.shortest_path(G, 177, 153))
print(f"fixed points are {fixed_points(G)}")
@@ -0,0 +1,31 @@
"""
=====================
Krackhardt Centrality
=====================
Centrality measures of Krackhardt social network.
"""
import matplotlib.pyplot as plt
import networkx as nx
G = nx.krackhardt_kite_graph()
# Print three standard centrality measures for every node.
print("Betweenness")
b = nx.betweenness_centrality(G)
for v in G.nodes():
    print(f"{v:2} {b[v]:.3f}")
print("Degree centrality")
d = nx.degree_centrality(G)
for v in G.nodes():
    print(f"{v:2} {d[v]:.3f}")
print("Closeness centrality")
c = nx.closeness_centrality(G)
for v in G.nodes():
    print(f"{v:2} {c[v]:.3f}")
pos = nx.spring_layout(G, seed=367)  # Seed layout for reproducibility
nx.draw(G, pos)
plt.show()
@@ -0,0 +1,82 @@
"""
====================
Parallel Betweenness
====================
Example of parallel implementation of betweenness centrality using the
multiprocessing module from Python Standard Library.
The function betweenness centrality accepts a bunch of nodes and computes
the contribution of those nodes to the betweenness centrality of the whole
network. Here we divide the network in chunks of nodes and we compute their
contribution to the betweenness centrality of the whole network.
Note: The example output below shows that the non-parallel implementation is
faster. This is a limitation of our CI/CD pipeline running on a single core.
Depending on your setup, you will likely observe a speedup.
"""
from multiprocessing import Pool
import time
import itertools
import matplotlib.pyplot as plt
import networkx as nx
def chunks(l, n):
    """Divide a list of nodes `l` in `n` chunks.

    Yields tuples of up to ``n`` consecutive items; the final tuple may be
    shorter. Yields nothing for an empty iterable.
    """
    iterator = iter(l)
    while True:
        piece = tuple(itertools.islice(iterator, n))
        if not piece:
            return
        yield piece
def betweenness_centrality_parallel(G, processes=None):
    """Parallel betweenness centrality function.

    Splits the nodes of ``G`` into chunks, computes each chunk's
    contribution via ``betweenness_centrality_subset`` in a worker pool,
    and sums the partial results into one dict.

    Bug fix: the ``Pool`` was never closed or joined, leaking worker
    processes; it is now used as a context manager.
    """
    with Pool(processes=processes) as p:
        # Four chunks per worker keeps all workers busy.
        # NOTE(review): len(p._pool) relies on a private attribute — confirm
        # it survives the pinned multiprocessing version.
        node_divisor = len(p._pool) * 4
        node_chunks = list(chunks(G.nodes(), int(G.order() / node_divisor)))
        num_chunks = len(node_chunks)
        bt_sc = p.starmap(
            nx.betweenness_centrality_subset,
            zip(
                [G] * num_chunks,
                node_chunks,
                [list(G)] * num_chunks,
                [True] * num_chunks,
                [None] * num_chunks,
            ),
        )
    # Reduce the partial solutions
    bt_c = bt_sc[0]
    for bt in bt_sc[1:]:
        for n in bt:
            bt_c[n] += bt[n]
    return bt_c
# Benchmark the parallel vs. serial implementations on three random graphs.
G_ba = nx.barabasi_albert_graph(1000, 3)
G_er = nx.gnp_random_graph(1000, 0.01)
G_ws = nx.connected_watts_strogatz_graph(1000, 4, 0.1)
for G in [G_ba, G_er, G_ws]:
    print("")
    print("Computing betweenness centrality for:")
    # NOTE(review): nx.info() is deprecated/removed in newer NetworkX
    # releases — confirm the pinned version before upgrading.
    print(nx.info(G))
    print("\tParallel version")
    start = time.time()
    bt = betweenness_centrality_parallel(G)
    print(f"\t\tTime: {(time.time() - start):.4F} seconds")
    print(f"\t\tBetweenness centrality for node 0: {bt[0]:.5f}")
    print("\tNon-Parallel version")
    start = time.time()
    bt = nx.betweenness_centrality(G)
    print(f"\t\tTime: {(time.time() - start):.4F} seconds")
    print(f"\t\tBetweenness centrality for node 0: {bt[0]:.5f}")
print("")
nx.draw(G_ba, node_size=100)
plt.show()
@@ -0,0 +1,40 @@
"""
======================
Reverse Cuthill--McKee
======================
Cuthill-McKee ordering of matrices
The reverse Cuthill--McKee algorithm gives a sparse matrix ordering that
reduces the matrix bandwidth.
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
# build low-bandwidth numpy matrix
G = nx.grid_2d_graph(3, 3)
# Reverse Cuthill--McKee yields a node ordering that reduces bandwidth.
rcm = list(nx.utils.reverse_cuthill_mckee_ordering(G))
print("ordering", rcm)
print("unordered Laplacian matrix")
A = nx.laplacian_matrix(G)
x, y = np.nonzero(A)
# print(f"lower bandwidth: {(y - x).max()}")
# print(f"upper bandwidth: {(x - y).max()}")
print(f"bandwidth: {(y - x).max() + (x - y).max() + 1}")
print(A)
# Rebuild the Laplacian with the RCM ordering and compare bandwidths.
B = nx.laplacian_matrix(G, nodelist=rcm)
print("low-bandwidth Laplacian matrix")
x, y = np.nonzero(B)
# print(f"lower bandwidth: {(y - x).max()}")
# print(f"upper bandwidth: {(x - y).max()}")
print(f"bandwidth: {(y - x).max() + (x - y).max() + 1}")
print(B)
sns.heatmap(B.todense(), cbar=False, square=True, linewidths=0.5, annot=True)
plt.show()
@@ -0,0 +1,108 @@
"""
==================
SNAP Graph Summary
==================
An example of summarizing a graph based on node attributes and edge attributes
using the Summarization by Grouping Nodes on Attributes and Pairwise
edges (SNAP) algorithm (not to be confused with the Stanford Network
Analysis Project). The algorithm groups nodes by their unique
combinations of node attribute values and edge types with other groups
of nodes to produce a summary graph. The summary graph can then be used to
infer how nodes with different attributes values relate to other nodes in the
graph.
"""
import networkx as nx
import matplotlib.pyplot as plt
# Node colors and typed edges: SNAP groups nodes by unique combinations of
# these attribute values and their edge types to other groups.
nodes = {
    "A": dict(color="Red"),
    "B": dict(color="Red"),
    "C": dict(color="Red"),
    "D": dict(color="Red"),
    "E": dict(color="Blue"),
    "F": dict(color="Blue"),
    "G": dict(color="Blue"),
    "H": dict(color="Blue"),
    "I": dict(color="Yellow"),
    "J": dict(color="Yellow"),
    "K": dict(color="Yellow"),
    "L": dict(color="Yellow"),
}
edges = [
    ("A", "B", "Strong"),
    ("A", "C", "Weak"),
    ("A", "E", "Strong"),
    ("A", "I", "Weak"),
    ("B", "D", "Weak"),
    ("B", "J", "Weak"),
    ("B", "F", "Strong"),
    ("C", "G", "Weak"),
    ("D", "H", "Weak"),
    ("I", "J", "Strong"),
    ("J", "K", "Strong"),
    ("I", "L", "Strong"),
]
original_graph = nx.Graph()
# nodes.items() yields (name, attribute-dict) pairs for add_nodes_from.
original_graph.add_nodes_from(n for n in nodes.items())
original_graph.add_edges_from((u, v, {"type": label}) for u, v, label in edges)
plt.suptitle("SNAP Summarization")
base_options = dict(with_labels=True, edgecolors="black", node_size=500)
ax1 = plt.subplot(1, 2, 1)
plt.title(
    "Original (%s nodes, %s edges)"
    % (original_graph.number_of_nodes(), original_graph.number_of_edges())
)
pos = nx.spring_layout(original_graph, seed=7482934)
node_colors = [d["color"] for _, d in original_graph.nodes(data=True)]
# Strong edges are drawn thicker than weak ones.
edge_type_visual_weight_lookup = {"Weak": 1.0, "Strong": 3.0}
edge_weights = [
    edge_type_visual_weight_lookup[d["type"]]
    for _, _, d in original_graph.edges(data=True)
]
nx.draw_networkx(
    original_graph, pos=pos, node_color=node_colors, width=edge_weights, **base_options
)
# Summarize on the "color" node attribute and the "type" edge attribute.
node_attributes = ("color",)
edge_attributes = ("type",)
summary_graph = nx.snap_aggregation(
    original_graph, node_attributes, edge_attributes, prefix="S-"
)
plt.subplot(1, 2, 2)
plt.title(
    "SNAP Aggregation (%s nodes, %s edges)"
    % (summary_graph.number_of_nodes(), summary_graph.number_of_edges())
)
summary_pos = nx.spring_layout(summary_graph, seed=8375428)
node_colors = []
for node in summary_graph:
    color = summary_graph.nodes[node]["color"]
    node_colors.append(color)
# A summary edge's width is the sum of the visual weights of the edge
# types it bundles (stored under the "types" edge attribute).
edge_weights = []
for edge in summary_graph.edges():
    edge_types = summary_graph.get_edge_data(*edge)["types"]
    edge_weight = 0.0
    for edge_type in edge_types:
        edge_weight += edge_type_visual_weight_lookup[edge_type["type"]]
    edge_weights.append(edge_weight)
nx.draw_networkx(
    summary_graph,
    pos=summary_pos,
    node_color=node_colors,
    width=edge_weights,
    **base_options
)
plt.tight_layout()
plt.show()
@@ -0,0 +1,2 @@
Basic
-----
@@ -0,0 +1,49 @@
"""
==========
Properties
==========
Compute some network properties for the lollipop graph.
"""
import matplotlib.pyplot as plt
import networkx as nx
G = nx.lollipop_graph(4, 6)
# Collect every pairwise shortest-path length.
pathlengths = []
print("source vertex {target:length, }")
for v in G.nodes():
    spl = dict(nx.single_source_shortest_path_length(G, v))
    print(f"{v} {spl} ")
    for p in spl:
        pathlengths.append(spl[p])
print()
print(f"average shortest path length {sum(pathlengths) / len(pathlengths)}")
# histogram of path lengths
dist = {}
for p in pathlengths:
    if p in dist:
        dist[p] += 1
    else:
        dist[p] = 1
print()
print("length #paths")
verts = dist.keys()
for d in sorted(verts):
    print(f"{d} {dist[d]}")
# Standard graph invariants.
print(f"radius: {nx.radius(G)}")
print(f"diameter: {nx.diameter(G)}")
print(f"eccentricity: {nx.eccentricity(G)}")
print(f"center: {nx.center(G)}")
print(f"periphery: {nx.periphery(G)}")
print(f"density: {nx.density(G)}")
pos = nx.spring_layout(G, seed=3068)  # Seed layout for reproducibility
nx.draw(G, pos=pos, with_labels=True)
plt.show()
@@ -0,0 +1,24 @@
"""
======================
Read and write graphs.
======================
Read and write graphs.
"""
import matplotlib.pyplot as plt
import networkx as nx
G = nx.grid_2d_graph(5, 5)  # 5x5 grid
# print the adjacency list
for line in nx.generate_adjlist(G):
    print(line)
# write edgelist to grid.edgelist
nx.write_edgelist(G, path="grid.edgelist", delimiter=":")
# read edgelist from grid.edgelist
# NOTE(review): node labels presumably come back as strings after the
# write/read round-trip — confirm if exact node identity matters.
H = nx.read_edgelist(path="grid.edgelist", delimiter=":")
pos = nx.spring_layout(H, seed=200)
nx.draw(H, pos)
plt.show()
@@ -0,0 +1,60 @@
"""
============
Simple graph
============
Draw simple graph with manual layout.
"""
import networkx as nx
import matplotlib.pyplot as plt
# Small undirected example with hand-placed node positions.
G = nx.Graph()
G.add_edge(1, 2)
G.add_edge(1, 3)
G.add_edge(1, 5)
G.add_edge(2, 3)
G.add_edge(3, 4)
G.add_edge(4, 5)
# explicitly set positions
pos = {1: (0, 0), 2: (-1, 0.3), 3: (2, 0.17), 4: (4, 0.255), 5: (5, 0.03)}
options = {
    "font_size": 36,
    "node_size": 3000,
    "node_color": "white",
    "edgecolors": "black",
    "linewidths": 5,
    "width": 5,
}
nx.draw_networkx(G, pos, **options)
# Set margins for the axes so that nodes aren't clipped
ax = plt.gca()
ax.margins(0.20)
plt.axis("off")
plt.show()
# %%
# A directed graph
G = nx.DiGraph([(0, 3), (1, 3), (2, 4), (3, 5), (3, 6), (4, 6), (5, 6)])
# group nodes by column
left_nodes = [0, 1, 2]
middle_nodes = [3, 4]
right_nodes = [5, 6]
# set the position according to column (x-coord)
pos = {n: (0, i) for i, n in enumerate(left_nodes)}
pos.update({n: (1, i + 0.5) for i, n in enumerate(middle_nodes)})
pos.update({n: (2, i + 0.5) for i, n in enumerate(right_nodes)})
nx.draw_networkx(G, pos, **options)
# Set margins for the axes so that nodes aren't clipped
ax = plt.gca()
ax.margins(0.20)
plt.axis("off")
plt.show()
@@ -0,0 +1,2 @@
Drawing
-------
@@ -0,0 +1,152 @@
"""
=============
Chess Masters
=============
An example of the MultiDiGraph class.
The function `chess_pgn_graph` reads a collection of chess matches stored in
the specified PGN file (PGN ="Portable Game Notation"). Here the (compressed)
default file::
chess_masters_WCC.pgn.bz2
contains all 685 World Chess Championship matches from 1886--1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The `chess_pgn_graph()` function returns a `MultiDiGraph` with multiple edges.
Each node is the last name of a chess master. Each edge is directed from white
to black and contains selected game info.
The key statement in `chess_pgn_graph` below is::
G.add_edge(white, black, game_info)
where `game_info` is a `dict` describing each game.
"""
import matplotlib.pyplot as plt
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details = ["Event", "Date", "Result", "ECO", "Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
    """Read chess games in pgn format in pgn_file.
    Filenames ending in .bz2 will be uncompressed.
    Return the MultiDiGraph of players connected by a chess game.
    Edges contain game data in a dict.
    """
    import bz2
    G = nx.MultiDiGraph()
    game = {}
    with bz2.BZ2File(pgn_file) as datafile:
        lines = [line.decode().rstrip("\r\n") for line in datafile]
    for line in lines:
        if line.startswith("["):
            # PGN tag line, e.g. [Event "..."]: record the key/value pair.
            tag, value = line[1:-1].split(" ", 1)
            game[str(tag)] = value.strip('"')
        else:
            # empty line after tag set indicates
            # we finished reading game info
            if game:
                white = game.pop("White")
                black = game.pop("Black")
                # One edge per game, directed white -> black, annotated
                # with the remaining tag data.
                G.add_edge(white, black, **game)
                game = {}
    return G
G = chess_pgn_graph()
print(
    f"Loaded {G.number_of_edges()} chess games between {G.number_of_nodes()} players\n"
)
# identify connected components of the undirected version
H = G.to_undirected()
Gcc = [H.subgraph(c) for c in nx.connected_components(H)]
if len(Gcc) > 1:
    print(f"Note the disconnected component consisting of:\n{Gcc[1].nodes()}")
# find all games with B97 opening (as described in ECO)
openings = {game_info["ECO"] for (white, black, game_info) in G.edges(data=True)}
print(f"\nFrom a total of {len(openings)} different openings,")
print("the following games used the Sicilian opening")
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white, black, game_info) in G.edges(data=True):
    if game_info["ECO"] == "B97":
        summary = f"{white} vs {black}\n"
        for k, v in game_info.items():
            summary += f" {k}: {v}\n"
        summary += "\n"
        print(summary)
# make new undirected graph H without multi-edges
H = nx.Graph(G)
# edge width is proportional number of games played
edgewidth = [len(G.get_edge_data(u, v)) for u, v in H.edges()]
# node size is proportional to number of games won
wins = dict.fromkeys(G.nodes(), 0.0)
for (u, v, d) in G.edges(data=True):
    # Result strings look like "1-0", "0-1" or "1/2-1/2".
    r = d["Result"].split("-")
    if r[0] == "1":
        wins[u] += 1.0
    elif r[0] == "1/2":
        # A draw credits half a win to each player.
        wins[u] += 0.5
        wins[v] += 0.5
    else:
        wins[v] += 1.0
nodesize = [wins[v] * 50 for v in H]
# Generate layout for visualization
pos = nx.kamada_kawai_layout(H)
# Manual tweaking to limit node label overlap in the visualization
pos["Reshevsky, Samuel H"] += (0.05, -0.10)
pos["Botvinnik, Mikhail M"] += (0.03, -0.06)
pos["Smyslov, Vassily V"] += (0.05, -0.03)
fig, ax = plt.subplots(figsize=(12, 12))
# Visualize graph components
nx.draw_networkx_edges(H, pos, alpha=0.3, width=edgewidth, edge_color="m")
nx.draw_networkx_nodes(H, pos, node_size=nodesize, node_color="#210070", alpha=0.9)
label_options = {"ec": "k", "fc": "white", "alpha": 0.7}
nx.draw_networkx_labels(H, pos, font_size=14, bbox=label_options)
# Title/legend
font = {"fontname": "Helvetica", "color": "k", "fontweight": "bold", "fontsize": 14}
ax.set_title("World Chess Championship Games: 1886 - 1985", font)
# Change font color for legend
font["color"] = "r"
ax.text(
    0.80,
    0.10,
    "edge width = # games played",
    horizontalalignment="center",
    transform=ax.transAxes,
    fontdict=font,
)
ax.text(
    0.80,
    0.06,
    "node size = # games won",
    horizontalalignment="center",
    transform=ax.transAxes,
    fontdict=font,
)
# Resize figure for label readability
ax.margins(0.1, 0.05)
fig.tight_layout()
plt.axis("off")
plt.show()
@@ -0,0 +1,75 @@
"""
=================
Custom node icons
=================
Example of using custom icons to represent nodes with matplotlib.
Images for node icons courtesy of www.materialui.co
"""
import matplotlib.pyplot as plt
import networkx as nx
import PIL
# Image URLs for graph nodes
icons = {
    "router": "icons/router_black_144x144.png",
    "switch": "icons/switch_black_144x144.png",
    "PC": "icons/computer_black_144x144.png",
}
# Load images
images = {k: PIL.Image.open(fname) for k, fname in icons.items()}
# Generate the computer network graph: one router, three switches,
# three PCs per switch.
G = nx.Graph()
G.add_node("router", image=images["router"])
for i in range(1, 4):
    G.add_node(f"switch_{i}", image=images["switch"])
    for j in range(1, 4):
        G.add_node("PC_" + str(i) + "_" + str(j), image=images["PC"])
G.add_edge("router", "switch_1")
G.add_edge("router", "switch_2")
G.add_edge("router", "switch_3")
for u in range(1, 4):
    for v in range(1, 4):
        G.add_edge("switch_" + str(u), "PC_" + str(u) + "_" + str(v))
# Get a reproducible layout and create figure
pos = nx.spring_layout(G, seed=1734289230)
fig, ax = plt.subplots()
# Note: the min_source/target_margin kwargs only work with FancyArrowPatch objects.
# Force the use of FancyArrowPatch for edge drawing by setting `arrows=True`,
# but suppress arrowheads with `arrowstyle="-"`
nx.draw_networkx_edges(
    G,
    pos=pos,
    ax=ax,
    arrows=True,
    arrowstyle="-",
    min_source_margin=15,
    min_target_margin=15,
)
# Transform from data coordinates (scaled between xlim and ylim) to display coordinates
tr_figure = ax.transData.transform
# Transform from display to figure coordinates
tr_axes = fig.transFigure.inverted().transform
# Select the size of the image (relative to the X axis)
icon_size = (ax.get_xlim()[1] - ax.get_xlim()[0]) * 0.025
icon_center = icon_size / 2.0
# Add the respective image to each node
for n in G.nodes:
    # Map node position: data -> display -> figure coordinates.
    xf, yf = tr_figure(pos[n])
    xa, ya = tr_axes((xf, yf))
    # get overlapped axes and plot icon
    a = plt.axes([xa - icon_center, ya - icon_center, icon_size, icon_size])
    a.imshow(G.nodes[n]["image"])
    a.axis("off")
plt.show()
@@ -0,0 +1,50 @@
"""
===============
Degree Analysis
===============
This example shows several ways to visualize the distribution of the degree of
nodes with two common techniques: a *degree-rank plot* and a
*degree histogram*.
In this example, a random Graph is generated with 100 nodes. The degree of
each node is determined, and a figure is generated showing three things:
1. The subgraph of connected components
2. The degree-rank plot for the Graph, and
3. The degree histogram
"""
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
G = nx.gnp_random_graph(100, 0.02, seed=10374196)
# Degrees sorted high-to-low, shared by the rank plot and the histogram.
degree_sequence = sorted([d for n, d in G.degree()], reverse=True)
dmax = max(degree_sequence)
fig = plt.figure("Degree of a random graph", figsize=(8, 8))
# Create a gridspec for adding subplots of different sizes
axgrid = fig.add_gridspec(5, 4)
# Top panel: largest connected component of G.
ax0 = fig.add_subplot(axgrid[0:3, :])
Gcc = G.subgraph(sorted(nx.connected_components(G), key=len, reverse=True)[0])
pos = nx.spring_layout(Gcc, seed=10396953)
nx.draw_networkx_nodes(Gcc, pos, ax=ax0, node_size=20)
nx.draw_networkx_edges(Gcc, pos, ax=ax0, alpha=0.4)
ax0.set_title("Connected components of G")
ax0.set_axis_off()
ax1 = fig.add_subplot(axgrid[3:, :2])
ax1.plot(degree_sequence, "b-", marker="o")
ax1.set_title("Degree Rank Plot")
ax1.set_ylabel("Degree")
ax1.set_xlabel("Rank")
ax2 = fig.add_subplot(axgrid[3:, 2:])
ax2.bar(*np.unique(degree_sequence, return_counts=True))
ax2.set_title("Degree histogram")
ax2.set_xlabel("Degree")
ax2.set_ylabel("# of Nodes")
fig.tight_layout()
plt.show()
@@ -0,0 +1,46 @@
"""
==============
Directed Graph
==============
Draw a graph with directed edges using a colormap and different node sizes.
Edges have different colors and alphas (opacity). Drawn using matplotlib.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
seed = 13648  # Seed random number generators for reproducibility
G = nx.random_k_out_graph(10, 3, 0.5, seed=seed)
pos = nx.spring_layout(G, seed=seed)
# Node sizes grow with node index; per-edge colors/alphas vary below.
node_sizes = [3 + 10 * i for i in range(len(G))]
M = G.number_of_edges()
edge_colors = range(2, M + 2)
edge_alphas = [(5 + i) / (M + 4) for i in range(M)]
cmap = plt.cm.plasma
nodes = nx.draw_networkx_nodes(G, pos, node_size=node_sizes, node_color="indigo")
# NOTE(review): with arrows drawn, draw_networkx_edges appears to return a
# list of patches (indexed per-edge below) — confirm against the pinned
# NetworkX version.
edges = nx.draw_networkx_edges(
    G,
    pos,
    node_size=node_sizes,
    arrowstyle="->",
    arrowsize=10,
    edge_color=edge_colors,
    edge_cmap=cmap,
    width=2,
)
# set alpha value for each edge
for i in range(M):
    edges[i].set_alpha(edge_alphas[i])
pc = mpl.collections.PatchCollection(edges, cmap=cmap)
pc.set_array(edge_colors)
plt.colorbar(pc)
ax = plt.gca()
ax.set_axis_off()
plt.show()
@@ -0,0 +1,23 @@
"""
=============
Edge Colormap
=============
Draw a graph with matplotlib, color edges.
"""
import matplotlib.pyplot as plt
import networkx as nx
G = nx.star_graph(20)
pos = nx.spring_layout(G, seed=63)  # Seed layout for reproducibility
# One color value per edge, mapped through the Blues colormap.
colors = range(20)
options = {
    "node_color": "#A0CBE2",
    "edge_color": colors,
    "width": 4,
    "edge_cmap": plt.cm.Blues,
    "with_labels": False,
}
nx.draw(G, pos, **options)
plt.show()
@@ -0,0 +1,35 @@
"""
=========
Ego Graph
=========
Example using the NetworkX ego_graph() function to return the main egonet of
the largest hub in a Barabási-Albert network.
"""
from operator import itemgetter
import matplotlib.pyplot as plt
import networkx as nx
# Create a BA model graph - use seed for reproducibility
n = 1000
m = 2
seed = 20532
G = nx.barabasi_albert_graph(n, m, seed=seed)
# find node with largest degree
# NOTE: sorted(...)[-1] picks the *last* max-degree node under the stable
# sort; max(..., key=itemgetter(1)) would pick the first instead.
node_and_degree = G.degree()
(largest_hub, degree) = sorted(node_and_degree, key=itemgetter(1))[-1]
# Create ego graph of main hub
hub_ego = nx.ego_graph(G, largest_hub)
# Draw graph
pos = nx.spring_layout(hub_ego, seed=seed)  # Seed layout for reproducibility
nx.draw(hub_ego, pos, node_color="b", node_size=50, with_labels=False)
# Draw ego as large and red
options = {"node_size": 300, "node_color": "r"}
nx.draw_networkx_nodes(hub_ego, pos, nodelist=[largest_hub], **options)
plt.show()
@@ -0,0 +1,22 @@
"""
===========
Eigenvalues
===========
Create an G{n,m} random graph and compute the eigenvalues.
"""
import matplotlib.pyplot as plt
import networkx as nx
import numpy.linalg
n = 1000  # 1000 nodes
m = 5000  # 5000 edges
G = nx.gnm_random_graph(n, m, seed=5040)  # Seed for reproducibility
L = nx.normalized_laplacian_matrix(G)
# NOTE(review): `.A` (dense ndarray view of the sparse matrix) is removed
# in recent scipy releases — confirm the pinned scipy version.
e = numpy.linalg.eigvals(L.A)
print("Largest eigenvalue:", max(e))
print("Smallest eigenvalue:", min(e))
plt.hist(e, bins=100)  # histogram with 100 bins
plt.xlim(0, 2)  # eigenvalues between 0 and 2
plt.show()
@@ -0,0 +1,52 @@
"""
==========
Four Grids
==========
Draw a 4x4 graph with matplotlib.
This example illustrates the use of keyword arguments to `networkx.draw` to
customize the visualization of a simple Graph comprising a 4x4 grid.
"""
import matplotlib.pyplot as plt
import networkx as nx
G = nx.grid_2d_graph(4, 4)  # 4x4 grid
pos = nx.spring_layout(G, iterations=100, seed=39775)
# Create a 2x2 subplot
fig, all_axes = plt.subplots(2, 2)
ax = all_axes.flat
# Four renderings of the same layout with different keyword options.
nx.draw(G, pos, ax=ax[0], font_size=8)
nx.draw(G, pos, ax=ax[1], node_size=0, with_labels=False)
nx.draw(
    G,
    pos,
    ax=ax[2],
    node_color="tab:green",
    edgecolors="tab:gray",  # Node surface color
    edge_color="tab:gray",  # Color of graph edges
    node_size=250,
    with_labels=False,
    width=6,
)
# The directed variant adds arrowheads on every edge.
H = G.to_directed()
nx.draw(
    H,
    pos,
    ax=ax[3],
    node_color="tab:orange",
    node_size=20,
    with_labels=False,
    arrowsize=10,
    width=2,
)
# Set margins for the axes so that nodes aren't clipped
for a in ax:
    a.margins(0.10)
fig.tight_layout()
plt.show()
@@ -0,0 +1,26 @@
"""
=================
House With Colors
=================
Draw a graph with matplotlib.
"""
import matplotlib.pyplot as plt
import networkx as nx
G = nx.house_graph()
# explicitly set positions: nodes 0-3 form the square, node 4 is the apex
pos = {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1), 4: (0.5, 2.0)}
# Plot nodes with different properties for the "wall" and "roof" nodes
nx.draw_networkx_nodes(
    G, pos, node_size=3000, nodelist=[0, 1, 2, 3], node_color="tab:blue"
)
nx.draw_networkx_nodes(G, pos, node_size=2000, nodelist=[4], node_color="tab:orange")
nx.draw_networkx_edges(G, pos, alpha=0.5, width=6)
# Customize axes
ax = plt.gca()
ax.margins(0.11)
plt.tight_layout()
plt.axis("off")
plt.show()
@@ -0,0 +1,142 @@
"""
===========
Knuth Miles
===========
`miles_graph()` returns an undirected graph over 128 US cities. The
cities each have location and population data. The edges are labeled with the
distance between the two cities.
This example is described in Section 1.1 of
Donald E. Knuth, "The Stanford GraphBase: A Platform for Combinatorial
Computing", ACM Press, New York, 1993.
http://www-cs-faculty.stanford.edu/~knuth/sgb.html
The data file can be found at:
- https://github.com/networkx/networkx/blob/main/examples/drawing/knuth_miles.txt.gz
"""
import gzip
import re
# Ignore any warnings related to downloading shpfiles with cartopy
import warnings
warnings.simplefilter("ignore")
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
def miles_graph():
    """Return the cities example graph in miles_dat.txt
    from the Stanford GraphBase.

    Distance lines (starting with a digit) add weighted edges from the
    most recent city to previously seen cities; other lines declare a
    city with its coordinates and population.

    Bug fixes vs. the original: the gzipped data file is now closed via a
    ``with`` block, and the digit-matching regex is compiled once instead
    of once per line.
    """
    numfind = re.compile(r"^\d+")  # distance lines start with a digit
    G = nx.Graph()
    G.position = {}
    G.population = {}
    cities = []
    # open file miles_dat.txt.gz (or miles_dat.txt)
    with gzip.open("knuth_miles.txt.gz", "r") as fh:
        for line in fh.readlines():
            line = line.decode()
            if line.startswith("*"):  # skip comments
                continue
            if numfind.match(line):  # this line is distances
                dist = line.split()
                for d in dist:
                    G.add_edge(city, cities[i], weight=int(d))
                    i = i + 1
            else:  # this line is a city, position, population
                i = 1
                (city, coordpop) = line.split("[")
                cities.insert(0, city)
                (coord, pop) = coordpop.split("]")
                (y, x) = coord.split(",")
                G.add_node(city)
                # assign position - Convert string to lat/long
                G.position[city] = (-float(x) / 100, float(y) / 100)
                G.population[city] = float(pop) / 1000.0
    return G
G = miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print(G)
# make new graph of cities, edge if less than 300 miles between them
H = nx.Graph()
for v in G:
    H.add_node(v)
for (u, v, d) in G.edges(data=True):
    if d["weight"] < 300:
        H.add_edge(u, v)
# draw with matplotlib/pylab
fig = plt.figure(figsize=(8, 6))
# nodes colored by degree sized by population
node_color = [float(H.degree(v)) for v in H]
# Use cartopy to provide a backdrop for the visualization
try:
    import cartopy.crs as ccrs
    import cartopy.io.shapereader as shpreader
    ax = fig.add_axes([0, 0, 1, 1], projection=ccrs.LambertConformal(), frameon=False)
    ax.set_extent([-125, -66.5, 20, 50], ccrs.Geodetic())
    # Add map of countries & US states as a backdrop
    for shapename in ("admin_1_states_provinces_lakes_shp", "admin_0_countries"):
        shp = shpreader.natural_earth(
            resolution="110m", category="cultural", name=shapename
        )
        ax.add_geometries(
            shpreader.Reader(shp).geometries(),
            ccrs.PlateCarree(),
            facecolor="none",
            edgecolor="k",
        )
    # NOTE: When using cartopy, use matplotlib directly rather than nx.draw
    # to take advantage of the cartopy transforms
    ax.scatter(
        *np.array([v for v in G.position.values()]).T,
        s=[G.population[v] for v in H],
        c=node_color,
        transform=ccrs.PlateCarree(),
        zorder=100  # Ensure nodes lie on top of edges/state lines
    )
    # Plot edges between the cities
    for edge in H.edges():
        edge_coords = np.array([G.position[v] for v in edge])
        ax.plot(
            edge_coords[:, 0],
            edge_coords[:, 1],
            transform=ccrs.PlateCarree(),
            linewidth=0.75,
            color="k",
        )
except ImportError:
    # If cartopy is unavailable, the backdrop for the plot will be blank;
    # though you should still be able to discern the general shape of the US
    # from graph nodes and edges!
    nx.draw(
        H,
        G.position,
        node_size=[G.population[v] for v in H],
        node_color=node_color,
        with_labels=False,
    )
plt.show()

Some files were not shown because too many files have changed in this diff Show More