Picam 2 usage

Tylr-J42 2024-07-12 18:10:02 -04:00
parent aadab84836
commit 3b58d8a3cd
1851 changed files with 679541 additions and 3 deletions

247
.venv/bin/Activate.ps1 Normal file

@ -0,0 +1,247 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.
.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.
.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.
.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (i.e. '(.venv) ').
.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.
.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.
.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.
.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.
.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170
#>
Param(
[Parameter(Mandatory = $false)]
[String]
$VenvDir,
[Parameter(Mandatory = $false)]
[String]
$Prompt
)
<# Function declarations --------------------------------------------------- #>
<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.
.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.
#>
function global:deactivate ([switch]$NonDestructive) {
# Revert to original values
# The prior prompt:
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
}
# The prior PYTHONHOME:
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
}
# The prior PATH:
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
}
# Just remove the VIRTUAL_ENV altogether:
if (Test-Path -Path Env:VIRTUAL_ENV) {
Remove-Item -Path env:VIRTUAL_ENV
}
# Just remove VIRTUAL_ENV_PROMPT altogether.
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
}
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
}
# Leave deactivate function in the global namespace if requested:
if (-not $NonDestructive) {
Remove-Item -Path function:deactivate
}
}
<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.
If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.
.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
[String]
$ConfigDir
) {
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
# An empty map will be returned if no config file is found.
$pyvenvConfig = @{ }
if ($pyvenvConfigPath) {
Write-Verbose "File exists, parse `key = value` lines"
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
$pyvenvConfigContent | ForEach-Object {
$keyval = $PSItem -split "\s*=\s*", 2
if ($keyval[0] -and $keyval[1]) {
$val = $keyval[1]
# Remove extraneous quotations around a string value.
if ("'""".Contains($val.Substring(0, 1))) {
$val = $val.Substring(1, $val.Length - 2)
}
$pyvenvConfig[$keyval[0]] = $val
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
}
}
}
return $pyvenvConfig
}
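# Illustrative example (not part of the upstream script): for a pyvenv.cfg containing
#
#   home = /usr/bin
#   include-system-site-packages = false
#   prompt = '.venv'
#
# Get-PyVenvConfig returns a hashtable in which 'prompt' maps to .venv (the
# surrounding quotes are stripped) and the other keys map to their literal values.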
<# Begin Activate script --------------------------------------------------- #>
# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment; it might not be
# VenvExecDir if it was specified on the command line.
if ($VenvDir) {
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
Write-Verbose "VenvDir=$VenvDir"
}
# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
$Prompt = $pyvenvCfg['prompt'];
}
else {
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
$Prompt = Split-Path -Path $venvDir -Leaf
}
}
Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"
# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
Write-Verbose "Setting prompt to '$Prompt'"
# Set the prompt to include the env name
# Make sure _OLD_VIRTUAL_PROMPT is global
function global:_OLD_VIRTUAL_PROMPT { "" }
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
function global:prompt {
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
_OLD_VIRTUAL_PROMPT
}
$env:VIRTUAL_ENV_PROMPT = $Prompt
}
# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
Remove-Item -Path Env:PYTHONHOME
}
# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"

69
.venv/bin/activate Normal file

@ -0,0 +1,69 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
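# Illustrative usage (not part of the generated script), from the repository root:
#
#     source .venv/bin/activate    # enter the virtual environment
#     deactivate                   # leave it again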
deactivate () {
# reset old environment variables
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
PATH="${_OLD_VIRTUAL_PATH:-}"
export PATH
unset _OLD_VIRTUAL_PATH
fi
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
export PYTHONHOME
unset _OLD_VIRTUAL_PYTHONHOME
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r 2> /dev/null
fi
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
PS1="${_OLD_VIRTUAL_PS1:-}"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
unset VIRTUAL_ENV_PROMPT
if [ ! "${1:-}" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
VIRTUAL_ENV="/home/tyler/Desktop/FRC-Apriltag-Pose-Detection/.venv"
export VIRTUAL_ENV
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
unset PYTHONHOME
fi
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1:-}"
PS1="(.venv) ${PS1:-}"
export PS1
VIRTUAL_ENV_PROMPT="(.venv) "
export VIRTUAL_ENV_PROMPT
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r 2> /dev/null
fi

26
.venv/bin/activate.csh Normal file

@ -0,0 +1,26 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
# Unset irrelevant variables.
deactivate nondestructive
setenv VIRTUAL_ENV "/home/tyler/Desktop/FRC-Apriltag-Pose-Detection/.venv"
set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/bin:$PATH"
set _OLD_VIRTUAL_PROMPT="$prompt"
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
set prompt = "(.venv) $prompt"
setenv VIRTUAL_ENV_PROMPT "(.venv) "
endif
alias pydoc python -m pydoc
rehash

69
.venv/bin/activate.fish Normal file

@ -0,0 +1,69 @@
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
# (https://fishshell.com/); you cannot run it directly.
function deactivate -d "Exit virtual environment and return to normal shell environment"
# reset old environment variables
if test -n "$_OLD_VIRTUAL_PATH"
set -gx PATH $_OLD_VIRTUAL_PATH
set -e _OLD_VIRTUAL_PATH
end
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
set -e _OLD_VIRTUAL_PYTHONHOME
end
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
set -e _OLD_FISH_PROMPT_OVERRIDE
# prevents error when using nested fish instances (Issue #93858)
if functions -q _old_fish_prompt
functions -e fish_prompt
functions -c _old_fish_prompt fish_prompt
functions -e _old_fish_prompt
end
end
set -e VIRTUAL_ENV
set -e VIRTUAL_ENV_PROMPT
if test "$argv[1]" != "nondestructive"
# Self-destruct!
functions -e deactivate
end
end
# Unset irrelevant variables.
deactivate nondestructive
set -gx VIRTUAL_ENV "/home/tyler/Desktop/FRC-Apriltag-Pose-Detection/.venv"
set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/bin" $PATH
# Unset PYTHONHOME if set.
if set -q PYTHONHOME
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
set -e PYTHONHOME
end
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
# fish uses a function instead of an env var to generate the prompt.
# Save the current fish_prompt function as the function _old_fish_prompt.
functions -c fish_prompt _old_fish_prompt
# With the original prompt function renamed, we can override with our own.
function fish_prompt
# Save the return status of the last command.
set -l old_status $status
# Output the venv prompt; color taken from the blue of the Python logo.
printf "%s%s%s" (set_color 4B8BBE) "(.venv) " (set_color normal)
# Restore the return status of the previous command.
echo "exit $old_status" | .
# Output the original/"old" prompt.
_old_fish_prompt
end
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
set -gx VIRTUAL_ENV_PROMPT "(.venv) "
end

8
.venv/bin/pip Executable file

@ -0,0 +1,8 @@
#!/home/tyler/Desktop/FRC-Apriltag-Pose-Detection/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

8
.venv/bin/pip3 Executable file

@ -0,0 +1,8 @@
#!/home/tyler/Desktop/FRC-Apriltag-Pose-Detection/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

8
.venv/bin/pip3.11 Executable file

@ -0,0 +1,8 @@
#!/home/tyler/Desktop/FRC-Apriltag-Pose-Detection/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

1
.venv/bin/python Symbolic link

@ -0,0 +1 @@
python3

1
.venv/bin/python3 Symbolic link

@ -0,0 +1 @@
/usr/bin/python3

1
.venv/bin/python3.11 Symbolic link

@ -0,0 +1 @@
python3


@ -0,0 +1,222 @@
# don't import any costly modules
import sys
import os
is_pypy = '__pypy__' in sys.builtin_module_names
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
import warnings
warnings.warn(
"Distutils was imported before Setuptools, but importing Setuptools "
"also replaces the `distutils` module in `sys.modules`. This may lead "
"to undesirable behaviors or errors. To avoid these issues, avoid "
"using distutils directly, ensure that setuptools is installed in the "
"traditional way (e.g. not an editable install), and/or make sure "
"that setuptools is always imported before distutils."
)
def clear_distutils():
if 'distutils' not in sys.modules:
return
import warnings
warnings.warn("Setuptools is replacing distutils.")
mods = [
name
for name in sys.modules
if name == "distutils" or name.startswith("distutils.")
]
for name in mods:
del sys.modules[name]
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
return which == 'local'
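# Illustrative note (not part of the upstream module): the variable defaults to
# "local", which prefers the setuptools-bundled distutils. Running, for example,
#   SETUPTOOLS_USE_DISTUTILS=stdlib python -c "import distutils"
# makes enabled() return False, so do_override() leaves the stdlib distutils alone.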
def ensure_local_distutils():
import importlib
clear_distutils()
# With the DistutilsMetaFinder in place,
# perform an import to cause distutils to be
# loaded from setuptools._distutils. Ref #2906.
with shim():
importlib.import_module('distutils')
# check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
assert 'setuptools._distutils.log' not in sys.modules
def do_override():
"""
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
if enabled():
warn_distutils_present()
ensure_local_distutils()
class _TrivialRe:
def __init__(self, *patterns):
self._patterns = patterns
def match(self, string):
return all(pat in string for pat in self._patterns)
class DistutilsMetaFinder:
def find_spec(self, fullname, path, target=None):
# optimization: only consider top level modules and those
# found in the CPython test suite.
if path is not None and not fullname.startswith('test.'):
return
method_name = 'spec_for_{fullname}'.format(**locals())
method = getattr(self, method_name, lambda: None)
return method()
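# Illustrative note (not part of the upstream module): the format() call above
# dispatches by module name, so importing "distutils" routes to
# spec_for_distutils() and importing "pip" routes to spec_for_pip(); names
# without a matching spec_for_* method fall through to the lambda and return None.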
def spec_for_distutils(self):
if self.is_cpython():
return
import importlib
import importlib.abc
import importlib.util
try:
mod = importlib.import_module('setuptools._distutils')
except Exception:
# There are a couple of cases where setuptools._distutils
# may not be present:
# - An older Setuptools without a local distutils is
# taking precedence. Ref #2957.
# - Path manipulation during sitecustomize removes
# setuptools from the path but only after the hook
# has been loaded. Ref #2980.
# In either case, fall back to stdlib behavior.
return
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
mod.__name__ = 'distutils'
return mod
def exec_module(self, module):
pass
return importlib.util.spec_from_loader(
'distutils', DistutilsLoader(), origin=mod.__file__
)
@staticmethod
def is_cpython():
"""
Suppress supplying distutils for CPython (build and tests).
Ref #2965 and #3007.
"""
return os.path.isfile('pybuilddir.txt')
def spec_for_pip(self):
"""
Ensure stdlib distutils when running under pip.
See pypa/pip#8761 for rationale.
"""
if self.pip_imported_during_build():
return
clear_distutils()
self.spec_for_distutils = lambda: None
@classmethod
def pip_imported_during_build(cls):
"""
Detect if pip is being imported in a build script. Ref #2355.
"""
import traceback
return any(
cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
)
@staticmethod
def frame_file_is_setup(frame):
"""
Return True if the indicated frame suggests a setup.py file.
"""
# some frames may not have __file__ (#2940)
return frame.f_globals.get('__file__', '').endswith('setup.py')
def spec_for_sensitive_tests(self):
"""
Ensure stdlib distutils when running select tests under CPython.
python/cpython#91169
"""
clear_distutils()
self.spec_for_distutils = lambda: None
sensitive_tests = (
[
'test.test_distutils',
'test.test_peg_generator',
'test.test_importlib',
]
if sys.version_info < (3, 10)
else [
'test.test_distutils',
]
)
for name in DistutilsMetaFinder.sensitive_tests:
setattr(
DistutilsMetaFinder,
f'spec_for_{name}',
DistutilsMetaFinder.spec_for_sensitive_tests,
)
DISTUTILS_FINDER = DistutilsMetaFinder()
def add_shim():
DISTUTILS_FINDER in sys.meta_path or insert_shim()
class shim:
def __enter__(self):
insert_shim()
def __exit__(self, exc, value, tb):
remove_shim()
def insert_shim():
sys.meta_path.insert(0, DISTUTILS_FINDER)
def remove_shim():
try:
sys.meta_path.remove(DISTUTILS_FINDER)
except ValueError:
pass


@ -0,0 +1 @@
__import__('_distutils_hack').do_override()


@ -0,0 +1,17 @@
from ._impl import __version__
from .entry import NetworkTableEntry
from .instance import NetworkTablesInstance
from .table import NetworkTable
from ._impl.value import Value
#: Alias of NetworkTablesInstance.getDefault(), the "default" instance
NetworkTables = NetworkTablesInstance.getDefault()
__all__ = (
"NetworkTablesInstance",
"NetworkTables",
"NetworkTable",
"NetworkTableEntry",
"Value",
)
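# Illustrative usage (not part of this module); the server address is a placeholder:
#
#     from networktables import NetworkTables
#     NetworkTables.initialize(server="10.12.34.2")
#     table = NetworkTables.getTable("SmartDashboard")
#     table.putNumber("x", 1.0)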


@ -0,0 +1,4 @@
try:
from .version import __version__
except ImportError:
__version__ = "master"


@ -0,0 +1,244 @@
# validated: 2018-11-27 DS 8eafe7f32561 cpp/ntcore_cpp.cpp
from .connection_notifier import ConnectionNotifier
from .dispatcher import Dispatcher
from .ds_client import DsClient
from .entry_notifier import EntryNotifier
from .rpc_server import RpcServer
from .storage import Storage
from .constants import NT_NOTIFY_IMMEDIATE, NT_NOTIFY_NEW
_is_new = NT_NOTIFY_IMMEDIATE | NT_NOTIFY_NEW
class NtCoreApi(object):
"""
Internal NetworkTables API wrapper
In theory you could create multiple instances of this
and talk to multiple NT servers or create multiple
NT servers... though, I don't really know why one
would want to do this.
"""
def __init__(self, entry_creator, verbose=False):
self.conn_notifier = ConnectionNotifier(verbose=verbose)
self.entry_notifier = EntryNotifier(verbose=verbose)
self.rpc_server = RpcServer(verbose=verbose)
self.storage = Storage(self.entry_notifier, self.rpc_server, entry_creator)
self.dispatcher = Dispatcher(self.storage, self.conn_notifier, verbose=verbose)
self.ds_client = DsClient(self.dispatcher, verbose=verbose)
self._init_table_functions()
def stop(self):
self.ds_client.stop()
self.dispatcher.stop()
self.rpc_server.stop()
self.entry_notifier.stop()
self.conn_notifier.stop()
self.storage.stop()
def destroy(self):
self.ds_client = None
self.dispatcher = None
self.rpc_server = None
self.entry_notifier = None
self.conn_notifier = None
self.storage = None
#
# Table functions (inline because they're called often)
#
def _init_table_functions(self):
self.getEntry = self.storage.getEntry
self.getEntryId = self.storage.getEntryId
self.getEntries = self.storage.getEntries
self.getEntryNameById = self.storage.getEntryNameById
self.getEntryTypeById = self.storage.getEntryTypeById
self.getEntryValue = self.storage.getEntryValue
self.setDefaultEntryValue = self.storage.setDefaultEntryValue
self.setDefaultEntryValueById = self.storage.setDefaultEntryValueById
self.setEntryValue = self.storage.setEntryValue
self.setEntryValueById = self.storage.setEntryValueById
self.setEntryTypeValue = self.storage.setEntryTypeValue
self.setEntryTypeValueById = self.storage.setEntryTypeValueById
self.setEntryFlags = self.storage.setEntryFlags
self.setEntryFlagsById = self.storage.setEntryFlagsById
self.getEntryFlags = self.storage.getEntryFlags
self.getEntryFlagsById = self.storage.getEntryFlagsById
self.deleteEntry = self.storage.deleteEntry
self.deleteEntryById = self.storage.deleteEntryById
self.deleteAllEntries = self.storage.deleteAllEntries
self.getEntryInfo = self.storage.getEntryInfo
self.getEntryInfoById = self.storage.getEntryInfoById
#
# Entry notification
#
def addEntryListener(self, prefix, callback, flags):
return self.storage.addListener(prefix, callback, flags)
def addEntryListenerById(self, local_id, callback, flags):
return self.storage.addListenerById(local_id, callback, flags)
def addEntryListenerByIdEx(
self, fromobj, key, local_id, callback, flags, paramIsNew
):
if paramIsNew:
def listener(item):
key_, value_, flags_, _ = item
callback(fromobj, key, value_.value, (flags_ & _is_new) != 0)
else:
def listener(item):
key_, value_, flags_, _ = item
callback(fromobj, key, value_.value, flags_)
return self.storage.addListenerById(local_id, listener, flags)
def createEntryListenerPoller(self):
return self.entry_notifier.createPoller()
def destroyEntryListenerPoller(self, poller_uid):
self.entry_notifier.removePoller(poller_uid)
def addPolledEntryListener(self, poller_uid, prefix, flags):
return self.storage.addPolledListener(poller_uid, prefix, flags)
def addPolledEntryListenerById(self, poller_uid, local_id, flags):
return self.storage.addPolledListenerById(poller_uid, local_id, flags)
def pollEntryListener(self, poller_uid, timeout=None):
return self.entry_notifier.poll(poller_uid, timeout=timeout)
def cancelPollEntryListener(self, poller_uid):
self.entry_notifier.cancelPoll(poller_uid)
def removeEntryListener(self, listener_uid):
self.entry_notifier.remove(listener_uid)
def waitForEntryListenerQueue(self, timeout):
return self.entry_notifier.waitForQueue(timeout)
#
# Connection notifications
#
def addConnectionListener(self, callback, immediate_notify):
return self.dispatcher.addListener(callback, immediate_notify)
def createConnectionListenerPoller(self):
return self.conn_notifier.createPoller()
def destroyConnectionListenerPoller(self, poller_uid):
self.conn_notifier.removePoller(poller_uid)
def addPolledConnectionListener(self, poller_uid, immediate_notify):
return self.dispatcher.addPolledListener(poller_uid, immediate_notify)
def pollConnectionListener(self, poller_uid, timeout=None):
return self.conn_notifier.poll(poller_uid, timeout=timeout)
def cancelPollConnectionListener(self, poller_uid):
self.conn_notifier.cancelPoll(poller_uid)
def removeConnectionListener(self, listener_uid):
self.conn_notifier.remove(listener_uid)
def waitForConnectionListenerQueue(self, timeout):
return self.conn_notifier.waitForQueue(timeout)
#
# TODO: RPC stuff not currently implemented
# .. there's probably a good pythonic way to implement
# it, but I don't really want to deal with it now.
# If you care, submit a PR.
#
# I would have the caller register the server function
# via a docstring.
#
#
# Client/Server Functions
#
def setNetworkIdentity(self, name):
self.dispatcher.setIdentity(name)
def getNetworkMode(self):
return self.dispatcher.getNetworkMode()
# python-specific
def startTestMode(self, is_server):
if self.dispatcher.startTestMode(is_server):
self.storage.m_server = is_server
return True
else:
return False
def startServer(self, persist_filename, listen_address, port):
return self.dispatcher.startServer(persist_filename, listen_address, port)
def stopServer(self):
self.dispatcher.stop()
def startClient(self):
return self.dispatcher.startClient()
def stopClient(self):
self.dispatcher.stop()
def setServer(self, server_or_servers):
self.dispatcher.setServer(server_or_servers)
def setServerTeam(self, teamNumber, port):
self.dispatcher.setServerTeam(teamNumber, port)
def startDSClient(self, port):
self.ds_client.start(port)
def stopDSClient(self):
self.ds_client.stop()
def setUpdateRate(self, interval):
self.dispatcher.setUpdateRate(interval)
def flush(self):
self.dispatcher.flush()
def getRemoteAddress(self):
if not self.dispatcher.isServer():
for conn in self.dispatcher.getConnections():
return conn.remote_ip
def getIsConnected(self):
return self.dispatcher.isConnected()
def setVerboseLogging(self, verbose):
self.conn_notifier.setVerboseLogging(verbose)
self.dispatcher.setVerboseLogging(verbose)
self.entry_notifier.setVerboseLogging(verbose)
self.rpc_server.setVerboseLogging(verbose)
#
# Persistence
#
def savePersistent(self, filename):
return self.storage.savePersistent(filename, periodic=False)
def loadPersistent(self, filename):
return self.storage.loadPersistent(filename)
def saveEntries(self, filename, prefix):
return self.storage.saveEntries(prefix, filename=filename)
def loadEntries(self, filename, prefix):
return self.storage.loadEntries(filename=filename, prefix=prefix)


@ -0,0 +1,275 @@
# validated: 2018-11-27 DS 18c8cce6a78d cpp/CallbackManager.h
# ----------------------------------------------------------------------------
# Copyright (c) FIRST 2017. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
from collections import deque, namedtuple
from threading import Condition
import time
try:
# Python 3.7 only, should be more efficient
from queue import SimpleQueue as Queue, Empty
except ImportError:
from queue import Queue, Empty
from .support.safe_thread import SafeThread
from .support.uidvector import UidVector
import logging
logger = logging.getLogger("nt")
_ListenerData = namedtuple("ListenerData", ["callback", "poller_uid"])
class Poller(object):
def __init__(self):
# Note: this is really close to the python queue, but we really have to
# mess with its internals to get the same semantics as WPILib, so we are
# rolling our own :(
self.poll_queue = deque()
self.poll_cond = Condition()
self.terminating = False
self.cancelling = False
def terminate(self):
with self.poll_cond:
self.terminating = True
self.poll_cond.notify_all()
class CallbackThread(object):
def __init__(self, name):
# Don't need this in python, queue already has one
# self.m_mutex = threading.Lock()
self.m_listeners = UidVector()
self.m_queue = Queue()
self.m_pollers = UidVector()
self.m_active = False
self.name = name
#
# derived must implement the following
#
def matches(self, listener, data):
raise NotImplementedError
def setListener(self, data, listener_uid):
raise NotImplementedError
def doCallback(self, callback, data):
raise NotImplementedError
#
# Impl
#
def start(self):
self.m_active = True
self._thread = SafeThread(target=self.main, name=self.name)
def stop(self):
self.m_active = False
self.m_queue.put(None)
def sendPoller(self, poller_uid, *args):
# args are (listener_uid, item)
poller = self.m_pollers.get(poller_uid)
if poller:
with poller.poll_cond:
poller.poll_queue.append(args)
poller.poll_cond.notify()
def main(self):
# micro-optimization: lift these out of the loop
doCallback = self.doCallback
matches = self.matches
queue_get = self.m_queue.get
setListener = self.setListener
listeners_get = self.m_listeners.get
listeners_items = self.m_listeners.items
while True:
item = queue_get()
if not item:
logger.debug("%s thread no longer active", self.name)
break
listener_uid, item = item
if listener_uid is not None:
listener = listeners_get(listener_uid)
if listener and matches(listener, item):
setListener(item, listener_uid)
cb = listener.callback
if cb:
try:
doCallback(cb, item)
except Exception:
logger.warning(
"Unhandled exception processing %s callback",
self.name,
exc_info=True,
)
elif listener.poller_uid is not None:
self.sendPoller(listener.poller_uid, listener_uid, item)
else:
# Use copy because iterator might get invalidated
for listener_uid, listener in list(listeners_items()):
if matches(listener, item):
setListener(item, listener_uid)
cb = listener.callback
if cb:
try:
doCallback(cb, item)
except Exception:
logger.warning(
"Unhandled exception processing %s callback",
self.name,
exc_info=True,
)
elif listener.poller_uid is not None:
self.sendPoller(listener.poller_uid, listener_uid, item)
# Wake any blocked pollers
for poller in self.m_pollers.values():
poller.terminate()
class CallbackManager(object):
# Derived classes should declare this attribute at class level:
# THREAD_CLASS = Something
def __init__(self, verbose):
self.m_verbose = verbose
self.m_owner = None
def setVerboseLogging(self, verbose):
self.m_verbose = verbose
def stop(self):
if self.m_owner:
self.m_owner.stop()
def remove(self, listener_uid):
thr = self.m_owner
if thr:
thr.m_listeners.pop(listener_uid, None)
def createPoller(self):
self.start()
thr = self.m_owner
return thr.m_pollers.add(Poller())
def removePoller(self, poller_uid):
thr = self.m_owner
if not thr:
return
# Remove any listeners that are associated with this poller
listeners = list(thr.m_listeners.items())
for lid, listener in listeners:
if listener.poller_uid == poller_uid:
thr.m_listeners.pop(lid)
# Wake up any blocked pollers
poller = thr.m_pollers.pop(poller_uid, None)
if not poller:
return
poller.terminate()
def waitForQueue(self, timeout):
thr = self.m_owner
if not thr:
return True
# This function is intended for unit testing purposes only, so it's
# not as efficient as it could be
q = thr.m_queue
if timeout is None:
while not q.empty() and thr.m_active:
time.sleep(0.005)
else:
wait_until = time.monotonic() + timeout
while not q.empty() and thr.m_active:
time.sleep(0.005)
if time.monotonic() > wait_until:
return q.empty()
return True
def poll(self, poller_uid, timeout=None):
# returns infos, timed_out
# -> infos is a list of (listener_uid, item)
infos = []
timed_out = False
thr = self.m_owner
if not thr:
return infos, timed_out
poller = thr.m_pollers.get(poller_uid)
if not poller:
return infos, timed_out
def _poll_fn():
if poller.poll_queue:
return 1
if poller.cancelling:
# Note: this only works if there's a single thread calling this
# function for any particular poller, but that's the intended use.
poller.cancelling = False
return 2
with poller.poll_cond:
result = poller.poll_cond.wait_for(_poll_fn, timeout)
if result is None: # timeout
timed_out = True
elif result == 1: # success
infos.extend(poller.poll_queue)
poller.poll_queue.clear()
return infos, timed_out
def cancelPoll(self, poller_uid):
thr = self.m_owner
if not thr:
return
poller = thr.m_pollers.get(poller_uid)
if not poller:
return
with poller.poll_cond:
poller.cancelling = True
poller.poll_cond.notify()
# doStart in ntcore
def start(self, *args):
if not self.m_owner:
self.m_owner = self.THREAD_CLASS(*args)
self.m_owner.start()
# Unlike ntcore, only a single argument is supported here. This is
# to ensure that it's more clear what struct is being passed through
def doAdd(self, item):
self.start()
thr = self.m_owner
return thr.m_listeners.add(item)
def send(self, only_listener, item):
thr = self.m_owner
if not thr or not thr.m_listeners:
return
thr.m_queue.put((only_listener, item))


@ -0,0 +1,48 @@
# validated: 2018-11-27 DS ac751d32247e cpp/ConnectionNotifier.cpp cpp/ConnectionNotifier.h cpp/IConnectionNotifier.h
# ----------------------------------------------------------------------------
# Copyright (c) FIRST 2017. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
from collections import namedtuple
from .callback_manager import CallbackManager, CallbackThread
_ConnectionCallback = namedtuple("ConnectionCallback", ["callback", "poller_uid"])
_ConnectionNotification = namedtuple(
"ConnectionNotification", ["connected", "conn_info"]
)
class ConnectionNotifierThread(CallbackThread):
def __init__(self):
CallbackThread.__init__(self, "connection-notifier")
def matches(self, listener, data):
return True
def setListener(self, data, listener_uid):
pass
def doCallback(self, callback, data):
callback(data)
class ConnectionNotifier(CallbackManager):
THREAD_CLASS = ConnectionNotifierThread
def add(self, callback):
return self.doAdd(_ConnectionCallback(callback, None))
def addPolled(self, poller_uid):
return self.doAdd(_ConnectionCallback(None, poller_uid))
def notifyConnection(self, connected, conn_info, only_listener=None):
self.send(only_listener, _ConnectionNotification(connected, conn_info))
def start(self):
CallbackManager.start(self)


@ -0,0 +1,89 @@
# novalidate
# fmt: off
# data types
NT_UNASSIGNED = b'\x00'
NT_BOOLEAN = b'\x01'
NT_DOUBLE = b'\x02'
NT_STRING = b'\x04'
NT_RAW = b'\x08'
NT_BOOLEAN_ARRAY = b'\x10'
NT_DOUBLE_ARRAY = b'\x20'
NT_STRING_ARRAY = b'\x40'
NT_RPC = b'\x80'
# Raw types transmitted on the wire
NT_VTYPE2RAW = {
NT_BOOLEAN: b'\x00',
NT_DOUBLE: b'\x01',
NT_STRING: b'\x02',
NT_RAW: b'\x03',
NT_BOOLEAN_ARRAY: b'\x10',
NT_DOUBLE_ARRAY: b'\x11',
NT_STRING_ARRAY: b'\x12',
NT_RPC: b'\x20',
}
NT_RAW2VTYPE = {v: k for k, v in NT_VTYPE2RAW.items()}
# NetworkTables notifier kinds.
NT_NOTIFY_NONE = 0x00
NT_NOTIFY_IMMEDIATE = 0x01 # initial listener addition
NT_NOTIFY_LOCAL = 0x02 # changed locally
NT_NOTIFY_NEW = 0x04 # newly created entry
NT_NOTIFY_DELETE = 0x08 # deleted
NT_NOTIFY_UPDATE = 0x10 # value changed
NT_NOTIFY_FLAGS = 0x20 # flags changed
# Client/server modes
NT_NET_MODE_NONE = 0x00 # not running
NT_NET_MODE_SERVER = 0x01 # running in server mode
NT_NET_MODE_CLIENT = 0x02 # running in client mode
NT_NET_MODE_STARTING = 0x04 # flag for starting (either client or server)
NT_NET_MODE_FAILURE = 0x08 # flag for failure (either client or server)
NT_NET_MODE_TEST = 0x10 # flag indicating test mode (either client or server)
# NetworkTables entry flags
NT_PERSISTENT = 0x01
# Message types
kKeepAlive = b'\x00'
kClientHello = b'\x01'
kProtoUnsup = b'\x02'
kServerHelloDone = b'\x03'
kServerHello = b'\x04'
kClientHelloDone = b'\x05'
kEntryAssign = b'\x10'
kEntryUpdate = b'\x11'
kFlagsUpdate = b'\x12'
kEntryDelete = b'\x13'
kClearEntries = b'\x14'
kExecuteRpc = b'\x20'
kRpcResponse = b'\x21'
kClearAllMagic = 0xD06CB27A
_msgtypes = {
kKeepAlive: 'kKeepAlive',
kClientHello: 'kClientHello',
kProtoUnsup: 'kProtoUnsup',
kServerHelloDone: 'kServerHelloDone',
kServerHello: 'kServerHello',
kClientHelloDone: 'kClientHelloDone',
kEntryAssign: 'kEntryAssign',
kEntryUpdate: 'kEntryUpdate',
kFlagsUpdate: 'kFlagsUpdate',
kEntryDelete: 'kEntryDelete',
kClearEntries: 'kClearEntries',
kExecuteRpc: 'kExecuteRpc',
kRpcResponse: 'kRpcResponse',
}
def msgtype_str(msgtype):
return _msgtypes.get(msgtype, 'Unknown (%s)' % msgtype)
# The default port that network tables operates on
NT_DEFAULT_PORT = 1735


@ -0,0 +1,741 @@
# validated: 2019-01-04 DS ceed1d74dc30 cpp/Dispatcher.cpp cpp/Dispatcher.h cpp/IDispatcher.h
# ----------------------------------------------------------------------------
# Copyright (c) FIRST 2017. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
import threading
import time
from .message import Message
from .network_connection import NetworkConnection
from .tcpsockets.tcp_acceptor import TcpAcceptor
from .tcpsockets.tcp_connector import TcpConnector
from .support.safe_thread import SafeThread
from .constants import (
kKeepAlive,
kClientHello,
kProtoUnsup,
kServerHello,
kServerHelloDone,
kClientHelloDone,
kEntryAssign,
NT_NET_MODE_NONE,
NT_NET_MODE_SERVER,
NT_NET_MODE_CLIENT,
NT_NET_MODE_STARTING,
NT_NET_MODE_FAILURE,
NT_NET_MODE_TEST,
)
import logging
logger = logging.getLogger("nt")
class Dispatcher(object):
def __init__(self, storage, conn_notifier, verbose=False):
# logging debugging
self.m_verbose = verbose
self.m_storage = storage
self.m_notifier = conn_notifier
self.m_networkMode = NT_NET_MODE_NONE
self.m_persist_filename = None
self.m_server_acceptor = None
self.m_client_connector_override = None
self.m_client_connector = None
self.m_connections_uid = 0
self.m_default_proto = 0x0300 # for testing
# Mutex for user-accessible items
self.m_user_mutex = threading.RLock()
self.m_connections = []
# Circular import issue
try:
from .version import __version__
except ImportError:
__version__ = "[unknown version]"
self.m_identity = "pynetworktables %s" % __version__
self.m_active = False # set to false to terminate threads
self.m_update_rate = 0.050 # periodic dispatch rate, in s
# Condition variable for forced dispatch wakeup (flush)
self.m_flush_mutex = threading.Lock()
self.m_flush_cv = threading.Condition(self.m_flush_mutex)
self.m_last_flush = 0
self.m_do_flush = False
# Condition variable for client reconnect (uses user mutex)
self.m_reconnect_cv = threading.Condition(self.m_user_mutex)
self.m_reconnect_proto_rev = self.m_default_proto
self.m_do_reconnect = True
def setVerboseLogging(self, verbose):
self.m_verbose = verbose
def setServer(self, server_or_servers):
"""
:param server_or_servers: a tuple of (server, port) or a list of tuples of (server, port)
"""
self._setConnector(server_or_servers)
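# Illustrative examples (not part of the original source); addresses are placeholders:
#     dispatcher.setServer(("10.12.34.2", 1735))
#     dispatcher.setServer([("10.12.34.2", 1735), ("172.22.11.2", 1735)])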
def setServerTeam(self, team, port):
servers = [
"10.%d.%d.2" % (team / 100, team % 100),
"roboRIO-%d-FRC.local" % team,
"172.22.11.2",
"roboRIO-%d-FRC.lan" % team,
"roboRIO-%d-FRC.frc-field.local" % team,
]
self.setServer([(s, port) for s in servers])
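# Illustrative example (not part of the original source): for team 1234 the
# candidate servers are "10.12.34.2", "roboRIO-1234-FRC.local", "172.22.11.2",
# "roboRIO-1234-FRC.lan" and "roboRIO-1234-FRC.frc-field.local", each paired
# with the given port.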
def setServerOverride(self, server, port):
self._setConnectorOverride((server, port))
def clearServerOverride(self):
self._clearConnectorOverride()
def getNetworkMode(self):
return self.m_networkMode
# python-specific
def startTestMode(self, is_server):
with self.m_user_mutex:
if self.m_active:
return False
self.m_active = True
if is_server:
self.m_networkMode = NT_NET_MODE_SERVER | NT_NET_MODE_TEST
else:
self.m_networkMode = NT_NET_MODE_CLIENT | NT_NET_MODE_TEST
return True
def startServer(self, persist_filename, listen_address, port):
with self.m_user_mutex:
if self.m_active:
return False
self.m_active = True
logger.info("NetworkTables initialized in server mode")
acceptor = TcpAcceptor(port, listen_address.strip())
self.m_networkMode = NT_NET_MODE_SERVER | NT_NET_MODE_STARTING
self.m_persist_filename = persist_filename
self.m_server_acceptor = acceptor
# Load persistent file. Ignore errors, pass along warnings.
if self.m_verbose:
logger.debug("persistent filename is %s", persist_filename)
if persist_filename:
self.m_storage.loadPersistent(persist_filename)
self.m_storage.setDispatcher(self, True)
self.m_dispatch_thread = SafeThread(
target=self._dispatchThreadMain, name="nt-dispatch-thread"
)
self.m_clientserver_thread = SafeThread(
target=self._serverThreadMain, name="nt-server-thread"
)
return True
def startClient(self):
with self.m_user_mutex:
if self.m_active:
return False
self.m_active = True
logger.info("NetworkTables initialized in client mode")
self.m_networkMode = NT_NET_MODE_CLIENT | NT_NET_MODE_STARTING
self.m_storage.setDispatcher(self, False)
self.m_dispatch_thread = SafeThread(
target=self._dispatchThreadMain, name="nt-dispatch-thread"
)
self.m_clientserver_thread = SafeThread(
target=self._clientThreadMain, name="nt-client-thread"
)
return True
def stop(self):
with self.m_user_mutex:
if not self.m_active:
return
self.m_active = False
# python-specific
if self.m_networkMode & NT_NET_MODE_TEST != 0:
return
# wake up dispatch thread with a flush
with self.m_flush_mutex:
self.m_flush_cv.notify()
# wake up client thread with a reconnect
with self.m_user_mutex:
self.m_client_connector = None
self._clientReconnect()
# wake up server thread by shutting down the socket
if self.m_server_acceptor:
self.m_server_acceptor.shutdown()
# join threads, timeout
self.m_dispatch_thread.join(1)
self.m_clientserver_thread.join(1)
with self.m_user_mutex:
conns = self.m_connections
self.m_connections = []
# close all connections
for conn in conns:
conn.stop()
# cleanup the server socket
# -> needed because we don't have destructors
if self.m_server_acceptor:
self.m_server_acceptor.close()
def setUpdateRate(self, interval):
# don't allow update rates faster than 10 ms or slower than 1 second
interval = float(interval)
if interval < 0.01:
interval = 0.01
elif interval > 1.0:
interval = 1.0
self.m_update_rate = interval
def setIdentity(self, name):
with self.m_user_mutex:
self.m_identity = name
def setDefaultProtoRev(self, proto_rev):
self.m_default_proto = proto_rev
self.m_reconnect_proto_rev = proto_rev
def flush(self):
now = time.monotonic()
with self.m_flush_mutex:
# don't allow flushes more often than every 10 ms
if (now - self.m_last_flush) < 0.010:
return
self.m_last_flush = now
self.m_do_flush = True
self.m_flush_cv.notify()
def getConnections(self):
conns = []
if not self.m_active:
return conns
with self.m_user_mutex:
for conn in self.m_connections:
if conn.state != NetworkConnection.State.kActive:
continue
conns.append(conn.info())
return conns
def isConnected(self):
if self.m_active:
with self.m_user_mutex:
for conn in self.m_connections:
if conn.is_connected():
return True
return False
def isServer(self):
return (self.m_networkMode & NT_NET_MODE_SERVER) != 0
def addListener(self, callback, immediate_notify):
with self.m_user_mutex:
uid = self.m_notifier.add(callback)
# perform immediate notifications
if immediate_notify:
for conn in self.m_connections:
if conn.is_connected():
self.m_notifier.notifyConnection(True, conn.info(), uid)
return uid
def addPolledListener(self, poller_uid, immediate_notify):
with self.m_user_mutex:
uid = self.m_notifier.addPolled(poller_uid)
# perform immediate notifications
if immediate_notify:
for conn in self.m_connections:
if conn.is_connected():
self.m_notifier.notifyConnection(True, conn.info(), uid)
return uid
def _strip_connectors(self, connector):
if isinstance(connector, tuple):
server, port = connector
return (server.strip(), port)
else:
return [(server.strip(), port) for server, port in connector]
def _setConnector(self, connector):
with self.m_user_mutex:
self.m_client_connector = self._strip_connectors(connector)
def _setConnectorOverride(self, connector):
with self.m_user_mutex:
self.m_client_connector_override = self._strip_connectors(connector)
def _clearConnectorOverride(self):
with self.m_user_mutex:
self.m_client_connector_override = None
def _dispatchWaitFor(self):
return not self.m_active or self.m_do_flush
def _dispatchThreadMain(self):
timeout_time = time.monotonic()
save_delta_time = 1.0
next_save_time = timeout_time + save_delta_time
count = 0
is_server = self.m_networkMode & NT_NET_MODE_SERVER
verbose = self.m_verbose
# python micro-optimizations because this is a loop
monotonic = time.monotonic
kActive = NetworkConnection.State.kActive
kDead = NetworkConnection.State.kDead
while self.m_active:
# handle loop taking too long
start = monotonic()
if start > timeout_time or timeout_time > start + self.m_update_rate:
timeout_time = start
# wait for periodic or when flushed
timeout_time += self.m_update_rate
with self.m_flush_mutex:
self.m_flush_cv.wait_for(self._dispatchWaitFor, timeout_time - start)
self.m_do_flush = False
# in case we were woken up to terminate
if not self.m_active:
break
# perform periodic persistent save
if is_server and self.m_persist_filename and start > next_save_time:
next_save_time += save_delta_time
# handle loop taking too long
if start > next_save_time:
next_save_time = start + save_delta_time
err = self.m_storage.savePersistent(self.m_persist_filename, True)
if err:
logger.warning("periodic persistent save: %s", err)
with self.m_user_mutex:
reconnect = False
if verbose:
count += 1
if count > 10:
logger.debug(
"dispatch running %s connections", len(self.m_connections)
)
count = 0
for conn in self.m_connections:
# post outgoing messages if connection is active
# only send keep-alives on client
state = conn.state
if state == kActive:
conn.postOutgoing(not is_server)
# if client, if connection died
if not is_server and state == kDead:
reconnect = True
# reconnect if we disconnected (and a reconnect is not in progress)
if reconnect and not self.m_do_reconnect:
self.m_do_reconnect = True
self.m_reconnect_cv.notify()
def _queueOutgoing(self, msg, only, except_):
with self.m_user_mutex:
for conn in self.m_connections:
if conn == except_:
continue
if only and conn != only:
continue
state = conn.state
if (
state != NetworkConnection.State.kSynchronized
and state != NetworkConnection.State.kActive
):
continue
conn.queueOutgoing(msg)
def _serverThreadMain(self):
if not self.m_server_acceptor.start():
self.m_active = False
self.m_networkMode = NT_NET_MODE_SERVER | NT_NET_MODE_FAILURE
return
self.m_networkMode = NT_NET_MODE_SERVER
try:
while self.m_active:
stream = self.m_server_acceptor.accept()
if not stream:
self.m_active = False
return
if not self.m_active:
return
logger.debug(
"server: client connection from %s port %s",
stream.getPeerIP(),
stream.getPeerPort(),
)
# add to connections list
connection_uid = self.m_connections_uid
self.m_connections_uid += 1
conn = NetworkConnection(
connection_uid,
stream,
self.m_notifier,
self._serverHandshake,
self.m_storage.getMessageEntryType,
verbose=self.m_verbose,
)
conn.set_process_incoming(self.m_storage.processIncoming)
with self.m_user_mutex:
# reuse dead connection slots
for i in range(len(self.m_connections)):
c = self.m_connections[i]
if c.state == NetworkConnection.State.kDead:
self.m_connections[i] = conn
break
else:
self.m_connections.append(conn)
conn.start()
finally:
self.m_networkMode = NT_NET_MODE_NONE
def _clientThreadMain(self):
try:
tcp_connector = TcpConnector(1, self.m_verbose)
while self.m_active:
# sleep between retries
time.sleep(0.250)
tcp_connector.setVerbose(self.m_verbose)
# get next server to connect to
with self.m_user_mutex:
if self.m_client_connector_override:
server_or_servers = self.m_client_connector_override
else:
if not self.m_client_connector:
self.m_networkMode = (
NT_NET_MODE_CLIENT | NT_NET_MODE_FAILURE
)
continue
server_or_servers = self.m_client_connector
# try to connect (with timeout)
if self.m_verbose:
logger.debug("client trying to connect")
stream = tcp_connector.connect(server_or_servers)
if not stream:
self.m_networkMode = NT_NET_MODE_CLIENT | NT_NET_MODE_FAILURE
continue # keep retrying
logger.debug("client connected")
self.m_networkMode = NT_NET_MODE_CLIENT
with self.m_user_mutex:
connection_uid = self.m_connections_uid
self.m_connections_uid += 1
conn = NetworkConnection(
connection_uid,
stream,
self.m_notifier,
self._clientHandshake,
self.m_storage.getMessageEntryType,
verbose=self.m_verbose,
)
conn.set_process_incoming(self.m_storage.processIncoming)
# disconnect any current
# -> different from ntcore because we don't have destructors
for c in self.m_connections:
if c != conn:
c.stop()
del self.m_connections[:]
self.m_connections.append(conn)
conn.set_proto_rev(self.m_reconnect_proto_rev)
conn.start()
# reconnect the next time starting with latest protocol revision
self.m_reconnect_proto_rev = self.m_default_proto
# block until told to reconnect
self.m_do_reconnect = False
self.m_reconnect_cv.wait_for(
lambda: not self.m_active or self.m_do_reconnect
)
finally:
self.m_networkMode = NT_NET_MODE_NONE
def _clientHandshake(self, conn, get_msg, send_msgs):
# get identity
with self.m_user_mutex:
self_id = self.m_identity
# send client hello
if self.m_verbose:
logger.debug("client: sending hello")
send_msgs((Message.clientHello(conn.get_proto_rev(), self_id),))
# wait for response
msg = get_msg()
if not msg:
# disconnected, retry
logger.debug("client: server disconnected before first response")
return False
if msg.type == kProtoUnsup:
if msg.id == 0x0200:
logger.debug("client: connected to NT2 server, reconnecting...")
self._clientReconnect(0x0200)
else:
logger.debug("client: connected to 0x%04x server, giving up...", msg.id)
return False
new_server = True
if conn.get_proto_rev() >= 0x0300:
# should be server hello; if not, disconnect.
if not msg.type == kServerHello:
return False
remote_id = msg.str
if (msg.flags & 1) != 0:
new_server = False
# get the next message
msg = get_msg()
else:
remote_id = "NT2 server"
conn.set_remote_id(remote_id)
# receive initial assignments
incoming = []
verbose = self.m_verbose
while True:
if not msg:
# disconnected, retry
logger.debug("client: server disconnected during initial entries")
return False
if msg.type == kServerHelloDone:
break
if msg.type == kKeepAlive:
# shouldn't receive a keep alive, but handle gracefully
msg = get_msg()
continue
if not msg.type == kEntryAssign:
# unexpected message
logger.debug(
"client: received message (%s) other than entry assignment during initial handshake",
msg.type,
)
return False
if verbose:
logger.debug(
"client %s: received assign str=%s id=%s seq_num=%s val=%s",
self_id,
msg.str,
msg.id,
msg.seq_num_uid,
msg.value,
)
incoming.append(msg)
# get the next message
msg = get_msg()
# generate outgoing assignments
outgoing = []
self.m_storage.applyInitialAssignments(conn, incoming, new_server, outgoing)
if conn.get_proto_rev() >= 0x0300:
outgoing.append(Message.clientHelloDone())
if outgoing:
send_msgs(outgoing)
# stream = conn.get_stream()
# logger.info("client: CONNECTED to server %s port %s",
# stream.getPeerIP(), stream.getPeerPort())
return True
def _serverHandshake(self, conn, get_msg, send_msgs):
verbose = self.m_verbose
# Wait for the client to send us a hello.
msg = get_msg()
if not msg:
logger.debug("server: client disconnected before sending hello")
return False
if not msg.type == kClientHello:
logger.debug("server: client initial message was not client hello")
return False
# Check that the client requested version is not too high.
proto_rev = msg.id
if proto_rev > self.m_default_proto:
logger.debug(
"server: client requested proto > 0x%04x", self.m_default_proto
)
send_msgs((Message.protoUnsup(self.m_default_proto),))
return False
if proto_rev >= 0x0300:
remote_id = msg.str
else:
remote_id = "NT2 client"
conn.set_remote_id(remote_id)
# Set the proto version to the client requested version
if verbose:
logger.debug("server: client protocol 0x%04x", proto_rev)
conn.set_proto_rev(proto_rev)
# Send initial set of assignments
outgoing = []
# Start with server hello. TODO: initial connection flag
if proto_rev >= 0x0300:
with self.m_user_mutex:
outgoing.append(Message.serverHello(0, self.m_identity))
# Get snapshot of initial assignments
self.m_storage.getInitialAssignments(conn, outgoing)
# Finish with server hello done
outgoing.append(Message.serverHelloDone())
# Batch transmit
if verbose:
logger.debug("server: sending initial assignments")
send_msgs(outgoing)
# In proto rev 3.0 and later, handshake concludes with a client hello
# done message, we can batch the assigns before marking the connection
# active. In pre-3.0, need to just immediately mark it active and hand
# off control to the dispatcher to assign them as they arrive.
if proto_rev >= 0x0300:
# receive client initial assignments
incoming = []
while True:
# get the next message (blocks)
msg = get_msg()
if not msg:
# disconnected, retry
logger.debug("server: disconnected waiting for initial entries")
return False
if msg.type == kClientHelloDone:
break
# shouldn't receive a keep alive, but handle gracefully
elif msg.type == kKeepAlive:
continue
if msg.type != kEntryAssign:
# unexpected message
logger.debug(
"server: received message (%s) other than entry assignment during initial handshake",
msg.type,
)
return False
if verbose:
logger.debug(
"received assign str=%s id=%s seq_num=%s",
msg.str,
msg.id,
msg.seq_num_uid,
)
incoming.append(msg)
for msg in incoming:
self.m_storage.processIncoming(msg, conn)
# stream = conn.get_stream()
# logger.info("server: client CONNECTED %s port %s",
# stream.getPeerIP(), stream.getPeerPort())
return True
def _clientReconnect(self, proto_rev=0x0300):
if self.m_networkMode & NT_NET_MODE_SERVER != 0:
return
with self.m_user_mutex:
self.m_reconnect_proto_rev = proto_rev
self.m_do_reconnect = True
self.m_reconnect_cv.notify()


@ -0,0 +1,115 @@
# validated: 2018-11-27 DS 18c8cce6a78d cpp/DsClient.cpp cpp/DsClient.h
# ----------------------------------------------------------------------------
# Copyright (c) FIRST 2017. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
import json
import threading
from .support.safe_thread import SafeThread
from .tcpsockets.tcp_connector import TcpConnector
import logging
logger = logging.getLogger("nt")
class DsClient(object):
def __init__(self, dispatcher, verbose=False):
self.m_dispatcher = dispatcher
self.verbose = verbose
self.m_active = False
self.m_owner = None # type: SafeThread
self.m_mutex = threading.Lock()
self.m_cond = threading.Condition(self.m_mutex)
self.m_port = None # type: int
self.m_stream = None
def start(self, port):
with self.m_mutex:
self.m_port = port
if not self.m_active:
self.m_active = True
self.m_owner = SafeThread(target=self._thread_main, name="nt-dsclient")
def stop(self):
with self.m_mutex:
# Close the stream so the read (if any) terminates.
self.m_active = False
if self.m_stream:
self.m_stream.close()
self.m_cond.notify()
def _thread_main(self):
oldip = 0
connector = TcpConnector(verbose=False, timeout=1)
while self.m_active:
# wait for periodic reconnect or termination
with self.m_mutex:
self.m_cond.wait_for(lambda: not self.m_active, timeout=0.5)
port = self.m_port
if not self.m_active:
break
self.m_stream = connector.connect(("127.0.0.1", 1742))
if not self.m_active:
break
if not self.m_stream:
continue
while self.m_active and self.m_stream:
json_blob = self.m_stream.readline()
if not json_blob:
# We've reached EOF.
with self.m_mutex:
self.m_stream.close()
self.m_stream = None
if not self.m_active:
break
try:
obj = json.loads(json_blob.decode())
except (json.JSONDecodeError, UnicodeDecodeError):
continue
try:
ip = int(obj["robotIP"])
except (KeyError, ValueError):
continue
# If zero, clear the server override
if ip == 0:
self.m_dispatcher.clearServerOverride()
oldip = 0
continue
# If unchanged, don't reconnect
if ip == oldip:
continue
oldip = ip
# Convert number into dotted quad
ip_str = "%d.%d.%d.%d" % (
(ip >> 24) & 0xFF,
(ip >> 16) & 0xFF,
(ip >> 8) & 0xFF,
ip & 0xFF,
)
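# Illustrative example (not part of the original source): a Driver Station
# line such as {"robotIP": 169090562} unpacks to "10.20.30.2".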
if self.verbose:
logger.info("client: DS overriding server IP to %s", ip_str)
self.m_dispatcher.setServerOverride(ip_str, port)
# We disconnected from the DS, clear the server override
self.m_dispatcher.clearServerOverride()
oldip = 0
# Python note: we don't call Dispatcher.clearServerOverride() again.
# Either it was already called, or we were never active.

View File

@ -0,0 +1,122 @@
# validated: 2017-10-01 DS e4a8bff70e77 cpp/EntryNotifier.cpp cpp/EntryNotifier.h cpp/IEntryNotifier.h
# ----------------------------------------------------------------------------
# Copyright (c) FIRST 2017. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
from collections import namedtuple
from .callback_manager import CallbackManager, CallbackThread
from .constants import (
NT_NOTIFY_IMMEDIATE,
NT_NOTIFY_LOCAL,
NT_NOTIFY_UPDATE,
NT_NOTIFY_FLAGS,
)
_EntryListenerData = namedtuple(
"EntryListenerData",
[
"prefix",
"local_id", # we don't have entry handles like ntcore has
"flags",
"callback",
"poller_uid",
],
)
#
_EntryNotification = namedtuple(
"EntryNotification", ["name", "value", "flags", "local_id"]
)
_assign_both = NT_NOTIFY_UPDATE | NT_NOTIFY_FLAGS
_immediate_local = NT_NOTIFY_IMMEDIATE | NT_NOTIFY_LOCAL
class EntryNotifierThread(CallbackThread):
def __init__(self):
CallbackThread.__init__(self, "entry-notifier")
def matches(self, listener, data):
if not data.value:
return False
# must match local id or prefix
# -> python-specific: match this first, since it's the most likely thing
# to not match
if listener.local_id is not None:
if listener.local_id != data.local_id:
return False
else:
if not data.name.startswith(listener.prefix):
return False
# Flags must be within requested flag set for this listener.
# Because assign messages can result in both a value and flags update,
# we handle that case specially.
listen_flags = listener.flags & ~_immediate_local
flags = data.flags & ~_immediate_local
if (flags & _assign_both) == _assign_both:
if (listen_flags & _assign_both) == 0:
return False
listen_flags &= ~_assign_both
flags &= ~_assign_both
if (flags & ~listen_flags) != 0:
return False
return True
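    # Worked example: a listener registered with only NT_NOTIFY_UPDATE still
    # matches an assign notification whose flags are
    # NT_NOTIFY_UPDATE | NT_NOTIFY_FLAGS, because the combined assign bits are
    # stripped from both sides before the final containment check.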
def setListener(self, data, listener_uid):
pass
def doCallback(self, callback, data):
callback(data)
class EntryNotifier(CallbackManager):
THREAD_CLASS = EntryNotifierThread
def __init__(self, verbose):
CallbackManager.__init__(self, verbose)
self.m_local_notifiers = False
def add(self, callback, prefix, flags):
if (flags & NT_NOTIFY_LOCAL) != 0:
self.m_local_notifiers = True
return self.doAdd(_EntryListenerData(prefix, None, flags, callback, None))
def addById(self, callback, local_id, flags):
if (flags & NT_NOTIFY_LOCAL) != 0:
self.m_local_notifiers = True
return self.doAdd(_EntryListenerData(None, local_id, flags, callback, None))
def addPolled(self, poller_uid, prefix, flags):
if (flags & NT_NOTIFY_LOCAL) != 0:
self.m_local_notifiers = True
return self.doAdd(_EntryListenerData(prefix, None, flags, None, poller_uid))
def addPolledById(self, poller_uid, local_id, flags):
if (flags & NT_NOTIFY_LOCAL) != 0:
self.m_local_notifiers = True
return self.doAdd(_EntryListenerData(None, local_id, flags, None, poller_uid))
def notifyEntry(self, local_id, name, value, flags, only_listener=None):
# optimization: don't generate needless local queue entries if we have
# no local listeners (as this is a common case on the server side)
if not self.m_local_notifiers and (flags & NT_NOTIFY_LOCAL) != 0:
return
self.send(only_listener, _EntryNotification(name, value, flags, local_id))
def start(self):
CallbackManager.start(self)

View File

@ -0,0 +1,228 @@
# validated: 2018-01-06 DV 2287281066f6 cpp/Message.cpp cpp/Message.h
from collections import namedtuple
from .constants import (
kKeepAlive,
kClientHello,
kProtoUnsup,
kServerHello,
kServerHelloDone,
kClientHelloDone,
kEntryAssign,
kEntryUpdate,
kFlagsUpdate,
kEntryDelete,
kClearEntries,
kExecuteRpc,
kRpcResponse,
kClearAllMagic,
NT_VTYPE2RAW,
NT_RAW2VTYPE,
)
class Message(
namedtuple("Message", ["type", "str", "value", "id", "flags", "seq_num_uid"])
):
__slots__ = ()
_empty_msgtypes = (kKeepAlive, kServerHelloDone, kClientHelloDone)
@classmethod
def keepAlive(cls):
return cls(kKeepAlive, None, None, None, None, None)
@classmethod
def clientHello(cls, proto_rev, identity):
return cls(kClientHello, identity, None, proto_rev, None, None)
@classmethod
def protoUnsup(cls, proto_rev):
return cls(kProtoUnsup, None, None, proto_rev, None, None)
@classmethod
def serverHelloDone(cls):
return cls(kServerHelloDone, None, None, None, None, None)
@classmethod
def serverHello(cls, flags, identity):
return cls(kServerHello, identity, None, None, flags, None)
@classmethod
def clientHelloDone(cls):
return cls(kClientHelloDone, None, None, None, None, None)
@classmethod
def entryAssign(cls, name, msg_id, seq_num_uid, value, flags):
return cls(kEntryAssign, name, value, msg_id, flags, seq_num_uid)
@classmethod
def entryUpdate(cls, entry_id, seq_num_uid, value):
return cls(kEntryUpdate, None, value, entry_id, None, seq_num_uid)
@classmethod
def flagsUpdate(cls, msg_id, flags):
return cls(kFlagsUpdate, None, None, msg_id, flags, None)
@classmethod
def entryDelete(cls, entry_id):
return cls(kEntryDelete, None, None, entry_id, None, None)
@classmethod
def clearEntries(cls):
return cls(kClearEntries, None, None, kClearAllMagic, None, None)
@classmethod
def executeRpc(cls, rpc_id, call_uid, params):
return cls(kExecuteRpc, params, None, rpc_id, None, call_uid)
@classmethod
def rpcResponse(cls, rpc_id, call_uid, result):
return cls(kRpcResponse, result, None, rpc_id, None, call_uid)
@classmethod
def read(cls, rstream, codec, get_entry_type) -> "Message":
msgtype = rstream.read(1)
msg_str = None
value = None
msg_id = None
flags = None
seq_num_uid = None
# switch type
if msgtype in cls._empty_msgtypes:
pass
# python optimization: entry updates tend to occur more than
# anything else, so check this first
elif msgtype == kEntryUpdate:
if codec.proto_rev >= 0x0300:
msg_id, seq_num_uid = rstream.readStruct(codec.entryUpdate)
value_type = NT_RAW2VTYPE.get(rstream.read(1))
else:
msg_id, seq_num_uid = rstream.readStruct(codec.entryUpdate)
value_type = get_entry_type(msg_id)
value = codec.read_value(value_type, rstream)
elif msgtype == kClientHello:
(msg_id,) = rstream.readStruct(codec.clientHello)
if msg_id >= 0x0300:
msg_str = codec.read_string_v3(rstream)
elif msgtype == kProtoUnsup:
(msg_id,) = rstream.readStruct(codec.protoUnsup)
elif msgtype == kServerHello:
(flags,) = rstream.readStruct(codec.serverHello)
msg_str = codec.read_string(rstream)
elif msgtype == kEntryAssign:
msg_str = codec.read_string(rstream)
value_type = NT_RAW2VTYPE.get(rstream.read(1))
if codec.proto_rev >= 0x0300:
msg_id, seq_num_uid, flags = rstream.readStruct(codec.entryAssign)
else:
msg_id, seq_num_uid = rstream.readStruct(codec.entryAssign)
flags = 0
value = codec.read_value(value_type, rstream)
elif msgtype == kFlagsUpdate:
msg_id, flags = rstream.readStruct(codec.flagsUpdate)
elif msgtype == kEntryDelete:
(msg_id,) = rstream.readStruct(codec.entryDelete)
elif msgtype == kClearEntries:
(msg_id,) = rstream.readStruct(codec.clearEntries)
if msg_id != kClearAllMagic:
raise ValueError("Bad magic")
elif msgtype == kExecuteRpc:
msg_id, seq_num_uid = rstream.readStruct(codec.executeRpc)
msg_str = codec.read_string(rstream)
elif msgtype == kRpcResponse:
msg_id, seq_num_uid = rstream.readStruct(codec.rpcResponse)
msg_str = codec.read_string(rstream)
else:
raise ValueError("Unrecognized message type %s" % msgtype)
return cls(msgtype, msg_str, value, msg_id, flags, seq_num_uid)
def write(self, out, codec):
msgtype = self.type
# switch type
if msgtype in self._empty_msgtypes:
out.append(msgtype)
elif msgtype == kClientHello:
proto_rev = self.id
out += (msgtype, codec.clientHello.pack(proto_rev))
if proto_rev >= 0x0300:
codec.write_string_v3(self.str, out)
elif msgtype == kProtoUnsup:
out += (msgtype, codec.protoUnsup.pack(self.id))
elif msgtype == kServerHello:
if codec.proto_rev >= 0x0300:
out += (msgtype, codec.serverHello.pack(self.flags))
codec.write_string(self.str, out)
elif msgtype == kEntryAssign:
out.append(msgtype)
codec.write_string(self.str, out)
value = self.value
if codec.proto_rev >= 0x0300:
sb = codec.entryAssign.pack(self.id, self.seq_num_uid, self.flags)
else:
sb = codec.entryAssign.pack(self.id, self.seq_num_uid)
out += (NT_VTYPE2RAW[value.type], sb)
codec.write_value(value, out)
elif msgtype == kEntryUpdate:
value = self.value
if codec.proto_rev >= 0x0300:
out += (
msgtype,
codec.entryUpdate.pack(self.id, self.seq_num_uid),
NT_VTYPE2RAW[value.type],
)
else:
out += (msgtype, codec.entryUpdate.pack(self.id, self.seq_num_uid))
codec.write_value(value, out)
elif msgtype == kFlagsUpdate:
if codec.proto_rev >= 0x0300:
out += (msgtype, codec.flagsUpdate.pack(self.id, self.flags))
elif msgtype == kEntryDelete:
if codec.proto_rev >= 0x0300:
out += (msgtype, codec.entryDelete.pack(self.id))
elif msgtype == kClearEntries:
if codec.proto_rev >= 0x0300:
out += (msgtype, codec.clearEntries.pack(self.id))
elif msgtype == kExecuteRpc:
if codec.proto_rev >= 0x0300:
out += (msgtype, codec.executeRpc.pack(self.id, self.seq_num_uid))
codec.write_string(self.str, out)
elif msgtype == kRpcResponse:
if codec.proto_rev >= 0x0300:
out += (msgtype, codec.rpcResponse.pack(self.id, self.seq_num_uid))
codec.write_string(self.str, out)
else:
raise ValueError("Internal error: bad value type %s" % self.type)

View File

@ -0,0 +1,519 @@
# validated: 2017-09-28 DS cedbafeb286d cpp/NetworkConnection.cpp cpp/NetworkConnection.h cpp/INetworkConnection.h
# ----------------------------------------------------------------------------
# Copyright (c) FIRST 2017. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
import threading
from time import monotonic
try:
# Python 3.7 only, should be more efficient
from queue import SimpleQueue as Queue, Empty
except ImportError:
from queue import Queue, Empty
from .constants import (
kEntryAssign,
kEntryUpdate,
kFlagsUpdate,
kEntryDelete,
kClearEntries,
msgtype_str,
)
from .message import Message
from .structs import ConnectionInfo
from .wire import WireCodec
from .support.lists import Pair
from .support.safe_thread import SafeThread
from .tcpsockets.tcp_stream import StreamEOF
import logging
logger = logging.getLogger("nt")
_empty_pair = Pair(0, 0)
_state_map = {
0: "created",
1: "init",
2: "handshake",
3: "synchronized",
4: "active",
5: "dead",
}
class NetworkConnection(object):
class State(object):
kCreated = 0
kInit = 1
kHandshake = 2
kSynchronized = 3
kActive = 4
kDead = 5
def __init__(self, uid, stream, notifier, handshake, get_entry_type, verbose=False):
# logging debugging
self.m_verbose = verbose
self.m_uid = uid
self.m_stream = stream
self.m_notifier = notifier
self.m_handshake = handshake
self.m_get_entry_type = get_entry_type
self.m_active = False
self.m_proto_rev = 0x0300
self.state = self.State.kCreated
self.m_state_mutex = threading.Lock()
self.m_last_update = 0
self.m_outgoing = Queue()
self.m_process_incoming = None
self.m_read_thread = None
self.m_write_thread = None
self.m_remote_id_mutex = threading.Lock()
self.m_remote_id = None
self.m_last_post = 0
self.m_pending_mutex = threading.Lock()
self.m_pending_outgoing = []
self.m_pending_update = {}
# Condition variables for shutdown
self.m_shutdown_mutex = threading.Lock()
# Not needed in python
# self.m_read_shutdown_cv = threading.Condition()
# self.m_write_shutdown_cv = threading.Condition()
self.m_read_shutdown = False
self.m_write_shutdown = False
# turn off Nagle algorithm; we bundle packets for transmission
try:
self.m_stream.setNoDelay()
except IOError as e:
logger.warning("Setting TCP_NODELAY: %s", e)
def start(self):
if self.m_active:
return
self.m_active = True
self.set_state(self.State.kInit)
# clear queue
try:
while True:
self.m_outgoing.get_nowait()
except Empty:
pass
# reset shutdown flags
with self.m_shutdown_mutex:
self.m_read_shutdown = False
self.m_write_shutdown = False
# start threads
self.m_write_thread = SafeThread(
target=self._writeThreadMain, name="nt-net-write"
)
self.m_read_thread = SafeThread(target=self._readThreadMain, name="nt-net-read")
def __repr__(self):
try:
return "<NetworkConnection 0x%x %s>" % (id(self), self.info())
except Exception:
return "<NetworkConnection 0x%x ???>" % id(self)
def stop(self):
logger.debug("NetworkConnection stopping (%s)", self)
if not self.m_active:
return
self.set_state(self.State.kDead)
self.m_active = False
# closing the stream so the read thread terminates
self.m_stream.close()
# send an empty outgoing message set so the write thread terminates
self.m_outgoing.put([])
# wait for threads to terminate, timeout
self.m_write_thread.join(1)
if self.m_write_thread.is_alive():
logger.warning("%s did not die", self.m_write_thread.name)
self.m_read_thread.join(1)
if self.m_read_thread.is_alive():
            logger.warning("%s did not die", self.m_read_thread.name)
# clear queue
try:
while True:
self.m_outgoing.get_nowait()
except Empty:
pass
def get_proto_rev(self):
return self.m_proto_rev
def get_stream(self):
return self.m_stream
def info(self):
return ConnectionInfo(
self.remote_id(),
self.m_stream.getPeerIP(),
self.m_stream.getPeerPort(),
self.m_last_update,
self.m_proto_rev,
)
def is_connected(self):
return self.state == self.State.kActive
def last_update(self):
return self.m_last_update
def set_process_incoming(self, func):
self.m_process_incoming = func
def set_proto_rev(self, proto_rev):
self.m_proto_rev = proto_rev
def set_state(self, state):
with self.m_state_mutex:
State = self.State
# Don't update state any more once we've died
if self.state == State.kDead:
return
# One-shot notify state changes
if self.state != State.kActive and state == State.kActive:
info = self.info()
self.m_notifier.notifyConnection(True, info)
logger.info(
"CONNECTED %s port %s (%s)",
info.remote_ip,
info.remote_port,
info.remote_id,
)
elif self.state != State.kDead and state == State.kDead:
info = self.info()
self.m_notifier.notifyConnection(False, info)
logger.info(
"DISCONNECTED %s port %s (%s)",
info.remote_ip,
info.remote_port,
info.remote_id,
)
if self.m_verbose:
logger.debug(
"%s: %s -> %s", self, _state_map[self.state], _state_map[state]
)
self.state = state
# python optimization: don't use getter here
# def state(self):
# return self.m_state
def remote_id(self):
with self.m_remote_id_mutex:
return self.m_remote_id
def set_remote_id(self, remote_id):
with self.m_remote_id_mutex:
self.m_remote_id = remote_id
def uid(self):
return self.m_uid
def _sendMessages(self, msgs):
self.m_outgoing.put(msgs)
def _readThreadMain(self):
decoder = WireCodec(self.m_proto_rev)
verbose = self.m_verbose
def _getMessage():
decoder.set_proto_rev(self.m_proto_rev)
try:
return Message.read(self.m_stream, decoder, self.m_get_entry_type)
except IOError as e:
logger.warning("read error in handshake: %s", e)
# terminate connection on bad message
self.m_stream.close()
return None
self.set_state(self.State.kHandshake)
try:
handshake_success = self.m_handshake(self, _getMessage, self._sendMessages)
except Exception:
logger.exception("Unhandled exception during handshake")
handshake_success = False
if not handshake_success:
self.set_state(self.State.kDead)
self.m_active = False
else:
self.set_state(self.State.kActive)
try:
while self.m_active:
if not self.m_stream:
break
decoder.set_proto_rev(self.m_proto_rev)
try:
msg = Message.read(
self.m_stream, decoder, self.m_get_entry_type
)
except Exception as e:
if not isinstance(e, StreamEOF):
if verbose:
logger.exception("read error")
else:
logger.warning("read error: %s", e)
# terminate connection on bad message
self.m_stream.close()
break
if verbose:
logger.debug(
"%s received type=%s with str=%s id=%s seq_num=%s value=%s",
self.m_stream.sock_type,
msgtype_str(msg.type),
msg.str,
msg.id,
msg.seq_num_uid,
msg.value,
)
self.m_last_update = monotonic()
self.m_process_incoming(msg, self)
except IOError as e:
# connection died probably
logger.debug("IOError in read thread: %s", e)
except Exception:
logger.warning("Unhandled exception in read thread", exc_info=True)
self.set_state(self.State.kDead)
self.m_active = False
# also kill write thread
self.m_outgoing.put([])
with self.m_shutdown_mutex:
self.m_read_shutdown = True
def _writeThreadMain(self):
encoder = WireCodec(self.m_proto_rev)
verbose = self.m_verbose
out = []
try:
while self.m_active:
msgs = self.m_outgoing.get()
if verbose:
logger.debug("write thread woke up")
if msgs:
logger.debug(
"%s sending %s messages", self.m_stream.sock_type, len(msgs)
)
if not msgs:
continue
encoder.set_proto_rev(self.m_proto_rev)
# python-optimization: checking verbose causes extra overhead
if verbose:
for msg in msgs:
if msg:
logger.debug(
"%s sending type=%s with str=%s id=%s seq_num=%s value=%s",
self.m_stream.sock_type,
msgtype_str(msg.type),
msg.str,
msg.id,
msg.seq_num_uid,
msg.value,
)
msg.write(out, encoder)
else:
for msg in msgs:
if msg:
msg.write(out, encoder)
if not self.m_stream:
break
if not out:
continue
self.m_stream.send(b"".join(out))
del out[:]
# if verbose:
# logger.debug('send %s bytes', encoder.size())
except IOError as e:
# connection died probably
if not isinstance(e, StreamEOF):
logger.debug("IOError in write thread: %s", e)
except Exception:
logger.warning("Unhandled exception in write thread", exc_info=True)
self.set_state(self.State.kDead)
self.m_active = False
self.m_stream.close() # also kill read thread
with self.m_shutdown_mutex:
self.m_write_shutdown = True
def queueOutgoing(self, msg):
with self.m_pending_mutex:
# Merge with previous. One case we don't combine: delete/assign loop.
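            # m_pending_update maps entry id -> Pair(first, second): "first" is the
            # 1-based index in m_pending_outgoing of a pending assign/update for that
            # id, "second" the 1-based index of a pending flags update; 0 means none.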
msgtype = msg.type
if msgtype in [kEntryAssign, kEntryUpdate]:
# don't do this for unassigned id's
msg_id = msg.id
if msg_id == 0xFFFF:
self.m_pending_outgoing.append(msg)
return
mpend = self.m_pending_update.get(msg_id)
if mpend is not None and mpend.first != 0:
# overwrite the previous one for this id
oldidx = mpend.first - 1
oldmsg = self.m_pending_outgoing[oldidx]
if (
oldmsg
and oldmsg.type == kEntryAssign
and msgtype == kEntryUpdate
):
# need to update assignment with seq_num and value
oldmsg = Message.entryAssign(
oldmsg.str, msg_id, msg.seq_num_uid, msg.value, oldmsg.flags
)
else:
oldmsg = msg # easy update
self.m_pending_outgoing[oldidx] = oldmsg
else:
# new, remember it
pos = len(self.m_pending_outgoing)
self.m_pending_outgoing.append(msg)
self.m_pending_update[msg_id] = Pair(pos + 1, 0)
elif msgtype == kEntryDelete:
# don't do this for unassigned id's
msg_id = msg.id
if msg_id == 0xFFFF:
self.m_pending_outgoing.append(msg)
return
# clear previous updates
mpend = self.m_pending_update.get(msg_id)
if mpend is not None:
if mpend.first != 0:
self.m_pending_outgoing[mpend.first - 1] = None
if mpend.second != 0:
self.m_pending_outgoing[mpend.second - 1] = None
self.m_pending_update[msg_id] = _empty_pair
# add deletion
self.m_pending_outgoing.append(msg)
elif msgtype == kFlagsUpdate:
# don't do this for unassigned id's
msg_id = msg.id
                if msg_id == 0xFFFF:
self.m_pending_outgoing.append(msg)
return
mpend = self.m_pending_update.get(msg_id)
if mpend is not None and mpend.second != 0:
# overwrite the previous one for this id
self.m_pending_outgoing[mpend.second - 1] = msg
else:
# new, remember it
pos = len(self.m_pending_outgoing)
self.m_pending_outgoing.append(msg)
self.m_pending_update[msg_id] = Pair(0, pos + 1)
elif msgtype == kClearEntries:
# knock out all previous assigns/updates!
for i, m in enumerate(self.m_pending_outgoing):
if not m:
continue
t = m.type
if t in [
kEntryAssign,
kEntryUpdate,
kFlagsUpdate,
kEntryDelete,
kClearEntries,
]:
self.m_pending_outgoing[i] = None
self.m_pending_update.clear()
self.m_pending_outgoing.append(msg)
else:
self.m_pending_outgoing.append(msg)
def postOutgoing(self, keep_alive):
with self.m_pending_mutex:
# optimization: don't call monotonic unless needed
# now = monotonic()
if not self.m_pending_outgoing:
if not keep_alive:
return
# send keep-alives once a second (if no other messages have been sent)
now = monotonic()
if (now - self.m_last_post) < 1.0:
return
self.m_outgoing.put((Message.keepAlive(),))
else:
now = monotonic()
self.m_outgoing.put(self.m_pending_outgoing)
self.m_pending_outgoing = []
self.m_pending_update.clear()
self.m_last_post = now

View File

@ -0,0 +1,81 @@
# validated: 2018-11-27 DS ac751d32247e cpp/RpcServer.cpp cpp/RpcServer.h cpp/IRpcServer.h
# ----------------------------------------------------------------------------
# Copyright (c) FIRST 2017. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
from collections import namedtuple
from .callback_manager import CallbackManager, CallbackThread
from .message import Message
import logging
logger = logging.getLogger("nt")
_RpcListenerData = namedtuple("RpcListenerData", ["callback", "poller_uid"])
_RpcCall = namedtuple(
"RpcCall", ["local_id", "call_uid", "name", "params", "conn_info", "send_response"]
)
class RpcServerThread(CallbackThread):
def __init__(self):
CallbackThread.__init__(self, "rpc-server")
self.m_response_map = {}
def matches(self, listener, data):
return data.name and data.send_response
def setListener(self, data, listener_uid):
lookup_id = (data.local_id, data.call_uid)
self.m_response_map[lookup_id] = data.send_response
def doCallback(self, callback, data):
local_id = data.local_id
call_uid = data.call_uid
lookup_id = (data.local_id, data.call_uid)
callback(data)
# send empty response
send_response = self.m_response_map.get(lookup_id)
if send_response:
send_response(Message.rpcResponse(local_id, call_uid, ""))
class RpcServer(CallbackManager):
THREAD_CLASS = RpcServerThread
def add(self, callback):
return self.doAdd(_RpcListenerData(callback, None))
def addPolled(self, poller_uid):
return self.doAdd(_RpcListenerData(None, poller_uid))
def removeRpc(self, rpc_uid):
return self.remove(rpc_uid)
def processRpc(
self, local_id, call_uid, name, params, conn_info, send_response, rpc_uid
):
call = _RpcCall(local_id, call_uid, name, params, conn_info, send_response)
self.send(rpc_uid, call)
def postRpcResponse(self, local_id, call_uid, result):
thr = self.m_owner
response = thr.m_response_map.pop((local_id, call_uid), None)
if response is None:
logger.warning(
"Posting RPC response to nonexistent call (or duplicate response)"
)
return False
else:
response(Message.rpcResponse(local_id, call_uid, result))
return True
def start(self):
CallbackManager.start(self)

File diff suppressed because it is too large

View File

@ -0,0 +1,173 @@
# validated: 2019-02-26 DS 0e1f9c2ed271 cpp/Storage_load.cpp
import ast
import binascii
import base64
import re
from configparser import RawConfigParser, NoSectionError
from .value import Value
import logging
logger = logging.getLogger("nt")
PERSISTENT_SECTION = "NetworkTables Storage 3.0"
_key_bool = re.compile('boolean "(.+)"')
_key_double = re.compile('double "(.+)"')
_key_string = re.compile('string "(.+)"')
_key_raw = re.compile('raw "(.+)"')
_key_bool_array = re.compile('array boolean "(.+)"')
_key_double_array = re.compile('array double "(.+)"')
_key_string_array = re.compile('array string "(.+)"')
_value_string = re.compile(r'"((?:\\.|[^"\\])*)",?')
# TODO: these escape functions almost certainly don't deal with unicode
# correctly
# TODO: strictly speaking, this isn't 100% compatible with ntcore... but
def _unescape_string(s):
# shortcut if no escapes present
if "\\" not in s:
return s
# let python do the hard work
return ast.literal_eval('"%s"' % s)
def load_entries(fp, filename, prefix):
entries = []
parser = RawConfigParser()
parser.optionxform = str
try:
if hasattr(parser, "read_file"):
parser.read_file(fp, filename)
else:
parser.readfp(fp, filename)
except IOError:
raise
except Exception as e:
raise IOError("Error reading persistent file: %s" % e)
try:
items = parser.items(PERSISTENT_SECTION)
except NoSectionError:
raise IOError("Persistent section not found")
value = None
m = None
for k, v in items:
# Reduces code duplication
if value:
key = _unescape_string(m.group(1))
if key.startswith(prefix):
entries.append((key, value))
value = None
m = _key_bool.match(k)
if m:
if v == "true":
value = Value.makeBoolean(True)
elif v == "false":
value = Value.makeBoolean(False)
else:
logger.warning("Unrecognized boolean value %r for %s", v, m.group(1))
continue
m = _key_double.match(k)
if m:
try:
value = Value.makeDouble(float(v))
except ValueError as e:
logger.warning("Unrecognized double value %r for %s", v, m.group(1))
continue
m = _key_string.match(k)
if m:
mm = _value_string.match(v)
if mm:
value = Value.makeString(_unescape_string(mm.group(1)))
else:
logger.warning("Unrecognized string value %r for %s", v, m.group(1))
continue
m = _key_raw.match(k)
if m:
try:
v = base64.b64decode(v, validate=True)
value = Value.makeRaw(v)
except binascii.Error:
logger.warning("Unrecognized raw value %r for %s", v, m.group(1))
continue
m = _key_bool_array.match(k)
if m:
bools = []
arr = v.strip().split(",")
if arr != [""]:
for vv in arr:
vv = vv.strip()
if vv == "true":
bools.append(True)
elif vv == "false":
bools.append(False)
else:
                        logger.warning(
                            "Unrecognized bool '%s' in bool array '%s'", vv, m.group(1)
                        )
bools = None
break
if bools is not None:
value = Value.makeBooleanArray(bools)
continue
m = _key_double_array.match(k)
if m:
doubles = []
arr = v.strip().split(",")
if arr != [""]:
for vv in arr:
try:
doubles.append(float(vv))
except ValueError:
logger.warning(
"Unrecognized double '%s' in double array %s",
vv,
m.group(1),
)
doubles = None
break
            if doubles is not None:
                value = Value.makeDoubleArray(doubles)
continue
m = _key_string_array.match(k)
if m:
# Technically, this will let invalid inputs in... but,
# I don't really care. Feel free to fix it if you do.
strings = [_unescape_string(vv) for vv in _value_string.findall(v)]
value = Value.makeStringArray(strings)
continue
logger.warning("Unrecognized type '%s'", k)
if value:
key = _unescape_string(m.group(1))
if key.startswith(prefix):
entries.append((key, value))
return entries
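# Example of the persistent-file format parsed above (illustrative sketch; the
# key names are hypothetical):
#
#   [NetworkTables Storage 3.0]
#   boolean "/foo/enabled"=true
#   double "/foo/speed"=0.5
#   array string "/foo/names"="a","b"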

View File

@ -0,0 +1,110 @@
# validated: 2018-11-27 DS a2ecb1027a62 cpp/Storage_save.cpp
# ----------------------------------------------------------------------------
# Copyright (c) FIRST 2017. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
import ast
import base64
import re
from configparser import RawConfigParser
from .constants import (
NT_BOOLEAN,
NT_DOUBLE,
NT_STRING,
NT_RAW,
NT_BOOLEAN_ARRAY,
NT_DOUBLE_ARRAY,
NT_STRING_ARRAY,
)
import logging
logger = logging.getLogger("nt")
PERSISTENT_SECTION = "NetworkTables Storage 3.0"
_key_bool = re.compile('boolean "(.+)"')
_key_double = re.compile('double "(.+)"')
_key_string = re.compile('string "(.+)"')
_key_raw = re.compile('raw "(.+)"')
_key_bool_array = re.compile('array boolean "(.+)"')
_key_double_array = re.compile('array double "(.+)"')
_key_string_array = re.compile('array string "(.+)"')
_value_string = re.compile(r'"((?:\\.|[^"\\])*)",?')
# TODO: these escape functions almost certainly don't deal with unicode
# correctly
# TODO: strictly speaking, this isn't 100% compatible with ntcore... but
def _unescape_string(s):
# shortcut if no escapes present
if "\\" not in s:
return s
# let python do the hard work
return ast.literal_eval('"%s"' % s)
# This is mostly what we want... unicode strings won't work properly though
_table = {i: chr(i) if i >= 32 and i < 127 else "\\x%02x" % i for i in range(256)}
_table[ord('"')] = '\\"'
_table[ord("\\")] = "\\\\"
_table[ord("\n")] = "\\n"
_table[ord("\t")] = "\\t"
_table[ord("\r")] = "\\r"
def _escape_string(s):
return s.translate(_table)
def save_entries(fp, entries):
parser = RawConfigParser()
parser.optionxform = str
parser.add_section(PERSISTENT_SECTION)
for name, value in entries:
if not value:
continue
t = value.type
v = value.value
if t == NT_BOOLEAN:
name = 'boolean "%s"' % _escape_string(name)
vrepr = "true" if v else "false"
elif t == NT_DOUBLE:
name = 'double "%s"' % _escape_string(name)
vrepr = str(v)
elif t == NT_STRING:
name = 'string "%s"' % _escape_string(name)
vrepr = '"%s"' % _escape_string(v)
elif t == NT_RAW:
name = 'raw "%s"' % _escape_string(name)
vrepr = base64.b64encode(v).decode("ascii")
elif t == NT_BOOLEAN_ARRAY:
name = 'array boolean "%s"' % _escape_string(name)
vrepr = ",".join(["true" if vv else "false" for vv in v])
elif t == NT_DOUBLE_ARRAY:
name = 'array double "%s"' % _escape_string(name)
vrepr = ",".join([str(vv) for vv in v])
elif t == NT_STRING_ARRAY:
name = 'array string "%s"' % _escape_string(name)
vrepr = '","'.join([_escape_string(vv) for vv in v])
if vrepr:
vrepr = '"%s"' % vrepr
else:
continue
parser.set(PERSISTENT_SECTION, name, vrepr)
parser.write(fp, space_around_delimiters=False)

View File

@ -0,0 +1,59 @@
# novalidate
# fmt: off
from collections import namedtuple
#: NetworkTables Entry Information
EntryInfo = namedtuple('EntryInfo', [
# Entry name
'name',
# Entry type
'type',
# Entry flags
'flags',
# Timestamp of last change to entry (type or value).
#'last_change',
])
#: NetworkTables Connection Information
ConnectionInfo = namedtuple('ConnectionInfo', [
'remote_id',
'remote_ip',
'remote_port',
'last_update',
'protocol_version',
])
#: NetworkTables RPC Parameter Definition
RpcParamDef = namedtuple('RpcParamDef', [
'name',
'def_value',
])
#: NetworkTables RPC Result Definition
RpcResultDef = namedtuple('RpcResultDef', [
'name',
'type',
])
#: NetworkTables RPC Definition
RpcDefinition = namedtuple('RpcDefinition', [
'version',
'name',
'params',
'results',
])
#: NetworkTables RPC Call Data
RpcCallInfo = namedtuple('RpcCallInfo', [
'rpc_id',
'call_uid',
'name',
'params',
])

View File

@ -0,0 +1,31 @@
# notrack
__all__ = ["create_rlock", "sock_makefile"]
import socket
import threading
def create_rlock(name):
return threading.RLock()
def sock_makefile(s, mode):
return s.makefile(mode)
def sock_create_connection(address):
return socket.create_connection(address)
# Call this before creating any NetworkTable objects
def enable_lock_debugging(sock_block_period=None):
from . import _impl_debug
_impl_debug.sock_block_period = sock_block_period
g = globals()
g["create_rlock"] = _impl_debug.create_tracked_rlock
g["sock_makefile"] = _impl_debug.blocking_sock_makefile
g["sock_create_connection"] = _impl_debug.blocking_sock_create_connection
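# Usage sketch (the import path is an assumption; call this before creating any
# NetworkTables objects):
#
#   from networktables._impl.support import sockets
#   sockets.enable_lock_debugging(sock_block_period=0.1)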

View File

@ -0,0 +1,152 @@
# notrack
"""
Instrumentation for finding deadlocks in networktables
"""
from __future__ import print_function
import inspect
import socket
import threading
import time
# Number of seconds to block
sock_block_period = None
# List of locks that can be acquired from the main thread
main_locks = ["entry_lock", "trans_lock"]
# List of locks that are allowed to be held when accessing a socket
# -> must never be locks that can be acquired by the main thread
sock_locks = ["client_conn_lock", "server_conn_lock", "write_lock"]
# Dictionary of locks
# key: name, value: locks that can be held when acquiring the lock
locks = {
# Never held by robot thread
"client_conn_lock": ["client_conn_lock"],
"entry_lock": ["entry_lock", "client_conn_lock"],
# Never held by robot thread
"server_conn_lock": ["server_conn_lock"],
"trans_lock": [
"entry_lock",
# Not 100% sure if this should be allowed
# -> this only happens when NetworkTable API calls are made from
# a fired listener
"server_conn_lock",
"client_conn_lock",
],
# Never held by robot thread
"write_lock": ["client_conn_lock", "server_conn_lock", "write_lock"],
}
local = threading.local()
class WrappedLock(threading._PyRLock):
def __init__(self, name):
threading._PyRLock.__init__(self)
self._name = name
self._nt_creator = _get_caller()
def acquire(self, blocking=True, timeout=-1):
# This check isn't strictly true..
if isinstance(threading.current_thread(), threading._MainThread):
assert self._name in main_locks, (
"%s cannot be held in main thread" % self._name
)
if not hasattr(local, "held_locks"):
local.held_locks = []
for lock in local.held_locks:
assert (
lock in locks[self._name]
), "Cannot hold %s when trying to acquire %s" % (lock._name, self._name)
retval = threading._PyRLock.acquire(self, blocking=blocking, timeout=timeout)
if retval != False:
local.held_locks.append(self)
__enter__ = acquire
def release(self):
threading._PyRLock.release(self)
assert local.held_locks[-1] == self
local.held_locks.pop()
# Allow this to be used in comparisons
def __eq__(self, other):
if isinstance(other, str):
return self._name.__eq__(other)
else:
return self._name.__eq__(other._name)
def __cmp__(self, other):
if isinstance(other, str):
return self._name.__cmp__(other)
else:
return self._name.__cmp__(other._name)
def __hash__(self):
return self._name.__hash__()
def create_tracked_rlock(name):
assert name in locks
return WrappedLock(name)
def assert_not_locked(t):
assert not isinstance(
threading.current_thread(), threading._MainThread
), "Should not make socket calls from main thread"
if not hasattr(local, "held_locks"):
local.held_locks = []
for lock in local.held_locks:
assert lock in sock_locks, "ERROR: network %s was made while holding %s" % (
t,
lock._name,
)
class WrappedFile:
def __init__(self, file):
self._file = file
def write(self, data):
assert_not_locked("write")
if sock_block_period:
time.sleep(sock_block_period)
return self._file.write(data)
def read(self, *args, **kwargs):
assert_not_locked("read")
if sock_block_period:
time.sleep(sock_block_period)
return self._file.read(*args, **kwargs)
def __getattr__(self, attr):
return getattr(self._file, attr)
def blocking_sock_makefile(s, mode):
return WrappedFile(s.makefile(mode))
def blocking_sock_create_connection(address):
assert_not_locked("connect")
if sock_block_period:
time.sleep(sock_block_period)
return socket.create_connection(address)
def _get_caller():
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 3)
return "%s:%s %s" % (calframe[3][1], calframe[3][2], calframe[3][3])

View File

@ -0,0 +1,45 @@
# novalidate
"""
Operations related to LEB128 encoding/decoding
The algorithm is taken from Appendix C of the DWARF 3 spec. For information
on the encodings refer to section "7.6 - Variable Length Data"
"""
import sys
def size_uleb128(value):
count = 0
while True:
value >>= 7
count += 1
if value == 0:
break
return count
def encode_uleb128(value):
out = bytearray()
while True:
byte = value & 0x7F
value >>= 7
if value != 0:
byte = byte | 0x80
out.append(byte)
if value == 0:
break
return out
def read_uleb128(rstream):
result = 0
shift = 0
while True:
b = rstream.read(1)[0]
result |= (b & 0x7F) << shift
shift += 7
if (b & 0x80) == 0:
break
return result
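# Worked example (sketch): 300 == 0b10_0101100 is emitted little-endian, 7 bits
# per byte, with the high bit marking continuation:
#
#   encode_uleb128(300)  # -> bytearray(b'\xac\x02'), since 0xAC == 0x2C | 0x80
#   size_uleb128(300)    # -> 2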

View File

@ -0,0 +1,10 @@
# notrack
from collections import namedtuple
Pair = namedtuple("Pair", ["first", "second"])
def ensure_id_exists(lst, msg_id, default=None):
if msg_id >= len(lst):
lst += [default] * (msg_id - len(lst) + 1)

View File

@ -0,0 +1,52 @@
import threading
import logging
logger = logging.getLogger("nt.th")
class SafeThread(object):
"""
Not exactly the same as wpiutil SafeThread... exists so we don't have
to duplicate functionality in a lot of places
"""
# Name each thread uniquely to make debugging easier
_global_indices_lock = threading.Lock()
_global_indices = {}
def __init__(self, target, name, args=()):
"""
Note: thread is automatically started and daemonized
"""
with SafeThread._global_indices_lock:
idx = SafeThread._global_indices.setdefault(name, -1) + 1
SafeThread._global_indices[name] = idx
name = "%s-%s" % (name, idx)
self.name = name
self._thread = threading.Thread(
target=self._run, name=name, args=(target, args)
)
self._thread.daemon = True
self.is_alive = self._thread.is_alive
self.join = self._thread.join
self._thread.start()
def join(self, timeout=1):
self._thread.join(timeout=timeout)
        if self._thread.is_alive():
logger.warning("Thread %s did not stop!", self.name)
def _run(self, target, args):
logger.debug("Started thread %s", self.name)
try:
target(*args)
except Exception:
logger.warning("Thread %s died unexpectedly", self.name, exc_info=True)
else:
logger.debug("Thread %s exited", self.name)
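# Usage sketch (worker_fn is a placeholder): the thread is daemonized and
# started by the constructor, so there is no separate start() call.
#
#   th = SafeThread(target=worker_fn, name="nt-example")
#   ...
#   th.join(timeout=1)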

View File

@ -0,0 +1,18 @@
# novalidate
import threading
class UidVector(dict):
def __init__(self):
self.idx = 0
self.lock = threading.Lock()
def add(self, item):
"""Only use this method to add to the UidVector"""
with self.lock:
idx = self.idx
self.idx += 1
self[idx] = item
return idx

View File

@ -0,0 +1,103 @@
# novalidate
import threading
import socket
from .tcp_stream import TCPStream
import logging
logger = logging.getLogger("nt")
class TcpAcceptor(object):
def __init__(self, port, address):
# Protects open/shutdown/close
# -> This is a condition to allow testing code to wait
# for server startup
self.lock = threading.Condition()
self.m_lsd = None
self.m_port = port
self.m_address = address
self.m_listening = False
self.m_shutdown = False
def waitForStart(self, timeout=None):
with self.lock:
if not self.m_listening:
self.lock.wait(timeout=timeout)
return self.m_listening
def close(self):
with self.lock:
if self.m_lsd:
self.shutdown()
self.m_lsd.close()
self.m_lsd = None
def start(self):
with self.lock:
if self.m_listening:
return False
self.m_lsd = socket.socket()
try:
self.m_lsd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.m_lsd.bind((self.m_address, self.m_port))
# needed for testing
if self.m_port == 0:
self.m_port = self.m_lsd.getsockname()[1]
self.m_lsd.listen(10)
except OSError:
logger.exception("Error starting server")
try:
self.m_lsd.close()
except Exception:
pass
self.m_lsd = None
self.lock.notify()
return False
self.m_listening = True
self.lock.notify()
logger.debug("Listening on %s %s", self.m_address, self.m_port)
return True
def shutdown(self):
with self.lock:
if self.m_listening and not self.m_shutdown:
self.m_shutdown = True
self.m_listening = False
try:
self.m_lsd.shutdown(socket.SHUT_RDWR)
except OSError:
pass
def accept(self):
if not self.m_listening or self.m_shutdown:
return
try:
sd, (peer_ip, peer_port) = self.m_lsd.accept()
except OSError:
if not self.m_shutdown:
logger.warning("Error accepting connection", exc_info=True)
return
if self.m_shutdown:
try:
sd.close()
except Exception:
pass
return
return TCPStream(sd, peer_ip, peer_port, "server")

View File

@ -0,0 +1,73 @@
# novalidate
import socket
import threading
from .tcp_stream import TCPStream
import logging
logger = logging.getLogger("nt.net")
class TcpConnector(object):
def __init__(self, timeout, verbose):
self.cond = threading.Condition()
self.threads = {}
self.active = False
self.result = None
self.timeout = timeout
self.verbose = verbose
def setVerbose(self, verbose):
self.verbose = verbose
def connect(self, server_or_servers):
if isinstance(server_or_servers, tuple):
server, port = server_or_servers
return self._connect(server, port)
# parallel connect
# -> only connect to servers that aren't currently being connected to
with self.cond:
self.active = True
for item in server_or_servers:
if item not in self.threads:
th = threading.Thread(
target=self._thread, args=item, name="TcpConnector"
)
th.daemon = True
th.start()
self.threads[item] = th
self.cond.wait(2 * self.timeout)
self.active = False
result = self.result
self.result = None
return result
def _thread(self, server, port):
stream = self._connect(server, port)
with self.cond:
self.threads.pop((server, port), None)
if self.active and self.result is None:
self.result = stream
self.cond.notify()
def _connect(self, server, port):
try:
if self.verbose:
logger.debug("Trying connection to %s:%s", server, port)
if self.timeout is None:
sd = socket.create_connection((server, port))
else:
sd = socket.create_connection((server, port), timeout=self.timeout)
sd.settimeout(None)
return TCPStream(sd, server, port, "client")
except IOError:
if self.verbose:
logger.debug("Connection to %s:%s failed", server, port)
return
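# Usage sketch (addresses are illustrative): a single (host, port) tuple
# connects synchronously; a list of tuples races connections in parallel and
# returns the first stream to succeed, or None after roughly 2x the timeout.
#
#   connector = TcpConnector(timeout=1, verbose=False)
#   stream = connector.connect([("10.20.25.2", 1735), ("127.0.0.1", 1735)])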

View File

@ -0,0 +1,72 @@
# novalidate
import select
import socket
import threading
class StreamEOF(IOError):
pass
class TCPStream(object):
def __init__(self, sd, peer_ip, peer_port, sock_type):
self.m_sd = sd
self.m_peerIP = peer_ip
self.m_peerPort = peer_port
self.m_rdsock = sd.makefile("rb")
self.m_wrsock = sd.makefile("wb")
self.close_lock = threading.Lock()
# Python-specific for debugging
self.sock_type = sock_type
def read(self, size):
# TODO: ntcore does a select to wait for read to be available. Necessary?
data = self.m_rdsock.read(size)
if size > 0 and len(data) != size:
raise StreamEOF("end of file")
return data
def readline(self):
return self.m_rdsock.readline()
def readStruct(self, s):
sz = s.size
data = self.m_rdsock.read(sz)
if len(data) != sz:
raise StreamEOF("end of file")
return s.unpack(data)
def send(self, contents):
self.m_wrsock.write(contents)
self.m_wrsock.flush()
def close(self):
with self.close_lock:
if self.m_sd:
try:
self.m_sd.shutdown(socket.SHUT_RDWR)
except OSError:
pass
self.m_sd.close()
# self.m_sd = None
def getPeerIP(self):
return self.m_peerIP
def getPeerPort(self):
return self.m_peerPort
def setNoDelay(self):
self.m_sd.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
def _waitForReadEvent(self, timeout):
r, _, _ = select.select((self.m_sd,), (), (), timeout)
return len(r) > 0

View File

@ -0,0 +1,119 @@
# validated: 2018-11-27 DS 175c6c1f0130 cpp/Value.cpp include/networktables/NetworkTableValue.h
"""
Internal storage for ntcore values
Uses namedtuple for efficiency, and because Value objects are supposed
to be immutable. Will have to measure that and see if there's a performance
penalty for this in python.
Original ntcore stores the last change time, but it doesn't seem to
be used anywhere, so we don't store that to make equality comparison
more efficient.
"""
from collections import namedtuple
from .constants import (
NT_BOOLEAN,
NT_DOUBLE,
NT_STRING,
NT_RAW,
NT_BOOLEAN_ARRAY,
NT_DOUBLE_ARRAY,
NT_STRING_ARRAY,
NT_RPC,
)
class Value(namedtuple("Value", ["type", "value"])):
__slots__ = ()
@classmethod
def makeBoolean(cls, value):
if value:
return cls._TRUE_VALUE
else:
return cls._FALSE_VALUE
@classmethod
def makeDouble(cls, value):
return cls(NT_DOUBLE, float(value))
@classmethod
def makeString(cls, value):
return cls(NT_STRING, str(value))
@classmethod
def makeRaw(cls, value):
return cls(NT_RAW, bytes(value))
# TODO: array stuff a good idea?
@classmethod
def makeBooleanArray(cls, value):
return cls(NT_BOOLEAN_ARRAY, tuple(bool(v) for v in value))
@classmethod
def makeDoubleArray(cls, value):
return cls(NT_DOUBLE_ARRAY, tuple(float(v) for v in value))
@classmethod
def makeStringArray(cls, value):
return cls(NT_STRING_ARRAY, tuple(str(v) for v in value))
@classmethod
def makeRpc(cls, value):
return cls(NT_RPC, str(value))
@classmethod
def getFactory(cls, value):
if isinstance(value, bool):
return cls.makeBoolean
elif isinstance(value, (int, float)):
return cls.makeDouble
elif isinstance(value, str):
return cls.makeString
elif isinstance(value, (bytes, bytearray)):
return cls.makeRaw
# Do best effort for arrays, but can't catch all cases
# .. if you run into an error here, use a less generic type
elif isinstance(value, (list, tuple)):
if not value:
raise TypeError("If you use a list here, cannot be empty")
first = value[0]
if isinstance(first, bool):
return cls.makeBooleanArray
elif isinstance(first, (int, float)):
return cls.makeDoubleArray
elif isinstance(first, str):
return cls.makeStringArray
else:
raise ValueError("Can only use lists of bool/int/float/strings")
elif value is None:
raise ValueError("Cannot put None into NetworkTable")
else:
raise ValueError(
"Can only put bool/int/float/str/bytes or lists/tuples of them"
)
@classmethod
def getFactoryByType(cls, type_id):
return cls._make_map[type_id]
# optimization
Value._TRUE_VALUE = Value(NT_BOOLEAN, True)
Value._FALSE_VALUE = Value(NT_BOOLEAN, False)
Value._make_map = {
NT_BOOLEAN: Value.makeBoolean,
NT_DOUBLE: Value.makeDouble,
NT_STRING: Value.makeString,
NT_RAW: Value.makeRaw,
NT_BOOLEAN_ARRAY: Value.makeBooleanArray,
NT_DOUBLE_ARRAY: Value.makeDoubleArray,
NT_STRING_ARRAY: Value.makeStringArray,
NT_RPC: Value.makeRpc,
}
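# Usage sketch: values are created through the factory classmethods rather than
# the constructor.
#
#   Value.makeDouble(1.5)              # -> Value(NT_DOUBLE, 1.5)
#   Value.getFactory(True)(True)       # -> the shared _TRUE_VALUE instance
#   Value.makeBooleanArray([True, 0])  # -> Value(NT_BOOLEAN_ARRAY, (True, False))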

View File

@ -0,0 +1,2 @@
# Autogenerated by setup.py
__version__ = '2021.0.0'

View File

@ -0,0 +1,239 @@
# validated: 2017-09-28 DS 5ab20bb27c97 cpp/WireDecoder.cpp cpp/WireDecoder.h cpp/WireEncoder.cpp cpp/WireEncoder.h
"""
This encompasses the WireEncoder and WireDecoder stuff in ntcore
Reading:
    Messages are decoded via :meth:`Message.read`, which uses the struct
    formats and read_* helpers defined below.
Writing:
    Each message type has a write function, which takes a single list
    argument; encoded bytes are appended to that list.
    The write routines assume that messages are tuples with the following
    fields:
        type, str, value, id, flags, seq_num_uid
"""
import logging
import struct
from .constants import (
NT_BOOLEAN,
NT_DOUBLE,
NT_STRING,
NT_RAW,
NT_BOOLEAN_ARRAY,
NT_DOUBLE_ARRAY,
NT_STRING_ARRAY,
NT_RPC,
)
from .support import leb128
from .value import Value
logger = logging.getLogger("nt.wire")
_clientHello = struct.Struct(">H")
_protoUnsup = struct.Struct(">H")
_entryAssignV2 = struct.Struct(">HH")
_entryUpdate = struct.Struct(">HH")
_serverHello = struct.Struct("b")
_entryAssignV3 = struct.Struct(">HHb")
_flagsUpdate = struct.Struct(">Hb")
_entryDelete = struct.Struct(">H")
_clearEntries = struct.Struct(">I")
_executeRpc = struct.Struct(">HH")
_rpcResponse = struct.Struct(">HH")
class WireCodec(object):
_bool_fmt = struct.Struct("?")
_double_fmt = struct.Struct(">d")
_string_fmt = struct.Struct(">H")
_array_fmt = struct.Struct("B")
_short_fmt = struct.Struct(">H")
clientHello = _clientHello
protoUnsup = _protoUnsup
entryUpdate = _entryUpdate
def __init__(self, proto_rev):
self.proto_rev = None
self.set_proto_rev(proto_rev)
def set_proto_rev(self, proto_rev):
# python-specific optimization
if self.proto_rev == proto_rev:
return
self.proto_rev = proto_rev
if proto_rev == 0x0200:
self.read_arraylen = self.read_arraylen_v2_v3
self.read_string = self.read_string_v2
self.write_arraylen = self.write_arraylen_v2_v3
self.write_string = self.write_string_v2
self.entryAssign = _entryAssignV2
self._del("serverHello")
self._del("flagsUpdate")
self._del("entryDelete")
self._del("clearEntries")
self._del("executeRpc")
self._del("rpcResponse")
elif proto_rev == 0x0300:
self.read_arraylen = self.read_arraylen_v2_v3
self.read_string = self.read_string_v3
self.write_arraylen = self.write_arraylen_v2_v3
self.write_string = self.write_string_v3
self.entryAssign = _entryAssignV3
self.serverHello = _serverHello
self.flagsUpdate = _flagsUpdate
self.entryDelete = _entryDelete
self.clearEntries = _clearEntries
self.executeRpc = _executeRpc
self.rpcResponse = _rpcResponse
else:
raise ValueError("Unsupported protocol")
def _del(self, attr):
if hasattr(self, attr):
delattr(self, attr)
def read_value(self, vtype, rstream):
if vtype == NT_BOOLEAN:
return Value.makeBoolean(rstream.readStruct(self._bool_fmt)[0])
elif vtype == NT_DOUBLE:
return Value.makeDouble(rstream.readStruct(self._double_fmt)[0])
elif vtype == NT_STRING:
return Value.makeString(self.read_string(rstream))
elif vtype == NT_BOOLEAN_ARRAY:
alen = self.read_arraylen(rstream)
return Value.makeBooleanArray(
[rstream.readStruct(self._bool_fmt)[0] for _ in range(alen)]
)
elif vtype == NT_DOUBLE_ARRAY:
alen = self.read_arraylen(rstream)
return Value.makeDoubleArray(
[rstream.readStruct(self._double_fmt)[0] for _ in range(alen)]
)
elif vtype == NT_STRING_ARRAY:
alen = self.read_arraylen(rstream)
return Value.makeStringArray(
[self.read_string(rstream) for _ in range(alen)]
)
elif self.proto_rev >= 0x0300:
if vtype == NT_RAW:
slen = leb128.read_uleb128(rstream)
return Value.makeRaw(rstream.read(slen))
elif vtype == NT_RPC:
return Value.makeRpc(self.read_string(rstream))
raise ValueError("Cannot decode value type %s" % vtype)
def write_value(self, v, out):
vtype = v.type
if vtype == NT_BOOLEAN:
out.append(self._bool_fmt.pack(v.value))
return
elif vtype == NT_DOUBLE:
out.append(self._double_fmt.pack(v.value))
return
elif vtype == NT_STRING:
self.write_string(v.value, out)
return
elif vtype == NT_BOOLEAN_ARRAY:
alen = self.write_arraylen(v.value, out)
            out += (self._bool_fmt.pack(vv) for vv in v.value[:alen])
return
elif vtype == NT_DOUBLE_ARRAY:
alen = self.write_arraylen(v.value, out)
            out += (self._double_fmt.pack(vv) for vv in v.value[:alen])
return
elif vtype == NT_STRING_ARRAY:
alen = self.write_arraylen(v.value, out)
for s in v.value[:alen]:
self.write_string(s, out)
return
elif self.proto_rev >= 0x0300:
if vtype == NT_RPC:
self.write_string(v.value, out)
return
elif vtype == NT_RAW:
s = v.value
out += (leb128.encode_uleb128(len(s)), s)
return
raise ValueError("Cannot encode invalid value type %s" % vtype)
#
# v2/v3 routines
#
def read_arraylen_v2_v3(self, rstream):
return rstream.readStruct(self._array_fmt)[0]
# v4 perhaps
# def read_arraylen_v3(self, rstream):
# return leb128.read_uleb128(rstream)
def read_string_v2(self, rstream):
slen = rstream.readStruct(self._string_fmt)[0]
b = rstream.read(slen)
try:
return b.decode("utf-8")
except UnicodeDecodeError:
logger.warning("Received an invalid UTF-8 string: %r", b)
return "INVALID UTF-8: %r" % b
def read_string_v3(self, rstream):
slen = leb128.read_uleb128(rstream)
b = rstream.read(slen)
try:
return b.decode("utf-8")
except UnicodeDecodeError:
logger.warning("Received an invalid UTF-8 string: %r", b)
return "INVALID UTF-8: %r" % b
def write_arraylen_v2_v3(self, a, out):
alen = min(len(a), 0xFF)
out.append(self._array_fmt.pack(alen))
return alen
# v4 perhaps
# def write_arraylen_v3(self, a, out):
# alen = len(a)
# out.append(leb128.encode_uleb128(alen))
# return alen
def write_string_v2(self, s, out):
s = s.encode("utf-8")
out += (self._string_fmt.pack(min(len(s), 0xFFFF)), s[:0xFFFF])
def write_string_v3(self, s, out):
s = s.encode("utf-8")
out += (leb128.encode_uleb128(len(s)), s)
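# Encoding sketch (illustrative; Message comes from .message and the exact bytes
# depend on the msgtype constants in .constants):
#
#   codec = WireCodec(0x0300)
#   out = []
#   Message.flagsUpdate(1, 0x01).write(out, codec)   # entry id 1, flags 0x01
#   packet = b"".join(out)   # type byte + _flagsUpdate.pack(1, 0x01)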

View File

@ -0,0 +1,525 @@
from typing import Any, Callable, Sequence, TypeVar, Union
from ._impl.constants import (
NT_BOOLEAN,
NT_DOUBLE,
NT_STRING,
NT_RAW,
NT_BOOLEAN_ARRAY,
NT_DOUBLE_ARRAY,
NT_STRING_ARRAY,
NT_PERSISTENT,
)
from ._impl.value import Value
__all__ = ["NetworkTableEntry"]
D = TypeVar("D")
class NetworkTableEntry:
"""
Holds a value from NetworkTables, and changes it as new entries
come in. Do not create this object directly, use
:func:`.NetworkTablesInstance.getEntry` or :meth:`.NetworkTable.getEntry`
to obtain an instance of this class.
Using NetworkTableEntry objects to access/change NT values is more
efficient than the getX/putX methods of :class:`.NetworkTable`.
.. versionadded:: 2018.0.0
"""
__slots__ = ["__api", "_local_id", "key", "_value"]
def __init__(self, api, local_id, key):
self.__api = api
self._local_id = local_id
self.key = key
self._value = None
def getHandle(self):
"""Gets the native handle for the entry"""
return self._local_id
def exists(self) -> bool:
"""Determines if the entry currently exists"""
return self.__api.getEntryTypeById(self._local_id) != 0
def getName(self) -> str:
"""Gets the name of the entry (the key)"""
return self.key
def getType(self):
"""Gets the type of the entry
:rtype: :class:`.NetworkTablesInstance.EntryTypes`
"""
return self.__api.getEntryTypeById(self._local_id)
def getFlags(self) -> int:
"""Returns the flags.
:returns: the flags (bitmask)
"""
return self.__api.getEntryFlagsById(self._local_id)
def getInfo(self) -> tuple:
"""Gets combined information about the entry.
:returns: Entry information
:rtype: tuple of (name, type, flags)
"""
return self.__api.getEntryInfoById(self._local_id)
@property
def value(self):
"""Property to access the value of this entry, or None if the entry
hasn't been initialized yet (use setXXX or forceXXX)
"""
try:
return self._value[1]
except TypeError:
return None
# deprecated, from autoUpdateValue
def get(self):
try:
return self._value[1]
except TypeError:
return None
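    # Usage sketch (inst is assumed to be a NetworkTablesInstance, per the class
    # docstring; the key is illustrative):
    #
    #   entry = inst.getEntry("/SmartDashboard/speed")
    #   entry.setDouble(0.5)
    #   speed = entry.getDouble(0.0)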
def getBoolean(self, defaultValue: D) -> Union[bool, D]:
"""Gets the entry's value as a boolean. If the entry does not exist or is of
different type, it will return the default value.
:param defaultValue: the value to be returned if no value is found
:returns: the entry's value or the given default value
"""
value = self._value
if not value or value[0] != NT_BOOLEAN:
return defaultValue
return value[1]
def getDouble(self, defaultValue: D) -> Union[float, D]:
"""Gets the entry's value as a double. If the entry does not exist or is of
different type, it will return the default value.
:param defaultValue: the value to be returned if no value is found
:returns: the entry's value or the given default value
"""
value = self._value
if not value or value[0] != NT_DOUBLE:
return defaultValue
return value[1]
getNumber = getDouble
def getString(self, defaultValue: D) -> Union[str, D]:
"""Gets the entry's value as a string. If the entry does not exist or is of
different type, it will return the default value.
:param defaultValue: the value to be returned if no value is found
:returns: the entry's value or the given default value
"""
value = self._value
if not value or value[0] != NT_STRING:
return defaultValue
return value[1]
def getRaw(self, defaultValue: D) -> Union[bytes, D]:
"""Gets the entry's value as a raw value (byte array). If the entry does not
exist or is of different type, it will return the default value.
:param defaultValue: the value to be returned if no value is found
:returns: the entry's value or the given default value
"""
value = self._value
if not value or value[0] != NT_RAW:
return defaultValue
return value[1]
def getBooleanArray(self, defaultValue: D) -> Union[Sequence[bool], D]:
"""Gets the entry's value as a boolean array. If the entry does not
exist or is of different type, it will return the default value.
:param defaultValue: the value to be returned if no value is found
:returns: the entry's value or the given default value
"""
value = self._value
if not value or value[0] != NT_BOOLEAN_ARRAY:
return defaultValue
return value[1]
def getDoubleArray(self, defaultValue: D) -> Union[Sequence[float], D]:
"""Gets the entry's value as a double array. If the entry does not
exist or is of different type, it will return the default value.
:param defaultValue: the value to be returned if no value is found
:returns: the entry's value or the given default value
"""
value = self._value
if not value or value[0] != NT_DOUBLE_ARRAY:
return defaultValue
return value[1]
def getStringArray(self, defaultValue: D) -> Union[Sequence[str], D]:
"""Gets the entry's value as a string array. If the entry does not
exist or is of different type, it will return the default value.
:param defaultValue: the value to be returned if no value is found
:returns: the entry's value or the given default value
        :rtype: list(str)
"""
value = self._value
if not value or value[0] != NT_STRING_ARRAY:
return defaultValue
return value[1]
@classmethod
def isValidDataType(cls, data):
if isinstance(data, (bytes, bytearray)):
return True
if isinstance(data, (list, tuple)):
if len(data) == 0:
raise ValueError("If you use a list here, cannot be empty")
data = data[0]
return isinstance(data, (int, float, str, bool))
def setDefaultValue(self, defaultValue) -> bool:
"""Sets the entry's value if it does not exist.
:param defaultValue: the default value to set
:returns: False if the entry exists with a different type
.. warning:: Do not set an empty list, it will fail
"""
value = Value.getFactory(defaultValue)(defaultValue)
return self.__api.setDefaultEntryValueById(self._local_id, value)
def setDefaultBoolean(self, defaultValue: bool) -> bool:
"""Sets the entry's value if it does not exist.
:param defaultValue: the default value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeBoolean(defaultValue)
return self.__api.setDefaultEntryValueById(self._local_id, value)
def setDefaultDouble(self, defaultValue: float) -> bool:
"""Sets the entry's value if it does not exist.
:param defaultValue: the default value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeDouble(defaultValue)
return self.__api.setDefaultEntryValueById(self._local_id, value)
setDefaultNumber = setDefaultDouble
def setDefaultString(self, defaultValue: str) -> bool:
"""Sets the entry's value if it does not exist.
:param defaultValue: the default value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeString(defaultValue)
return self.__api.setDefaultEntryValueById(self._local_id, value)
def setDefaultRaw(self, defaultValue: bytes) -> bool:
"""Sets the entry's value if it does not exist.
:param defaultValue: the default value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeRaw(defaultValue)
return self.__api.setDefaultEntryValueById(self._local_id, value)
def setDefaultBooleanArray(self, defaultValue: Sequence[bool]) -> bool:
"""Sets the entry's value if it does not exist.
:param defaultValue: the default value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeBooleanArray(defaultValue)
return self.__api.setDefaultEntryValueById(self._local_id, value)
def setDefaultDoubleArray(self, defaultValue: Sequence[float]) -> bool:
"""Sets the entry's value if it does not exist.
:param defaultValue: the default value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeDoubleArray(defaultValue)
return self.__api.setDefaultEntryValueById(self._local_id, value)
setDefaultNumberArray = setDefaultDoubleArray
def setDefaultStringArray(self, defaultValue: Sequence[str]) -> bool:
"""Sets the entry's value if it does not exist.
:param defaultValue: the default value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeStringArray(defaultValue)
return self.__api.setDefaultEntryValueById(self._local_id, value)
def setValue(self, value) -> bool:
"""Sets the entry's value
:param value: the value that will be assigned
:returns: False if the table key already exists with a different type
.. warning:: Empty lists will fail
"""
value = Value.getFactory(value)(value)
return self.__api.setEntryValueById(self._local_id, value)
def setBoolean(self, value: bool) -> bool:
"""Sets the entry's value.
:param value: the value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeBoolean(value)
return self.__api.setEntryValueById(self._local_id, value)
def setDouble(self, value: float) -> bool:
"""Sets the entry's value.
:param value: the value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeDouble(value)
return self.__api.setEntryValueById(self._local_id, value)
setNumber = setDouble
def setString(self, value: str) -> bool:
"""Sets the entry's value.
:param value: the value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeString(value)
return self.__api.setEntryValueById(self._local_id, value)
def setRaw(self, value: bytes) -> bool:
"""Sets the entry's value.
:param value: the value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeRaw(value)
return self.__api.setEntryValueById(self._local_id, value)
def setBooleanArray(self, value: Sequence[bool]) -> bool:
"""Sets the entry's value.
:param value: the value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeBooleanArray(value)
return self.__api.setEntryValueById(self._local_id, value)
def setDoubleArray(self, value: Sequence[float]) -> bool:
"""Sets the entry's value.
:param value: the value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeDoubleArray(value)
return self.__api.setEntryValueById(self._local_id, value)
setNumberArray = setDoubleArray
def setStringArray(self, value: Sequence[str]) -> bool:
"""Sets the entry's value.
:param value: the value to set
:returns: False if the entry exists with a different type
"""
value = Value.makeStringArray(value)
return self.__api.setEntryValueById(self._local_id, value)
def forceSetValue(self, value):
"""Sets the entry's value
:param value: the value that will be assigned
.. warning:: Empty lists will fail
"""
value = Value.getFactory(value)(value)
return self.__api.setEntryTypeValueById(self._local_id, value)
def forceSetBoolean(self, value: bool):
"""Sets the entry's value.
:param value: the value to set
"""
value = Value.makeBoolean(value)
return self.__api.setEntryTypeValueById(self._local_id, value)
def forceSetDouble(self, value: float):
"""Sets the entry's value.
:param value: the value to set
"""
value = Value.makeDouble(value)
return self.__api.setEntryTypeValueById(self._local_id, value)
forceSetNumber = forceSetDouble
def forceSetString(self, value: str):
"""Sets the entry's value.
:param value: the value to set
"""
value = Value.makeString(value)
return self.__api.setEntryTypeValueById(self._local_id, value)
def forceSetRaw(self, value: bytes):
"""Sets the entry's value.
:param value: the value to set
"""
value = Value.makeRaw(value)
return self.__api.setEntryTypeValueById(self._local_id, value)
def forceSetBooleanArray(self, value: Sequence[bool]):
"""Sets the entry's value.
:param value: the value to set
"""
value = Value.makeBooleanArray(value)
return self.__api.setEntryTypeValueById(self._local_id, value)
def forceSetDoubleArray(self, value: Sequence[float]):
"""Sets the entry's value.
:param value: the value to set
"""
value = Value.makeDoubleArray(value)
return self.__api.setEntryTypeValueById(self._local_id, value)
forceSetNumberArray = forceSetDoubleArray
def forceSetStringArray(self, value: Sequence[str]):
"""Sets the entry's value.
:param value: the value to set
"""
value = Value.makeStringArray(value)
return self.__api.setEntryTypeValueById(self._local_id, value)
def setFlags(self, flags: int) -> None:
"""Sets flags.
:param flags: the flags to set (bitmask)
"""
flags = self.getFlags() | flags
self.__api.setEntryFlagsById(self._local_id, flags)
def clearFlags(self, flags: int) -> None:
"""Clears flags
:param flags: the flags to clear (bitmask)
"""
flags = self.getFlags() & ~flags
self.__api.setEntryFlagsById(self._local_id, flags)
def setPersistent(self) -> None:
"""Make value persistent through program restarts."""
self.setFlags(NT_PERSISTENT)
def clearPersistent(self) -> None:
"""Stop making value persistent through program restarts."""
self.clearFlags(NT_PERSISTENT)
def isPersistent(self) -> bool:
"""Returns whether the value is persistent through program restarts.
:returns: True if the value is persistent.
"""
return (self.getFlags() & NT_PERSISTENT) != 0
def delete(self) -> bool:
"""Deletes the entry."""
return self.__api.deleteEntryById(self._local_id)
#
# TODO: RPC entry stuff not implemented
#
def addListener(
self,
listener: Callable[["NetworkTableEntry", str, Any, int], None],
flags: int,
paramIsNew: bool = True,
):
"""Add a listener for changes to the entry
:param listener: the listener to add
:type listener: `callable(entry, key, value, param)`
:param flags: bitmask specifying desired notifications
:type flags: :class:`.NetworkTablesInstance.NotifyFlags`
:param paramIsNew: If True, the listener fourth parameter is a boolean set to True
if the listener is being called because of a new value in the
table. Otherwise, the parameter is an integer of the raw
`NT_NOTIFY_*` flags
:returns: listener handle
"""
return self.__api.addEntryListenerByIdEx(
self, self.key, self._local_id, listener, flags, paramIsNew
)
def removeListener(self, listener_id) -> None:
"""Remove a listener from receiving entry events
        :param listener_id: the listener handle that was returned by :meth:`addListener`
"""
self.__api.removeEntryListener(listener_id)
# Comparison operators et al
def __lt__(self, other):
raise TypeError(
"< not allowed on NetworkTableEntry objects. Use the .value attribute instead"
)
def __le__(self, other):
raise TypeError(
"<= not allowed on NetworkTableEntry objects. Use the .value attribute instead"
)
def __eq__(self, other):
raise TypeError(
"== not allowed on NetworkTableEntry objects. Use the .value attribute instead"
)
def __ne__(self, other):
raise TypeError(
"!= not allowed on NetworkTableEntry objects. Use the .value attribute instead"
)
def __gt__(self, other):
raise TypeError(
"> not allowed on NetworkTableEntry objects. Use the .value attribute instead"
)
def __ge__(self, other):
raise TypeError(
">= not allowed on NetworkTableEntry objects. Use the .value attribute instead"
)
def __bool__(self):
raise TypeError(
"< not allowed on NetworkTableEntry objects. Use the .value attribute instead"
)
def __repr__(self):
return "<NetworkTableEntry: %s>" % (self._value.__repr__(),)

View File

@ -0,0 +1,756 @@
# todo: tracks NetworkTablesInstance.java
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
from weakref import WeakSet
from ._impl import constants
from ._impl.api import NtCoreApi
from .entry import NetworkTableEntry
from .table import NetworkTable
import logging
logger = logging.getLogger("nt")
__all__ = ["NetworkTablesInstance"]
ServerPortPair = Tuple[str, int]
class NetworkTablesInstance:
"""
The object ``networktables.NetworkTables`` is a global singleton that
you can use to initialize NetworkTables connections, configure global
settings and listeners, and to create table objects which can be used
to send data to/from NetworkTable servers and clients.
First, you must initialize NetworkTables::
from networktables import NetworkTables
# As a client to connect to a robot
NetworkTables.initialize(server='roborio-XXX-frc.local')
Then, to interact with the SmartDashboard you get an instance of the
table, and you can call the various methods::
sd = NetworkTables.getTable('SmartDashboard')
sd.putNumber('someNumber', 1234)
otherNumber = sd.getNumber('otherNumber')
You can create additional NetworkTablesInstance objects.
Instances are completely independent from each other. Table operations on
one instance will not be visible to other instances unless the instances are
connected via the network. The main limitation on instances is that you
cannot have two servers on the same network port. The main utility of
instances is for unit testing, but they can also enable one program to
connect to two different NetworkTables networks.
The global "default" instance (as returned by :meth:`.NetworkTablesInstance.getDefault`) is
always available, and is intended for the common case when there is only
a single NetworkTables instance being used in the program.
Additional instances can be created with the :meth:`.create` function.
.. seealso::
- The examples in the documentation.
- :class:`.NetworkTable`
"""
class EntryTypes:
"""
NetworkTable value types used in :meth:`.NetworkTable.getKeys`
"""
#: True or False
BOOLEAN = constants.NT_BOOLEAN
#: Floating point number
DOUBLE = constants.NT_DOUBLE
#: Strings
STRING = constants.NT_STRING
#: Raw bytes
RAW = constants.NT_RAW
#: List of booleans
BOOLEAN_ARRAY = constants.NT_BOOLEAN_ARRAY
#: List of numbers
DOUBLE_ARRAY = constants.NT_DOUBLE_ARRAY
#: List of strings
STRING_ARRAY = constants.NT_STRING_ARRAY
class EntryFlags:
"""
NetworkTables entry flags
"""
#: Indicates a value that will be persisted on the server
PERSISTENT = constants.NT_PERSISTENT
class NotifyFlags:
"""
Bitflags passed to entry callbacks
"""
#: Initial listener addition
IMMEDIATE = constants.NT_NOTIFY_IMMEDIATE
#: Changed locally
LOCAL = constants.NT_NOTIFY_LOCAL
#: Newly created entry
NEW = constants.NT_NOTIFY_NEW
#: Key deleted
DELETE = constants.NT_NOTIFY_DELETE
#: Value changed
UPDATE = constants.NT_NOTIFY_UPDATE
#: Flags changed
FLAGS = constants.NT_NOTIFY_FLAGS
class NetworkModes:
"""
        Bitflags returned from :meth:`.getNetworkMode`
"""
#: Not running
NONE = constants.NT_NET_MODE_NONE
#: Running in server mode
SERVER = constants.NT_NET_MODE_SERVER
#: Running in client mode
CLIENT = constants.NT_NET_MODE_CLIENT
#: Flag for starting (either client or server)
STARTING = constants.NT_NET_MODE_STARTING
#: Flag for failure (either client or server)
FAILURE = constants.NT_NET_MODE_FAILURE
#: Flag indicating in test mode
TEST = constants.NT_NET_MODE_TEST
#: The path separator for sub-tables and keys
PATH_SEPARATOR = "/"
#: The default port that network tables operates on
DEFAULT_PORT = constants.NT_DEFAULT_PORT
@classmethod
def create(cls) -> "NetworkTablesInstance":
"""Create an instance.
:returns: Newly created instance
"""
return cls()
@classmethod
def getDefault(cls) -> "NetworkTablesInstance":
"""Get global default instance."""
try:
return cls._defaultInstance
except AttributeError:
pass
cls._defaultInstance = cls()
return cls._defaultInstance
def __init__(self):
self._init()
def _init(self):
self._api = NtCoreApi(self.__createEntry)
self._tables = {}
self._entry_listeners = {}
self._conn_listeners = {}
if not hasattr(self, "_ntproperties"):
self._ntproperties = WeakSet()
else:
for ntprop in self._ntproperties:
ntprop.reset()
def __createEntry(self, key, local_id):
return NetworkTableEntry(self._api, local_id, key)
def getEntry(self, name: str) -> NetworkTableEntry:
"""Gets the entry for a key.
:param name: Absolute path of key
:returns: Network table entry.
.. versionadded:: 2018.0.0
"""
assert name.startswith("/")
return self._api.getEntry(name)
def getEntries(self, prefix: str, types: int = 0) -> Sequence[NetworkTableEntry]:
"""Get entries starting with the given prefix.
The results are optionally filtered by string prefix and entry type to
only return a subset of all entries.
:param prefix: entry name required prefix; only entries whose name
starts with this string are returned
:param types: bitmask of types; 0 is treated as a "don't care"
:returns: List of matching entries.
:rtype: list of :class:`.NetworkTableEntry`
.. versionadded:: 2018.0.0
"""
return self._api.getEntries(prefix, types)
def getEntryInfo(self, prefix: str, types: int = 0) -> Sequence:
"""Get information about entries starting with the given prefix.
The results are optionally filtered by string prefix and entry type to
only return a subset of all entries.
:param prefix: entry name required prefix; only entries whose name
starts with this string are returned
:param types: bitmask of types; 0 is treated as a "don't care"
:returns: List of entry information.
.. versionadded:: 2018.0.0
"""
return self._api.getEntryInfo(prefix, types)
def getTable(self, key: str) -> NetworkTable:
"""Gets the table with the specified key.
:param key: the key name
:returns: the network table requested
.. versionchanged:: 2018.0.0
No longer automatically initializes network tables
"""
# Must start with separator
if key.startswith(self.PATH_SEPARATOR):
path = key
else:
path = self.PATH_SEPARATOR + key
# Must not end with separator
if path.endswith(self.PATH_SEPARATOR):
path = path[:-1]
table = self._tables.get(path)
if table is None:
table = NetworkTable(path, self._api, self)
table = self._tables.setdefault(path, table)
return table
def deleteAllEntries(self) -> None:
"""Deletes ALL keys in ALL subtables (except persistent values).
Use with caution!
.. versionadded:: 2018.0.0
"""
self._api.deleteAllEntries()
# Deprecated alias
globalDeleteAll = deleteAllEntries
def addEntryListener(
self,
listener: Callable[[str, Any, int], None],
immediateNotify: bool = True,
localNotify: bool = True,
paramIsNew: bool = True,
) -> None:
"""Adds a listener that will be notified when any key in any
NetworkTable is changed. The keys that are received using this
listener will be full NetworkTable keys. Most users will not
want to use this listener type.
The listener is called from the NetworkTables I/O thread, and should
return as quickly as possible.
:param listener: A callable that has this signature: `callable(key, value, isNew)`
:param immediateNotify: If True, the listener will be called immediately with the current values of the table
:param localNotify: True if you wish to be notified of changes made locally (default is True)
:param paramIsNew: If True, the listener third parameter is a boolean set to True
if the listener is being called because of a new value in the
table. Otherwise, the parameter is an integer of the raw
`NT_NOTIFY_*` flags
.. versionadded:: 2015.2.0
.. versionchanged:: 2017.0.0
`paramIsNew` parameter added
.. versionchanged:: 2018.0.0
Renamed to addEntryListener, no longer initializes NetworkTables
.. warning:: You may call the NetworkTables API from within the
listener, but it is not recommended as we are not
currently sure if deadlocks will occur
"""
gtable = self.getTable("/")
gtable.addTableListener(
listener,
immediateNotify=immediateNotify,
key=0xDEADBEEF,
localNotify=localNotify,
)
def addEntryListenerEx(
self,
listener: Callable[[str, Any, int], None],
flags: int,
paramIsNew: bool = True,
) -> None:
"""Adds a listener that will be notified when any key in any
NetworkTable is changed. The keys that are received using this
listener will be full NetworkTable keys. Most users will not
want to use this listener type.
The listener is called from the NetworkTables I/O thread, and should
return as quickly as possible.
:param listener: A callable that has this signature: `callable(key, value, isNew)`
:param flags: Bitmask of flags that indicate the types of notifications you wish to receive
:type flags: :class:`.NotifyFlags`
:param paramIsNew: If True, the listener third parameter is a boolean set to True
if the listener is being called because of a new value in the
table. Otherwise, the parameter is an integer of the raw
`NT_NOTIFY_*` flags
.. versionadded:: 2017.0.0
.. versionchanged:: 2018.0.0
Renamed to addEntryListenerEx, no longer initializes NetworkTables
"""
gtable = self.getTable("/")
gtable.addTableListenerEx(
listener, flags, key=0xDEADBEEF, paramIsNew=paramIsNew
)
# deprecated aliases
addGlobalListener = addEntryListener
addGlobalListenerEx = addEntryListenerEx
def removeEntryListener(self, listener: Callable[[str, Any, int], None]) -> None:
"""Remove an entry listener.
:param listener: Listener to remove
.. versionadded:: 2018.0.0
"""
self.getTable("/").removeEntryListener(listener)
# Deprecated alias
removeGlobalListener = removeEntryListener
def waitForEntryListenerQueue(self, timeout: float) -> bool:
"""Wait for the entry listener queue to be empty. This is primarily useful
for deterministic testing. This blocks until either the entry listener
queue is empty (e.g. there are no more events that need to be passed along
to callbacks or poll queues) or the timeout expires.
.. warning:: This function is not efficient, so only use it for testing!
:param timeout: timeout, in seconds. Set to 0 for non-blocking behavior,
or None to block indefinitely
        :returns: False if timed out, otherwise True.
"""
return self._api.waitForEntryListenerQueue(timeout)
def addConnectionListener(self, listener: Callable, immediateNotify: bool = False):
"""Adds a listener that will be notified when a new connection to a
NetworkTables client/server is established.
The listener is called from a NetworkTables owned thread and should
return as quickly as possible.
:param listener: A function that will be called with two parameters
:type listener: fn(bool, ConnectionInfo)
:param immediateNotify: If True, the listener will be called immediately
with any active connection information
.. warning:: You may call the NetworkTables API from within the
listener, but it is not recommended.
.. versionchanged:: 2017.0.0
The listener is now a function
"""
assert callable(listener)
cb = lambda info: listener(info.connected, info.conn_info)
listener_id = self._api.addConnectionListener(cb, immediateNotify)
self._conn_listeners.setdefault(listener, []).append(listener_id)
def removeConnectionListener(self, listener: Callable):
"""Removes a connection listener
:param listener: The function registered for connection notifications
"""
for listener_id in self._conn_listeners.pop(listener, []):
self._api.removeConnectionListener(listener_id)
def waitForConnectionListenerQueue(self, timeout: float) -> bool:
"""Wait for the connection listener queue to be empty. This is primarily useful
for deterministic testing. This blocks until either the connection listener
queue is empty (e.g. there are no more events that need to be passed along
to callbacks or poll queues) or the timeout expires.
:param timeout: timeout, in seconds. Set to 0 for non-blocking behavior,
or a negative value to block indefinitely
        :returns: False if timed out, otherwise True.
.. versionadded:: 2018.0.0
"""
return self._api.waitForConnectionListenerQueue(timeout)
#
# Client/Server functions
#
def setNetworkIdentity(self, name: str) -> None:
"""Sets the network identity of this node. This is the name used in the
initial connection handshake, and is provided in the connection info
on the remote end.
:param name: A string to communicate to other NetworkTables instances
.. versionadded:: 2017.0.0
"""
self._api.setNetworkIdentity(name)
def getNetworkMode(self):
"""Get the current network mode
.. versionadded:: 2018.0.0
"""
return self._api.getNetworkMode()
def isServer(self) -> bool:
""":returns: True if configured in server mode"""
return (self.getNetworkMode() & self.NetworkModes.SERVER) != 0
def startServer(
self,
persistFilename: str = "networktables.ini",
listenAddress: str = "",
port: int = constants.NT_DEFAULT_PORT,
):
"""Starts a server using the specified filename, listening address, and port.
:param persistFilename: the name of the persist file to use
:param listenAddress: the address to listen on, or empty to listen on any
address
:param port: port to communicate over
.. versionadded:: 2018.0.0
"""
return self._api.startServer(persistFilename, listenAddress, port)
def stopServer(self) -> None:
"""Stops the server if it is running.
.. versionadded:: 2018.0.0
"""
self._api.stopServer()
def startClient(
self,
server_or_servers: Union[str, ServerPortPair, List[ServerPortPair], List[str]],
):
"""Sets server addresses and port for client (without restarting client).
The client will attempt to connect to each server in round robin fashion.
:param server_or_servers: a string, a tuple of (server, port), array of
(server, port), or an array of strings
.. versionadded:: 2018.0.0
"""
self.setServer(server_or_servers)
return self._api.startClient()
def startClientTeam(self, team: int, port: int = constants.NT_DEFAULT_PORT):
"""Starts a client using commonly known robot addresses for the specified
team.
:param team: team number
:param port: port to communicate over
.. versionadded:: 2018.0.0
"""
self.setServerTeam(team, port)
return self._api.startClient()
def stopClient(self) -> None:
"""Stops the client if it is running.
.. versionadded:: 2018.0.0
"""
self._api.stopClient()
def setServer(
self,
server_or_servers: Union[str, ServerPortPair, List[ServerPortPair], List[str]],
) -> None:
"""Sets server addresses and port for client (without restarting client).
The client will attempt to connect to each server in round robin fashion.
:param server_or_servers: a string, a tuple of (server, port), array of
(server, port), or an array of strings
.. versionadded:: 2018.0.0
"""
if isinstance(server_or_servers, list):
server_or_servers = [
item if isinstance(item, tuple) else (item, constants.NT_DEFAULT_PORT)
for item in server_or_servers
]
elif isinstance(server_or_servers, str):
server_or_servers = [(server_or_servers, constants.NT_DEFAULT_PORT)]
self._api.setServer(server_or_servers)
def setServerTeam(self, team: int, port: int = constants.NT_DEFAULT_PORT) -> None:
"""Sets server addresses and port for client based on the team number
(without restarting client). The client will attempt to connect to each
server in round robin fashion.
:param team: Team number
:param port: Port to communicate over
.. versionadded:: 2018.0.0
"""
self._api.setServerTeam(team, port)
def startDSClient(self, port: int = constants.NT_DEFAULT_PORT) -> None:
"""Starts requesting server address from Driver Station.
This connects to the Driver Station running on localhost to obtain the
server IP address.
:param port: server port to use in combination with IP from DS
.. versionadded:: 2018.0.0
Was formerly called setDashboardMode
"""
self._api.startDSClient(port)
setDashboardMode = startDSClient
def setUpdateRate(self, interval: float) -> None:
"""Sets the period of time between writes to the network.
        WPILib's networktables and SmartDashboard default to 100ms; we have
set it to 50ms instead for quicker response time. You should not set
this value too low, as it could potentially increase the volume of
data sent over the network.
:param interval: Write flush period in seconds (default is 0.050,
or 50ms)
.. warning:: If you don't know what this setting affects, don't mess
with it!
.. versionadded:: 2017.0.0
"""
self._api.setUpdateRate(interval)
def flush(self) -> None:
"""Flushes all updated values immediately to the network.
.. note:: This is rate-limited to protect the network from flooding.
This is primarily useful for synchronizing network updates
with user code.
.. versionadded:: 2017.0.0
"""
self._api.flush()
def getConnections(self) -> Sequence:
"""Gets information on the currently established network connections.
If operating as a client, this will return either zero or one values.
:returns: list of connection information
:rtype: list
.. versionadded:: 2018.0.0
"""
return self._api.getConnections()
def getRemoteAddress(self) -> Optional[str]:
"""
Only returns a valid address if connected to the server. If
this is a server, returns None
:returns: IP address of server or None
.. versionadded:: 2015.3.2
"""
return self._api.getRemoteAddress()
def isConnected(self) -> bool:
"""
:returns: True if connected to at least one other NetworkTables
instance
"""
return self._api.getIsConnected()
def savePersistent(self, filename: str):
"""Saves persistent keys to a file. The server does this automatically.
:param filename: Name of file to save keys to
:returns: None if success, or a string describing the error on failure
.. versionadded:: 2017.0.0
"""
return self._api.savePersistent(filename)
def loadPersistent(self, filename: str):
"""Loads persistent keys from a file. WPILib will do this automatically
on a robot server.
:param filename: Name of file to load keys from
:returns: None if success, or a string describing the error on failure
.. versionadded:: 2017.0.0
"""
return self._api.loadPersistent(filename)
def saveEntries(self, filename: str, prefix: str):
"""Save table values to a file. The file format used is identical to
that used for SavePersistent.
:param filename: filename
:param prefix: save only keys starting with this prefix
:returns: None if success, or a string describing the error on failure
.. versionadded:: 2018.0.0
"""
return self._api.saveEntries(filename, prefix)
def loadEntries(self, filename: str, prefix: str):
"""Load table values from a file. The file format used is identical to
that used for SavePersistent / LoadPersistent.
:param filename: filename
:param prefix: load only keys starting with this prefix
:returns: None if success, or a string describing the error on failure
.. versionadded:: 2018.0.0
"""
return self._api.loadEntries(filename, prefix)
#
# These methods are unique to pynetworktables
#
def initialize(self, server=None):
"""Initializes NetworkTables and begins operations
:param server: If specified, NetworkTables will be set to client
mode and attempt to connect to the specified server.
This is equivalent to executing::
self.startClient(server)
:type server: str
:returns: True if initialized, False if already initialized
.. versionadded:: 2017.0.0
The *server* parameter
"""
if server is not None:
return self.startClient(server)
else:
return self.startServer()
def shutdown(self) -> None:
"""Stops all NetworkTables activities and unregisters all tables
and callbacks. You can call :meth:`.initialize` again after
calling this.
.. versionadded:: 2017.0.0
"""
self._api.stop()
self._api.destroy()
self._init()
def startTestMode(self, server: bool = True):
"""Setup network tables to run in unit test mode, and enables verbose
logging.
:returns: True if successful
.. versionadded:: 2018.0.0
"""
self.enableVerboseLogging()
return self._api.startTestMode(server)
def enableVerboseLogging(self) -> None:
"""Enable verbose logging that can be useful when trying to diagnose
NetworkTables issues.
.. warning:: Don't enable this in normal use, as it can potentially
cause performance issues due to the volume of logging.
.. versionadded:: 2017.0.0
"""
self._api.setVerboseLogging(True)
def getGlobalTable(self) -> NetworkTable:
"""Returns an object that allows you to write values to absolute
NetworkTable keys (which are paths with / separators).
.. note:: This is now an alias for ``NetworkTables.getTable('/')``
.. versionadded:: 2015.2.0
.. versionchanged:: 2017.0.0
Returns a NetworkTable instance
.. versionchanged:: 2018.0.0
No longer automatically initializes network tables
"""
return self.getTable("/")
def getGlobalAutoUpdateValue(
self, key: str, defaultValue, writeDefault: bool
) -> NetworkTableEntry:
"""Global version of getAutoUpdateValue.
:param key: the full NT path of the value (must start with /)
:param defaultValue: The default value to return if the key doesn't exist
:param writeDefault: If True, force the value to the specified default
:rtype: :class:`.NetworkTableEntry`
.. seealso:: :func:`.ntproperty` is a read-write alternative to this
.. versionadded:: 2015.3.0
.. versionchanged:: 2018.0.0
This now returns the same as :meth:`NetworkTablesInstance.getEntry`
"""
assert key.startswith("/")
entry = self.getEntry(key)
if writeDefault:
entry.forceSetValue(defaultValue)
else:
entry.setDefaultValue(defaultValue)
return entry
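
A hedged sketch of typical client-side use of the NetworkTablesInstance methods above. The network identity, team number, and table/key names are placeholders; it assumes a reachable NetworkTables server and that the package exports NetworkTablesInstance as declared in ``__all__``.

```python
from networktables import NetworkTablesInstance

inst = NetworkTablesInstance.getDefault()
inst.setNetworkIdentity("vision-coprocessor")   # illustrative identity
inst.startClientTeam(1234)                      # placeholder team number

# Listener is called with (connected: bool, connection info)
def on_connection(connected, info):
    print("connected" if connected else "disconnected", info)

inst.addConnectionListener(on_connection, immediateNotify=True)

sd = inst.getTable("SmartDashboard")
sd.putNumber("someNumber", 1234)
inst.flush()                                    # push the update immediately

inst.shutdown()                                 # stop and unregister everything
```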

View File

@ -0,0 +1,809 @@
__all__ = ["NetworkTable"]
from typing import Callable, List, Optional, Sequence
from ._impl.constants import (
NT_BOOLEAN,
NT_DOUBLE,
NT_STRING,
NT_RAW,
NT_BOOLEAN_ARRAY,
NT_DOUBLE_ARRAY,
NT_STRING_ARRAY,
NT_PERSISTENT,
NT_NOTIFY_IMMEDIATE,
NT_NOTIFY_LOCAL,
NT_NOTIFY_NEW,
NT_NOTIFY_DELETE,
NT_NOTIFY_UPDATE,
NT_NOTIFY_FLAGS,
)
from ._impl.value import Value
from .entry import NetworkTableEntry
import logging
logger = logging.getLogger("nt")
_is_new = NT_NOTIFY_IMMEDIATE | NT_NOTIFY_NEW
class NetworkTable:
"""
This is a NetworkTable object, it allows you to interact with
NetworkTables in a table-based manner. You should not directly
create a NetworkTable object, but instead use
:meth:`.NetworkTables.getTable` to retrieve a NetworkTable instance.
For example, to interact with the SmartDashboard::
from networktables import NetworkTables
sd = NetworkTables.getTable('SmartDashboard')
someNumberEntry = sd.getEntry('someNumber')
someNumberEntry.putNumber(1234)
...
.. seealso::
- The examples in the documentation.
- :class:`.NetworkTablesInstance`
"""
PATH_SEPARATOR = "/"
def __init__(self, path, api, inst):
#: Path of table without trailing slash
self.path = path
self._path = path + self.PATH_SEPARATOR
self._pathsz = len(self._path)
self._api = api
self._inst = inst
self._listeners = {}
def __str__(self):
return "NetworkTable: %s" % self._path
def __repr__(self):
return "<NetworkTable path=%s>" % self._path
def getEntry(self, key: str) -> NetworkTableEntry:
"""Gets the entry for a subkey. This is the preferred API to use
to access NetworkTable keys.
:rtype: :class:`.NetworkTableEntry`
.. versionadded:: 2018.0.0
"""
return self._inst.getEntry(self._path + key)
def getPath(self) -> str:
"""Gets the full path of this table. Does not include the trailing "/".
        :returns: The path (e.g. "", "/foo").
"""
return self._path
def addEntryListener(
self,
listener: Callable,
immediateNotify: bool = False,
key: Optional[str] = None,
localNotify: bool = False,
) -> None:
"""Adds a listener that will be notified when any key in this
NetworkTable is changed, or when a specified key changes.
The listener is called from the NetworkTables I/O thread, and should
return as quickly as possible.
:param listener: A callable with signature `callable(source, key, value, isNew)`
:param immediateNotify: If True, the listener will be called immediately with the current values of the table
:param key: If specified, the listener will only be called when this key is changed
:param localNotify: True if you wish to be notified of changes made locally (default is False)
.. warning:: You may call the NetworkTables API from within the
listener, but it is not recommended
.. versionchanged:: 2017.0.0
Added localNotify parameter (defaults to False, which is different from NT2)
"""
flags = NT_NOTIFY_NEW | NT_NOTIFY_UPDATE
if immediateNotify:
flags |= NT_NOTIFY_IMMEDIATE
if localNotify:
flags |= NT_NOTIFY_LOCAL
self.addEntryListenerEx(listener, flags, key=key)
def addEntryListenerEx(
self,
listener: Callable,
flags: int,
key: Optional[str] = None,
paramIsNew: bool = True,
) -> None:
"""Adds a listener that will be notified when any key in this
NetworkTable is changed, or when a specified key changes.
The listener is called from the NetworkTables I/O thread, and should
return as quickly as possible.
:param listener: A callable with signature `callable(source, key, value, param)`
:param flags: Bitmask of flags that indicate the types of notifications you wish to receive
:type flags: :class:`.NotifyFlags`
:param key: If specified, the listener will only be called when this key is changed
:param paramIsNew: If True, the listener fourth parameter is a boolean set to True
if the listener is being called because of a new value in the
table. Otherwise, the parameter is an integer of the raw
`NT_NOTIFY_*` flags
.. warning:: You may call the NetworkTables API from within the
listener, but it is not recommended
.. versionadded:: 2017.0.0
"""
if key is None:
# Any key in this table (but not subtables)
_pathsz = self._pathsz
if paramIsNew:
def callback(item):
key_, value_, flags_, _ = item
key_ = key_[_pathsz:]
if "/" not in key_:
listener(self, key_, value_.value, (flags_ & _is_new) != 0)
else:
def callback(item):
key_, value_, flags_, _ = item
key_ = key_[_pathsz:]
if "/" not in key_:
listener(self, key_, value_.value, flags_)
uid = self._api.addEntryListener(self._path, callback, flags)
# Hack: Internal flag used by addGlobalListener*
elif key == 0xDEADBEEF:
if paramIsNew:
def callback(item):
key_, value_, flags_, _ = item
listener(key_, value_.value, (flags_ & _is_new) != 0)
else:
callback = listener
uid = self._api.addEntryListener("/", callback, flags)
else:
entry_id = self._api.getEntryId(self._path + key)
uid = self._api.addEntryListenerByIdEx(
self, key, entry_id, listener, flags, paramIsNew
)
self._listeners.setdefault(listener, []).append(uid)
# deprecated aliases
addTableListener = addEntryListener
addTableListenerEx = addEntryListenerEx
def addSubTableListener(
self, listener: Callable, localNotify: bool = False
) -> None:
"""Adds a listener that will be notified when any key in a subtable of
this NetworkTable is changed.
The listener is called from the NetworkTables I/O thread, and should
return as quickly as possible.
:param listener: Callable to call when previously unseen table appears.
Function signature is `callable(source, key, subtable, True)`
:param localNotify: True if you wish to be notified when local changes
result in a new table
.. warning:: You may call the NetworkTables API from within the
listener, but it is not recommended as we are not
currently sure if deadlocks will occur
.. versionchanged:: 2017.0.0
Added localNotify parameter
"""
notified_tables = {}
def _callback(item):
key, value_, _1, _2 = item
key = key[self._pathsz :]
if "/" in key:
skey = key[: key.index("/")]
o = object()
if notified_tables.setdefault(skey, o) is o:
try:
listener(self, skey, self.getSubTable(skey), True)
except Exception:
logger.warning(
"Unhandled exception in %s", listener, exc_info=True
)
flags = NT_NOTIFY_NEW | NT_NOTIFY_IMMEDIATE
if localNotify:
flags |= NT_NOTIFY_LOCAL
uid = self._api.addEntryListener(self._path, _callback, flags)
self._listeners.setdefault(listener, []).append(uid)
def removeEntryListener(self, listener: Callable) -> None:
"""Removes a table listener
:param listener: callable that was passed to :meth:`.addTableListener`
or :meth:`.addSubTableListener`
"""
uids = self._listeners.pop(listener, [])
for uid in uids:
self._api.removeEntryListener(uid)
# Deprecated alias
removeTableListener = removeEntryListener
def getSubTable(self, key: str) -> "NetworkTable":
"""Returns the table at the specified key. If there is no table at the
specified key, it will create a new table
:param key: the key name
:returns: the networktable to be returned
"""
path = self._path + key
return self._inst.getTable(path)
def containsKey(self, key: str) -> bool:
"""Determines whether the given key is in this table.
:param key: the key to search for
        :returns: True if the table has a value assigned to the given key
"""
path = self._path + key
return self._api.getEntryValue(path) is not None
def __contains__(self, key: str) -> bool:
return self.containsKey(key)
def containsSubTable(self, key: str) -> bool:
"""Determines whether there exists a non-empty subtable for this key
in this table.
:param key: the key to search for (must not end with path separator)
:returns: True if there is a subtable with the key which contains at least
one key/subtable of its own
"""
path = self._path + key + self.PATH_SEPARATOR
return len(self._api.getEntryInfo(path, 0)) > 0
def getKeys(self, types: int = 0) -> List[str]:
"""
:param types: bitmask of types; 0 is treated as a "don't care".
:type types: :class:`.EntryTypes`
:returns: keys currently in the table
.. versionadded:: 2017.0.0
"""
keys = []
for entry in self._api.getEntryInfo(self._path, types):
relative_key = entry.name[len(self._path) :]
if self.PATH_SEPARATOR in relative_key:
continue
keys.append(relative_key)
return keys
def getSubTables(self) -> List[str]:
""":returns: subtables currently in the table
.. versionadded:: 2017.0.0
"""
keys = set()
for entry in self._api.getEntryInfo(self._path, 0):
relative_key = entry.name[len(self._path) :]
subst = relative_key.split(self.PATH_SEPARATOR)
if len(subst) == 1:
continue
keys.add(subst[0])
return list(keys)
def setPersistent(self, key: str) -> None:
"""Makes a key's value persistent through program restarts.
:param key: the key to make persistent
.. versionadded:: 2017.0.0
"""
self.setFlags(key, NT_PERSISTENT)
def clearPersistent(self, key: str) -> None:
"""Stop making a key's value persistent through program restarts.
:param key: the key name
.. versionadded:: 2017.0.0
"""
self.clearFlags(key, NT_PERSISTENT)
def isPersistent(self, key: str) -> bool:
"""Returns whether the value is persistent through program restarts.
:param key: the key name
.. versionadded:: 2017.0.0
"""
return self.getFlags(key) & NT_PERSISTENT != 0
def delete(self, key: str) -> None:
"""Deletes the specified key in this table.
:param key: the key name
.. versionadded:: 2017.0.0
"""
path = self._path + key
self._api.deleteEntry(path)
def setFlags(self, key: str, flags: int) -> None:
"""Sets entry flags on the specified key in this table.
:param key: the key name
:param flags: the flags to set (bitmask)
:type flags: :class:`.EntryFlags`
.. versionadded:: 2017.0.0
"""
path = self._path + key
self._api.setEntryFlags(path, self._api.getEntryFlags(path) | flags)
def clearFlags(self, key: str, flags: int) -> None:
"""Clears entry flags on the specified key in this table.
:param key: the key name
:param flags: the flags to clear (bitmask)
:type flags: :class:`.EntryFlags`
.. versionadded:: 2017.0.0
"""
path = self._path + key
self._api.setEntryFlags(path, self._api.getEntryFlags(path) & ~flags)
def getFlags(self, key: str):
"""Returns the entry flags for the specified key.
:param key: the key name
:returns: the flags, or 0 if the key is not defined
:rtype: :class:`.EntryFlags`
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.getEntryFlags(path)
def putNumber(self, key: str, value: float) -> bool:
"""Put a number in the table
:param key: the key to be assigned to
:param value: the value that will be assigned
:returns: False if the table key already exists with a different type
"""
path = self._path + key
return self._api.setEntryValue(path, Value.makeDouble(value))
def setDefaultNumber(self, key: str, defaultValue: float) -> bool:
"""If the key doesn't currently exist, then the specified value will
be assigned to the key.
:param key: the key to be assigned to
:param defaultValue: the default value to set if key doesn't exist.
:type defaultValue: int, float
:returns: False if the table key exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setDefaultEntryValue(path, Value.makeDouble(defaultValue))
def getNumber(self, key: str, defaultValue: float) -> float:
"""Gets the number associated with the given name.
:param key: the key to look up
:param defaultValue: the value to be returned if no value is found
:returns: the value associated with the given key or the given default value
if there is no value associated with the key
"""
path = self._path + key
value = self._api.getEntryValue(path)
if not value or value.type != NT_DOUBLE:
return defaultValue
return value.value
def putString(self, key: str, value: str) -> bool:
"""Put a string in the table
:param key: the key to be assigned to
:param value: the value that will be assigned
:returns: False if the table key already exists with a different type
"""
path = self._path + key
return self._api.setEntryValue(path, Value.makeString(value))
def setDefaultString(self, key: str, defaultValue: str) -> bool:
"""If the key doesn't currently exist, then the specified value will
be assigned to the key.
:param key: the key to be assigned to
:param defaultValue: the default value to set if key doesn't exist.
:returns: False if the table key exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setDefaultEntryValue(path, Value.makeString(defaultValue))
def getString(self, key: str, defaultValue: str) -> str:
"""Gets the string associated with the given name. If the key does not
exist or is of different type, it will return the default value.
:param key: the key to look up
:param defaultValue: the value to be returned if no value is found
:returns: the value associated with the given key or the given default value
if there is no value associated with the key
"""
path = self._path + key
value = self._api.getEntryValue(path)
if not value or value.type != NT_STRING:
return defaultValue
return value.value
def putBoolean(self, key: str, value: bool) -> bool:
"""Put a boolean in the table
:param key: the key to be assigned to
:param value: the value that will be assigned
:returns: False if the table key already exists with a different type
"""
path = self._path + key
return self._api.setEntryValue(path, Value.makeBoolean(value))
def setDefaultBoolean(self, key: str, defaultValue: bool) -> bool:
"""If the key doesn't currently exist, then the specified value will
be assigned to the key.
:param key: the key to be assigned to
:param defaultValue: the default value to set if key doesn't exist.
:returns: False if the table key exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setDefaultEntryValue(path, Value.makeBoolean(defaultValue))
def getBoolean(self, key: str, defaultValue: bool) -> bool:
"""Gets the boolean associated with the given name. If the key does not
exist or is of different type, it will return the default value.
:param key: the key name
:param defaultValue: the default value if no value is found
        :returns: the boolean associated with the given key, or the given default value if no value is found
"""
path = self._path + key
value = self._api.getEntryValue(path)
if not value or value.type != NT_BOOLEAN:
return defaultValue
return value.value
def putBooleanArray(self, key: str, value: Sequence[bool]) -> bool:
"""Put a boolean array in the table
:param key: the key to be assigned to
:param value: the value that will be assigned
:returns: False if the table key already exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setEntryValue(path, Value.makeBooleanArray(value))
def setDefaultBooleanArray(self, key: str, defaultValue: Sequence[bool]) -> bool:
"""If the key doesn't currently exist, then the specified value will
be assigned to the key.
:param key: the key to be assigned to
:param defaultValue: the default value to set if key doesn't exist.
:returns: False if the table key exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setDefaultEntryValue(
path, Value.makeBooleanArray(defaultValue)
)
def getBooleanArray(self, key: str, defaultValue) -> Sequence[bool]:
"""Returns the boolean array the key maps to. If the key does not exist or is
of different type, it will return the default value.
:param key: the key to look up
:type key: str
:param defaultValue: the value to be returned if no value is found
:returns: the value associated with the given key or the given default value
if there is no value associated with the key
.. versionadded:: 2017.0.0
"""
path = self._path + key
value = self._api.getEntryValue(path)
if not value or value.type != NT_BOOLEAN_ARRAY:
return defaultValue
return value.value
def putNumberArray(self, key: str, value: Sequence[float]) -> bool:
"""Put a number array in the table
:param key: the key to be assigned to
:param value: the value that will be assigned
:returns: False if the table key already exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setEntryValue(path, Value.makeDoubleArray(value))
def setDefaultNumberArray(self, key: str, defaultValue: Sequence[float]) -> bool:
"""If the key doesn't currently exist, then the specified value will
be assigned to the key.
:param key: the key to be assigned to
:param defaultValue: the default value to set if key doesn't exist.
:returns: False if the table key exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setDefaultEntryValue(path, Value.makeDoubleArray(defaultValue))
def getNumberArray(self, key: str, defaultValue) -> Sequence[float]:
"""Returns the number array the key maps to. If the key does not exist or is
of different type, it will return the default value.
:param key: the key to look up
:param defaultValue: the value to be returned if no value is found
:returns: the value associated with the given key or the given default value
if there is no value associated with the key
.. versionadded:: 2017.0.0
"""
path = self._path + key
value = self._api.getEntryValue(path)
if not value or value.type != NT_DOUBLE_ARRAY:
return defaultValue
return value.value
def putStringArray(self, key: str, value: Sequence[str]) -> bool:
"""Put a string array in the table
:param key: the key to be assigned to
:param value: the value that will be assigned
:returns: False if the table key already exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setEntryValue(path, Value.makeStringArray(value))
def setDefaultStringArray(self, key: str, defaultValue: Sequence[str]) -> bool:
"""If the key doesn't currently exist, then the specified value will
be assigned to the key.
:param key: the key to be assigned to
:param defaultValue: the default value to set if key doesn't exist.
:returns: False if the table key exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setDefaultEntryValue(path, Value.makeStringArray(defaultValue))
def getStringArray(self, key: str, defaultValue) -> Sequence[str]:
"""Returns the string array the key maps to. If the key does not exist or is
of different type, it will return the default value.
:param key: the key to look up
:param defaultValue: the value to be returned if no value is found
:returns: the value associated with the given key or the given default value
if there is no value associated with the key
.. versionadded:: 2017.0.0
"""
path = self._path + key
value = self._api.getEntryValue(path)
if not value or value.type != NT_STRING_ARRAY:
return defaultValue
return value.value
def putRaw(self, key: str, value: bytes) -> bool:
"""Put a raw value (byte array) in the table
:param key: the key to be assigned to
:param value: the value that will be assigned
:returns: False if the table key already exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setEntryValue(path, Value.makeRaw(value))
def setDefaultRaw(self, key: str, defaultValue: bytes) -> bool:
"""If the key doesn't currently exist, then the specified value will
be assigned to the key.
:param key: the key to be assigned to
:param defaultValue: the default value to set if key doesn't exist.
:returns: False if the table key exists with a different type
.. versionadded:: 2017.0.0
"""
path = self._path + key
return self._api.setDefaultEntryValue(path, Value.makeRaw(defaultValue))
def getRaw(self, key: str, defaultValue: bytes) -> bytes:
"""Returns the raw value (byte array) the key maps to. If the key does not
exist or is of different type, it will return the default value.
:param key: the key to look up
:param defaultValue: the value to be returned if no value is found
:returns: the value associated with the given key or the given default value
if there is no value associated with the key
.. versionadded:: 2017.0.0
"""
path = self._path + key
value = self._api.getEntryValue(path)
if not value or value.type != NT_RAW:
return defaultValue
return value.value
def putValue(self, key: str, value) -> bool:
"""Put a value in the table, trying to autodetect the NT type of
the value. Refer to this table to determine the type mapping:
======= ============================ =================================
PyType NT Type Notes
======= ============================ =================================
bool :attr:`.EntryTypes.BOOLEAN`
int :attr:`.EntryTypes.DOUBLE`
float :attr:`.EntryTypes.DOUBLE`
str :attr:`.EntryTypes.STRING`
bytes :attr:`.EntryTypes.RAW`
list Error Use `putXXXArray` methods instead
tuple Error Use `putXXXArray` methods instead
======= ============================ =================================
:param key: the key to be assigned to
:param value: the value that will be assigned
:type value: bool, int, float, str, bytes
:returns: False if the table key already exists with a different type
.. versionadded:: 2017.0.0
"""
value = Value.getFactory(value)(value)
path = self._path + key
return self._api.setEntryValue(path, value)
def setDefaultValue(self, key: str, defaultValue) -> bool:
"""If the key doesn't currently exist, then the specified value will
be assigned to the key.
:param key: the key to be assigned to
:param defaultValue: the default value to set if key doesn't exist.
:type defaultValue: bool, int, float, str, bytes
:returns: False if the table key exists with a different type
.. versionadded:: 2017.0.0
.. seealso:: :meth:`.putValue`
"""
defaultValue = Value.getFactory(defaultValue)(defaultValue)
path = self._path + key
return self._api.setDefaultEntryValue(path, defaultValue)
def getValue(self, key: str, defaultValue):
"""Gets the value associated with a key. This supports all
NetworkTables types (unlike :meth:`putValue`).
:param key: the key of the value to look up
:param defaultValue: The default value to return if the key doesn't exist
:type defaultValue: any
:returns: the value associated with the given key
:rtype: bool, int, float, str, bytes, tuple
.. versionadded:: 2017.0.0
"""
path = self._path + key
value = self._api.getEntryValue(path)
if not value:
return defaultValue
return value.value
def getAutoUpdateValue(
self, key: str, defaultValue, writeDefault: bool = True
) -> NetworkTableEntry:
"""Returns an object that will be automatically updated when the
value is updated by networktables.
:param key: the key name
:param defaultValue: Default value to use if not in the table
:type defaultValue: any
:param writeDefault: If True, put the default value to the table,
overwriting existing values
:rtype: :class:`.NetworkTableEntry`
.. note:: If you modify the returned value, the value will NOT
be written back to NetworkTables (though now there are functions
you can use to write values). See :func:`.ntproperty` if
you're looking for that sort of thing.
.. seealso:: :func:`.ntproperty` is a better alternative to use
.. versionadded:: 2015.1.3
.. versionchanged:: 2018.0.0
This now returns the same as :meth:`.NetworkTable.getEntry`
"""
return self._inst.getGlobalAutoUpdateValue(
self._path + key, defaultValue, writeDefault
)
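
An illustrative sketch of the table-oriented API above, assuming NetworkTables was initialized elsewhere; 'SmartDashboard', 'someNumber', and the other names are examples only.

```python
from networktables import NetworkTables

sd = NetworkTables.getTable("SmartDashboard")

sd.putNumber("someNumber", 1234)           # False if the key has another type
print(sd.getNumber("otherNumber", 0.0))    # default is returned when missing
print("someNumber" in sd)                  # containsKey via __contains__

# Listener signature: callable(source, key, value, isNew)
def on_change(source, key, value, is_new):
    print(source.getPath(), key, value, is_new)

sd.addEntryListener(on_change, immediateNotify=True)

auto = sd.getSubTable("Autonomous")        # created on demand if absent
auto.putString("mode", "default")
sd.setPersistent("someNumber")             # survive program restarts
```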

View File

@ -0,0 +1,59 @@
Original Apriltag code is:
(C) 2013-2015, The Regents of The University of Michigan
All rights reserved.
This software may be available under alternative licensing
terms. Contact Edwin Olson, ebolson@umich.edu, for more information.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
---
All other code is (C) 2015-2018, Matt Zucker
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.

View File

@ -0,0 +1,66 @@
Metadata-Version: 2.1
Name: apriltag
Version: 0.0.16
Summary: apriltag marker detection
Author: Bernd Winkler
Author-email: bernd.winkler@ipa.fraunhofer.de
Description-Content-Type: text/markdown
License-File: LICENSE.md
Apriltag
--------
apriltag marker detection
based on <https://github.com/swatbotics/apriltag>
Dependencies
------------
- OpenCV (optional)
Example
-------
```python
import apriltag
import cv2
img = cv2.imread('apriltag_foto.jpg', cv2.IMREAD_GRAYSCALE)
detector = apriltag.Detector()
result = detector.detect(img)
```
result is in the form of
```
[DetectionBase(tag_family='tag36h11', tag_id=2, hamming=0, goodness=0.0, decision_margin=98.58241271972656, homography=array([[ -1.41302664e-01, 1.08428082e+00, 1.67512900e+01],
[ -8.75899366e-01, 1.50245469e-01, 1.70532040e+00],
[ -4.89183533e-04, 2.12210247e-03, 6.02052342e-02]]), center=array([ 278.23643912, 28.32511859]), corners=array([[ 269.8939209 , 41.50381088],
[ 269.57183838, 11.79248142],
[ 286.1383667 , 15.84242821],
[ 286.18066406, 43.48323059]])),
DetectionBase(tag_family='tag36h11', ... etc
```
Multiple options can be specified to tune the detection performance:
--------------------------------------------------------------------
```python
options = apriltag.DetectorOptions(families='tag36h11',
border=1,
nthreads=4,
quad_decimate=1.0,
quad_blur=0.0,
refine_edges=True,
refine_decode=False,
refine_pose=False,
debug=False,
quad_contours=True)
detector = apriltag.Detector(options)
result = detector.detect(img)
```
Access values
--------
```python
tf = result[0].tag_family
cx = result[0].center[0]
```
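
A slightly fuller sketch of iterating over detections, using only the fields shown in the `DetectionBase` output above (the image filename is a placeholder):

```python
import apriltag
import cv2

img = cv2.imread('apriltag_foto.jpg', cv2.IMREAD_GRAYSCALE)
detector = apriltag.Detector(apriltag.DetectorOptions(families='tag36h11'))

for det in detector.detect(img):
    print(det.tag_family, det.tag_id, det.hamming, det.decision_margin)
    print('center:', det.center)
    print('corners:', det.corners)
```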

View File

@ -0,0 +1,10 @@
__pycache__/apriltag.cpython-311.pyc,,
apriltag-0.0.16.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
apriltag-0.0.16.dist-info/LICENSE.md,sha256=vScmN1oUqOlrYfA6OQi4BPSKS7vYFdX23xJkFN88TKw,3024
apriltag-0.0.16.dist-info/METADATA,sha256=LNMbQAG6EvRMoYZtdfVTcJWs4mBhx4I6_X_yRuXAVEE,1931
apriltag-0.0.16.dist-info/RECORD,,
apriltag-0.0.16.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
apriltag-0.0.16.dist-info/WHEEL,sha256=aB07TJDeTnMJePatzhX6zEA-mPQC-rHQWYqeQNVkxCo,106
apriltag-0.0.16.dist-info/top_level.txt,sha256=HvQS1lv6LFbX4vkt13wvInyaXX1xKAtisEadue8pnEo,19
apriltag.py,sha256=MQRYPsn22UVf_mTNd61RyN2aRTo-1rtaC5XHhUWlO4c,21199
libapriltag.so,sha256=0uZVdriCjyC8jWItht9Moi7rIfG-0mMAt_caSJeAT2g,1004152

View File

@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.43.0)
Root-Is-Purelib: false
Tag: cp311-cp311-linux_aarch64

View File

@ -0,0 +1,2 @@
apriltag
apriltags

View File

@ -0,0 +1,679 @@
#!/usr/bin/env python
"""Python wrapper for C version of apriltags. This program creates two
classes that are used to detect apriltags and extract information from
them. Using this module, you can identify all apriltags visible in an
image, and get information about the location and orientation of the
tags.
Original author: Isaac Dulin, Spring 2016
Updates: Matt Zucker, Fall 2016
"""
from __future__ import division
from __future__ import print_function
import ctypes
import collections
import os
import re
import numpy
_HAVE_CV2 = False
if __name__ == '__main__':
try:
import cv2
_HAVE_CV2 = True
    except ImportError:
pass
######################################################################
# pylint: disable=R0903
class _ImageU8(ctypes.Structure):
'''Wraps image_u8 C struct.'''
_fields_ = [
('width', ctypes.c_int),
('height', ctypes.c_int),
('stride', ctypes.c_int),
('buf', ctypes.POINTER(ctypes.c_uint8))
]
class _Matd(ctypes.Structure):
'''Wraps matd C struct.'''
_fields_ = [
('nrows', ctypes.c_int),
('ncols', ctypes.c_int),
('data', ctypes.c_double*1),
]
class _ZArray(ctypes.Structure):
'''Wraps zarray C struct.'''
_fields_ = [
('el_sz', ctypes.c_size_t),
('size', ctypes.c_int),
('alloc', ctypes.c_int),
('data', ctypes.c_void_p)
]
class _ApriltagFamily(ctypes.Structure):
'''Wraps apriltag_family C struct.'''
_fields_ = [
('ncodes', ctypes.c_int32),
('codes', ctypes.POINTER(ctypes.c_int64)),
('black_border', ctypes.c_int32),
('d', ctypes.c_int32),
('h', ctypes.c_int32),
('name', ctypes.c_char_p),
]
class _ApriltagDetection(ctypes.Structure):
'''Wraps apriltag_detection C struct.'''
_fields_ = [
('family', ctypes.POINTER(_ApriltagFamily)),
('id', ctypes.c_int),
('hamming', ctypes.c_int),
('goodness', ctypes.c_float),
('decision_margin', ctypes.c_float),
('H', ctypes.POINTER(_Matd)),
('c', ctypes.c_double*2),
('p', (ctypes.c_double*2)*4)
]
class _ApriltagDetector(ctypes.Structure):
'''Wraps apriltag_detector C struct.'''
_fields_ = [
('nthreads', ctypes.c_int),
('quad_decimate', ctypes.c_float),
('quad_sigma', ctypes.c_float),
('refine_edges', ctypes.c_int),
('refine_decode', ctypes.c_int),
('refine_pose', ctypes.c_int),
('debug', ctypes.c_int),
('quad_contours', ctypes.c_int),
]
######################################################################
def _ptr_to_array2d(datatype, ptr, rows, cols):
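    '''Reinterpret the memory behind a ctypes object as a (rows, cols) numpy array without copying.'''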
array_type = (datatype*cols)*rows
array_buf = array_type.from_address(ctypes.addressof(ptr))
return numpy.ctypeslib.as_array(array_buf, shape=(rows, cols))
def _image_u8_get_array(img_ptr):
return _ptr_to_array2d(ctypes.c_uint8,
img_ptr.contents.buf.contents,
img_ptr.contents.height,
img_ptr.contents.stride)
def _matd_get_array(mat_ptr):
return _ptr_to_array2d(ctypes.c_double,
mat_ptr.contents.data,
int(mat_ptr.contents.nrows),
int(mat_ptr.contents.ncols))
######################################################################
DetectionBase = collections.namedtuple(
'DetectionBase',
'tag_family, tag_id, hamming, goodness, decision_margin, '
'homography, center, corners')
class Detection(DetectionBase):
'''Pythonic wrapper for apriltag_detection which derives from named
tuple class.
'''
_print_fields = [
'Family', 'ID', 'Hamming error', 'Goodness',
'Decision margin', 'Homography', 'Center', 'Corners'
]
_max_len = max(len(field) for field in _print_fields)
def tostring(self, values=None, indent=0):
'''Converts this object to a string with the given level of indentation.'''
rval = []
indent_str = ' '*(self._max_len+2+indent)
if not values:
values = collections.OrderedDict(zip(self._print_fields, self))
for label in values:
value_str = str(values[label])
if value_str.find('\n') > 0:
value_str = value_str.split('\n')
value_str = [value_str[0]] + [indent_str+v for v in value_str[1:]]
value_str = '\n'.join(value_str)
rval.append('{:>{}s}: {}'.format(
label, self._max_len+indent, value_str))
return '\n'.join(rval)
def __str__(self):
        return self.tostring()
######################################################################
class DetectorOptions(object):
    '''Convenience wrapper for an options object to pass into the Detector
initializer. You can also pass in the output of an
argparse.ArgumentParser on which you have called add_arguments.
'''
# pylint: disable=R0902
# pylint: disable=R0913
def __init__(self,
families='tag36h11',
border=1,
nthreads=4,
quad_decimate=1.0,
quad_blur=0.0,
refine_edges=True,
refine_decode=False,
refine_pose=False,
debug=False,
quad_contours=True):
self.families = families
self.border = int(border)
self.nthreads = int(nthreads)
self.quad_decimate = float(quad_decimate)
self.quad_sigma = float(quad_blur)
self.refine_edges = int(refine_edges)
self.refine_decode = int(refine_decode)
self.refine_pose = int(refine_pose)
self.debug = int(debug)
self.quad_contours = quad_contours
######################################################################
def add_arguments(parser):
'''Add arguments to the given argparse.ArgumentParser object to enable
passing in the resulting parsed arguments into the initializer for
Detector.
'''
defaults = DetectorOptions()
show_default = ' (default %(default)s)'
parser.add_argument('-f', metavar='FAMILIES',
dest='families', default=defaults.families,
help='Tag families' + show_default)
parser.add_argument('-B', metavar='N',
dest='border', type=int, default=defaults.border,
help='Tag border size in pixels' + show_default)
parser.add_argument('-t', metavar='N',
dest='nthreads', type=int, default=defaults.nthreads,
help='Number of threads' + show_default)
parser.add_argument('-x', metavar='SCALE',
dest='quad_decimate', type=float,
default=defaults.quad_decimate,
help='Quad decimation factor' + show_default)
parser.add_argument('-b', metavar='SIGMA',
dest='quad_sigma', type=float, default=defaults.quad_sigma,
help='Apply low-pass blur to input' + show_default)
parser.add_argument('-0', dest='refine_edges', default=True,
action='store_false',
help='Spend less time trying to align edges of tags')
parser.add_argument('-1', dest='refine_decode', default=False,
action='store_true',
help='Spend more time trying to decode tags')
parser.add_argument('-2', dest='refine_pose', default=False,
action='store_true',
help='Spend more time trying to precisely localize tags')
parser.add_argument('-c', dest='quad_contours', default=False,
action='store_true',
help='Use new contour-based quad detection')
######################################################################
class Detector(object):
'''Pythonic wrapper for apriltag_detector. Initialize by passing in
the output of an argparse.ArgumentParser on which you have called
add_arguments; or an instance of the DetectorOptions class. You can
also optionally pass in a list of paths to search for the C dynamic
library used by ctypes.
'''
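    # Two equivalent ways to construct a Detector (sketch; values are illustrative):
    #
    #     detector = Detector(DetectorOptions(families='tag36h11', nthreads=4))
    #
    #     import argparse
    #     parser = argparse.ArgumentParser()
    #     add_arguments(parser)
    #     detector = Detector(parser.parse_args())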
def __init__(self, options=None, searchpath=[]):
if options is None:
options = DetectorOptions()
self.options = options
# detect OS to get extension for DLL
uname0 = os.uname()[0]
if uname0 == 'Darwin':
extension = '.dylib'
else:
extension = '.so' # TODO test on windows?
filename = 'libapriltag'+extension
self.libc = None
self.tag_detector = None
for path in searchpath:
relpath = os.path.join(path, filename)
if os.path.exists(relpath):
self.libc = ctypes.CDLL(relpath)
break
if self.libc is None:
fpath_installed = os.path.dirname(__file__) + "/" + filename
self.libc = ctypes.CDLL(fpath_installed)
# if full path not found just try opening the raw filename;
# this should search whatever paths dlopen is supposed to
# search.
if self.libc is None:
self.libc = ctypes.CDLL(filename)
if self.libc is None:
raise RuntimeError('could not find DLL named ' + filename)
# declare return types of libc function
self._declare_return_types()
# create the c-_apriltag_detector object
self.tag_detector = self.libc.apriltag_detector_create()
self.tag_detector.contents.nthreads = int(options.nthreads)
self.tag_detector.contents.quad_decimate = float(options.quad_decimate)
self.tag_detector.contents.quad_sigma = float(options.quad_sigma)
        self.tag_detector.contents.refine_edges = int(options.refine_edges)
        self.tag_detector.contents.refine_decode = int(options.refine_decode)
        self.tag_detector.contents.refine_pose = int(options.refine_pose)
if options.quad_contours:
self.libc.apriltag_detector_enable_quad_contours(self.tag_detector, 1)
self.families = []
flist = self.libc.apriltag_family_list()
for i in range(flist.contents.size):
ptr = ctypes.c_char_p()
self.libc.zarray_get(flist, i, ctypes.byref(ptr))
self.families.append(ctypes.string_at(ptr))
self.libc.apriltag_family_list_destroy(flist)
if options.families == 'all':
families_list = self.families
elif isinstance(options.families, list):
families_list = options.families
else:
families_list = [n for n in re.split(r'\W+', options.families) if n]
# add tags
for family in families_list:
self.add_tag_family(family)
def __del__(self):
if self.tag_detector is not None:
self.libc.apriltag_detector_destroy(self.tag_detector)
def detect(self, img, return_image=False):
        '''Run detections on the provided image. The image must be a grayscale
image of type numpy.uint8.'''
assert len(img.shape) == 2
assert img.dtype == numpy.uint8
c_img = self._convert_image(img)
return_info = []
#detect apriltags in the image
detections = self.libc.apriltag_detector_detect(self.tag_detector, c_img)
apriltag = ctypes.POINTER(_ApriltagDetection)()
for i in range(0, detections.contents.size):
#extract the data for each apriltag that was identified
self.libc.zarray_get(detections, i, ctypes.byref(apriltag))
tag = apriltag.contents
homography = _matd_get_array(tag.H).copy()
center = numpy.ctypeslib.as_array(tag.c, shape=(2,)).copy()
corners = numpy.ctypeslib.as_array(tag.p, shape=(4, 2)).copy()
detection = Detection(
ctypes.string_at(tag.family.contents.name),
tag.id,
tag.hamming,
tag.goodness,
tag.decision_margin,
homography,
center,
corners)
            # append this Detection namedtuple to the list of results
return_info.append(detection)
self.libc.image_u8_destroy(c_img)
if return_image:
dimg = self._vis_detections(img.shape, detections)
rval = return_info, dimg
else:
rval = return_info
self.libc.apriltag_detections_destroy(detections)
return rval
def add_tag_family(self, name):
'''Add a single tag family to this detector.'''
family = self.libc.apriltag_family_create(name.encode('ascii'))
if family:
family.contents.border = self.options.border
self.libc.apriltag_detector_add_family(self.tag_detector, family)
else:
print('Unrecognized tag family name. Try e.g. tag36h11')
def detection_pose(self, detection, camera_params, tag_size=1, z_sign=1):
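        '''Estimate the rigid transform of the tag relative to the camera.

        camera_params is (fx, fy, cx, cy) in pixels and tag_size is the tag
        edge length in user-chosen units. Returns (pose, init_error,
        final_error), where pose is a homogeneous transform matrix
        (rotation in pose[:3, :3], translation in pose[:3, 3]) computed by
        the C library's pose_from_homography routine.
        '''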
fx, fy, cx, cy = [ ctypes.c_double(c) for c in camera_params ]
H = self.libc.matd_create(3, 3)
arr = _matd_get_array(H)
arr[:] = detection.homography
corners = detection.corners.flatten().astype(numpy.float64)
dptr = ctypes.POINTER(ctypes.c_double)
corners = corners.ctypes.data_as(dptr)
init_error = ctypes.c_double(0)
final_error = ctypes.c_double(0)
Mptr = self.libc.pose_from_homography(H, fx, fy, cx, cy,
ctypes.c_double(tag_size),
ctypes.c_double(z_sign),
corners,
dptr(init_error),
dptr(final_error))
M = _matd_get_array(Mptr).copy()
self.libc.matd_destroy(H)
self.libc.matd_destroy(Mptr)
return M, init_error.value, final_error.value
def _vis_detections(self, shape, detections):
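        '''Render the detections into a grayscale image of the given (height, width) shape via the C library.'''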
height, width = shape
c_dimg = self.libc.image_u8_create(width, height)
self.libc.apriltag_vis_detections(detections, c_dimg)
tmp = _image_u8_get_array(c_dimg)
rval = tmp[:, :width].copy()
self.libc.image_u8_destroy(c_dimg)
return rval
def _declare_return_types(self):
self.libc.apriltag_detector_create.restype = ctypes.POINTER(_ApriltagDetector)
self.libc.apriltag_family_create.restype = ctypes.POINTER(_ApriltagFamily)
self.libc.apriltag_detector_detect.restype = ctypes.POINTER(_ZArray)
self.libc.image_u8_create.restype = ctypes.POINTER(_ImageU8)
self.libc.image_u8_write_pnm.restype = ctypes.c_int
self.libc.apriltag_family_list.restype = ctypes.POINTER(_ZArray)
self.libc.apriltag_vis_detections.restype = None
self.libc.pose_from_homography.restype = ctypes.POINTER(_Matd)
self.libc.matd_create.restype = ctypes.POINTER(_Matd)
def _convert_image(self, img):
height = img.shape[0]
width = img.shape[1]
c_img = self.libc.image_u8_create(width, height)
tmp = _image_u8_get_array(c_img)
# copy the opencv image into the destination array, accounting for the
# difference between stride & width.
tmp[:, :width] = img
# tmp goes out of scope here but we don't care because
# the underlying data is still in c_img.
return c_img
######################################################################
def _get_demo_searchpath():
return [
os.path.join(os.path.dirname(__file__), '../build/lib'),
os.path.join(os.getcwd(), '../build/lib')
]
######################################################################
def _camera_params(pstr):
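    '''Parse a "fx,fy,cx,cy" string (optionally wrapped in parentheses) into a tuple of four floats; used as the argparse type for -k/--camera-params.'''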
pstr = pstr.strip()
if pstr[0] == '(' and pstr[-1] == ')':
pstr = pstr[1:-1]
params = tuple( [ float(param.strip()) for param in pstr.split(',') ] )
assert( len(params) == 4)
return params
######################################################################
def _draw_pose(overlay, camera_params, tag_size, pose, z_sign=1):
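    '''Project a wireframe box with edge length tag_size through the camera intrinsics using the estimated tag pose and draw its edges onto the overlay image.'''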
opoints = numpy.array([
-1, -1, 0,
1, -1, 0,
1, 1, 0,
-1, 1, 0,
-1, -1, -2*z_sign,
1, -1, -2*z_sign,
1, 1, -2*z_sign,
-1, 1, -2*z_sign,
]).reshape(-1, 1, 3) * 0.5*tag_size
edges = numpy.array([
0, 1,
1, 2,
2, 3,
3, 0,
0, 4,
1, 5,
2, 6,
3, 7,
4, 5,
5, 6,
6, 7,
7, 4
]).reshape(-1, 2)
fx, fy, cx, cy = camera_params
K = numpy.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
rvec, _ = cv2.Rodrigues(pose[:3,:3])
tvec = pose[:3, 3]
dcoeffs = numpy.zeros(5)
ipoints, _ = cv2.projectPoints(opoints, rvec, tvec, K, dcoeffs)
ipoints = numpy.round(ipoints).astype(int)
ipoints = [tuple(pt) for pt in ipoints.reshape(-1, 2)]
for i, j in edges:
cv2.line(overlay, ipoints[i], ipoints[j], (0, 255, 0), 1, 16)
######################################################################
def main():
'''Test function for this Python wrapper.'''
from argparse import ArgumentParser
# for some reason pylint complains about members being undefined :(
# pylint: disable=E1101
parser = ArgumentParser(
description='test apriltag Python bindings')
parser.add_argument('filenames', metavar='IMAGE', nargs='+',
help='files to scan')
parser.add_argument('-n', '--no-gui', action='store_true',
help='suppress OpenCV gui')
parser.add_argument('-d', '--debug-images', action='store_true',
help='output debug detection image')
parser.add_argument('-k', '--camera-params', type=_camera_params,
default=None,
help='intrinsic parameters for camera (in the form fx,fy,cx,cy)')
parser.add_argument('-s', '--tag-size', type=float,
default=1.0,
help='tag size in user-specified units (default=1.0)')
add_arguments(parser)
options = parser.parse_args()
# set up a reasonable search path for the apriltag DLL inside the
# github repo this file lives in;
#
# for "real" deployments, either install the DLL in the appropriate
# system-wide library directory, or specify your own search paths
# as needed.
det = Detector(options, searchpath=_get_demo_searchpath())
use_gui = not options.no_gui
if use_gui and not _HAVE_CV2:
use_gui = False
print('suppressing GUI because cv2 module not found')
if not _HAVE_CV2:
from PIL import Image
for filename in options.filenames:
if _HAVE_CV2:
orig = cv2.imread(filename)
if len(orig.shape) == 3:
                gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR images
else:
gray = orig
else:
pil_image = Image.open(filename)
orig = numpy.array(pil_image)
gray = numpy.array(pil_image.convert('L'))
detections, dimg = det.detect(gray, return_image=True)
if len(orig.shape) == 3:
overlay = orig // 2 + dimg[:, :, None] // 2
else:
overlay = gray // 2 + dimg // 2
num_detections = len(detections)
print('Detected {} tags in {}\n'.format(
num_detections, os.path.split(filename)[1]))
for i, detection in enumerate(detections):
print( 'Detection {} of {}:'.format(i+1, num_detections))
print()
print(detection.tostring(indent=2))
if options.camera_params is not None:
pose, e0, e1 = det.detection_pose(detection,
options.camera_params,
options.tag_size)
if _HAVE_CV2:
_draw_pose(overlay,
options.camera_params,
options.tag_size,
pose)
print(detection.tostring(
collections.OrderedDict([('Pose',pose),
('InitError', e0),
('FinalError', e1)]),
indent=2))
print()
if options.debug_images:
if _HAVE_CV2:
cv2.imwrite('detections.png', overlay)
else:
output = Image.fromarray(overlay)
output.save('detections.png')
if use_gui:
cv2.imshow('win', overlay)
while cv2.waitKey(5) < 0:
pass
if __name__ == '__main__':
main()

View File

@ -0,0 +1,118 @@
__all__: list[str] = []
# Enumerations
StsOk: int
STS_OK: int
StsBackTrace: int
STS_BACK_TRACE: int
StsError: int
STS_ERROR: int
StsInternal: int
STS_INTERNAL: int
StsNoMem: int
STS_NO_MEM: int
StsBadArg: int
STS_BAD_ARG: int
StsBadFunc: int
STS_BAD_FUNC: int
StsNoConv: int
STS_NO_CONV: int
StsAutoTrace: int
STS_AUTO_TRACE: int
HeaderIsNull: int
HEADER_IS_NULL: int
BadImageSize: int
BAD_IMAGE_SIZE: int
BadOffset: int
BAD_OFFSET: int
BadDataPtr: int
BAD_DATA_PTR: int
BadStep: int
BAD_STEP: int
BadModelOrChSeq: int
BAD_MODEL_OR_CH_SEQ: int
BadNumChannels: int
BAD_NUM_CHANNELS: int
BadNumChannel1U: int
BAD_NUM_CHANNEL1U: int
BadDepth: int
BAD_DEPTH: int
BadAlphaChannel: int
BAD_ALPHA_CHANNEL: int
BadOrder: int
BAD_ORDER: int
BadOrigin: int
BAD_ORIGIN: int
BadAlign: int
BAD_ALIGN: int
BadCallBack: int
BAD_CALL_BACK: int
BadTileSize: int
BAD_TILE_SIZE: int
BadCOI: int
BAD_COI: int
BadROISize: int
BAD_ROISIZE: int
MaskIsTiled: int
MASK_IS_TILED: int
StsNullPtr: int
STS_NULL_PTR: int
StsVecLengthErr: int
STS_VEC_LENGTH_ERR: int
StsFilterStructContentErr: int
STS_FILTER_STRUCT_CONTENT_ERR: int
StsKernelStructContentErr: int
STS_KERNEL_STRUCT_CONTENT_ERR: int
StsFilterOffsetErr: int
STS_FILTER_OFFSET_ERR: int
StsBadSize: int
STS_BAD_SIZE: int
StsDivByZero: int
STS_DIV_BY_ZERO: int
StsInplaceNotSupported: int
STS_INPLACE_NOT_SUPPORTED: int
StsObjectNotFound: int
STS_OBJECT_NOT_FOUND: int
StsUnmatchedFormats: int
STS_UNMATCHED_FORMATS: int
StsBadFlag: int
STS_BAD_FLAG: int
StsBadPoint: int
STS_BAD_POINT: int
StsBadMask: int
STS_BAD_MASK: int
StsUnmatchedSizes: int
STS_UNMATCHED_SIZES: int
StsUnsupportedFormat: int
STS_UNSUPPORTED_FORMAT: int
StsOutOfRange: int
STS_OUT_OF_RANGE: int
StsParseError: int
STS_PARSE_ERROR: int
StsNotImplemented: int
STS_NOT_IMPLEMENTED: int
StsBadMemBlock: int
STS_BAD_MEM_BLOCK: int
StsAssert: int
STS_ASSERT: int
GpuNotSupported: int
GPU_NOT_SUPPORTED: int
GpuApiCallError: int
GPU_API_CALL_ERROR: int
OpenGlNotSupported: int
OPEN_GL_NOT_SUPPORTED: int
OpenGlApiCallError: int
OPEN_GL_API_CALL_ERROR: int
OpenCLApiCallError: int
OPEN_CLAPI_CALL_ERROR: int
OpenCLDoubleNotSupported: int
OPEN_CLDOUBLE_NOT_SUPPORTED: int
OpenCLInitError: int
OPEN_CLINIT_ERROR: int
OpenCLNoAMDBlasFft: int
OPEN_CLNO_AMDBLAS_FFT: int
Code = int
"""One of [StsOk, STS_OK, StsBackTrace, STS_BACK_TRACE, StsError, STS_ERROR, StsInternal, STS_INTERNAL, StsNoMem, STS_NO_MEM, StsBadArg, STS_BAD_ARG, StsBadFunc, STS_BAD_FUNC, StsNoConv, STS_NO_CONV, StsAutoTrace, STS_AUTO_TRACE, HeaderIsNull, HEADER_IS_NULL, BadImageSize, BAD_IMAGE_SIZE, BadOffset, BAD_OFFSET, BadDataPtr, BAD_DATA_PTR, BadStep, BAD_STEP, BadModelOrChSeq, BAD_MODEL_OR_CH_SEQ, BadNumChannels, BAD_NUM_CHANNELS, BadNumChannel1U, BAD_NUM_CHANNEL1U, BadDepth, BAD_DEPTH, BadAlphaChannel, BAD_ALPHA_CHANNEL, BadOrder, BAD_ORDER, BadOrigin, BAD_ORIGIN, BadAlign, BAD_ALIGN, BadCallBack, BAD_CALL_BACK, BadTileSize, BAD_TILE_SIZE, BadCOI, BAD_COI, BadROISize, BAD_ROISIZE, MaskIsTiled, MASK_IS_TILED, StsNullPtr, STS_NULL_PTR, StsVecLengthErr, STS_VEC_LENGTH_ERR, StsFilterStructContentErr, STS_FILTER_STRUCT_CONTENT_ERR, StsKernelStructContentErr, STS_KERNEL_STRUCT_CONTENT_ERR, StsFilterOffsetErr, STS_FILTER_OFFSET_ERR, StsBadSize, STS_BAD_SIZE, StsDivByZero, STS_DIV_BY_ZERO, StsInplaceNotSupported, STS_INPLACE_NOT_SUPPORTED, StsObjectNotFound, STS_OBJECT_NOT_FOUND, StsUnmatchedFormats, STS_UNMATCHED_FORMATS, StsBadFlag, STS_BAD_FLAG, StsBadPoint, STS_BAD_POINT, StsBadMask, STS_BAD_MASK, StsUnmatchedSizes, STS_UNMATCHED_SIZES, StsUnsupportedFormat, STS_UNSUPPORTED_FORMAT, StsOutOfRange, STS_OUT_OF_RANGE, StsParseError, STS_PARSE_ERROR, StsNotImplemented, STS_NOT_IMPLEMENTED, StsBadMemBlock, STS_BAD_MEM_BLOCK, StsAssert, STS_ASSERT, GpuNotSupported, GPU_NOT_SUPPORTED, GpuApiCallError, GPU_API_CALL_ERROR, OpenGlNotSupported, OPEN_GL_NOT_SUPPORTED, OpenGlApiCallError, OPEN_GL_API_CALL_ERROR, OpenCLApiCallError, OPEN_CLAPI_CALL_ERROR, OpenCLDoubleNotSupported, OPEN_CLDOUBLE_NOT_SUPPORTED, OpenCLInitError, OPEN_CLINIT_ERROR, OpenCLNoAMDBlasFft, OPEN_CLNO_AMDBLAS_FFT]"""

File diff suppressed because it is too large

View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) Olli-Pekka Heinisuo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,181 @@
'''
OpenCV Python binary extension loader
'''
import os
import importlib
import sys
__all__ = []
try:
import numpy
import numpy.core.multiarray
except ImportError:
print('OpenCV bindings requires "numpy" package.')
print('Install it via command:')
print(' pip install numpy')
raise
# TODO
# is_x64 = sys.maxsize > 2**32
def __load_extra_py_code_for_module(base, name, enable_debug_print=False):
module_name = "{}.{}".format(__name__, name)
export_module_name = "{}.{}".format(base, name)
native_module = sys.modules.pop(module_name, None)
try:
py_module = importlib.import_module(module_name)
except ImportError as err:
if enable_debug_print:
print("Can't load Python code for module:", module_name,
". Reason:", err)
# Extension doesn't contain extra py code
return False
if base in sys.modules and not hasattr(sys.modules[base], name):
setattr(sys.modules[base], name, py_module)
sys.modules[export_module_name] = py_module
# If it is C extension module it is already loaded by cv2 package
if native_module:
setattr(py_module, "_native", native_module)
for k, v in filter(lambda kv: not hasattr(py_module, kv[0]),
native_module.__dict__.items()):
if enable_debug_print: print(' symbol({}): {} = {}'.format(name, k, v))
setattr(py_module, k, v)
return True
def __collect_extra_submodules(enable_debug_print=False):
def modules_filter(module):
return all((
# module is not internal
not module.startswith("_"),
not module.startswith("python-"),
# it is not a file
os.path.isdir(os.path.join(_extra_submodules_init_path, module))
))
if sys.version_info[0] < 3:
if enable_debug_print:
print("Extra submodules is loaded only for Python 3")
return []
__INIT_FILE_PATH = os.path.abspath(__file__)
_extra_submodules_init_path = os.path.dirname(__INIT_FILE_PATH)
return filter(modules_filter, os.listdir(_extra_submodules_init_path))
def bootstrap():
import sys
import copy
save_sys_path = copy.copy(sys.path)
if hasattr(sys, 'OpenCV_LOADER'):
print(sys.path)
raise ImportError('ERROR: recursion is detected during loading of "cv2" binary extensions. Check OpenCV installation.')
sys.OpenCV_LOADER = True
DEBUG = False
if hasattr(sys, 'OpenCV_LOADER_DEBUG'):
DEBUG = True
import platform
if DEBUG: print('OpenCV loader: os.name="{}" platform.system()="{}"'.format(os.name, str(platform.system())))
LOADER_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
PYTHON_EXTENSIONS_PATHS = []
BINARIES_PATHS = []
g_vars = globals()
l_vars = locals().copy()
if sys.version_info[:2] < (3, 0):
from . load_config_py2 import exec_file_wrapper
else:
from . load_config_py3 import exec_file_wrapper
def load_first_config(fnames, required=True):
for fname in fnames:
fpath = os.path.join(LOADER_DIR, fname)
if not os.path.exists(fpath):
if DEBUG: print('OpenCV loader: config not found, skip: {}'.format(fpath))
continue
if DEBUG: print('OpenCV loader: loading config: {}'.format(fpath))
exec_file_wrapper(fpath, g_vars, l_vars)
return True
if required:
raise ImportError('OpenCV loader: missing configuration file: {}. Check OpenCV installation.'.format(fnames))
load_first_config(['config.py'], True)
load_first_config([
'config-{}.{}.py'.format(sys.version_info[0], sys.version_info[1]),
'config-{}.py'.format(sys.version_info[0])
], True)
if DEBUG: print('OpenCV loader: PYTHON_EXTENSIONS_PATHS={}'.format(str(l_vars['PYTHON_EXTENSIONS_PATHS'])))
if DEBUG: print('OpenCV loader: BINARIES_PATHS={}'.format(str(l_vars['BINARIES_PATHS'])))
applySysPathWorkaround = False
if hasattr(sys, 'OpenCV_REPLACE_SYS_PATH_0'):
applySysPathWorkaround = True
else:
try:
BASE_DIR = os.path.dirname(LOADER_DIR)
if sys.path[0] == BASE_DIR or os.path.realpath(sys.path[0]) == BASE_DIR:
applySysPathWorkaround = True
except:
if DEBUG: print('OpenCV loader: exception during checking workaround for sys.path[0]')
pass # applySysPathWorkaround is False
for p in reversed(l_vars['PYTHON_EXTENSIONS_PATHS']):
sys.path.insert(1 if not applySysPathWorkaround else 0, p)
if os.name == 'nt':
if sys.version_info[:2] >= (3, 8): # https://github.com/python/cpython/pull/12302
for p in l_vars['BINARIES_PATHS']:
try:
os.add_dll_directory(p)
except Exception as e:
if DEBUG: print('Failed os.add_dll_directory(): '+ str(e))
pass
os.environ['PATH'] = ';'.join(l_vars['BINARIES_PATHS']) + ';' + os.environ.get('PATH', '')
if DEBUG: print('OpenCV loader: PATH={}'.format(str(os.environ['PATH'])))
else:
# amending of LD_LIBRARY_PATH works for sub-processes only
os.environ['LD_LIBRARY_PATH'] = ':'.join(l_vars['BINARIES_PATHS']) + ':' + os.environ.get('LD_LIBRARY_PATH', '')
if DEBUG: print("Relink everything from native cv2 module to cv2 package")
py_module = sys.modules.pop("cv2")
native_module = importlib.import_module("cv2")
sys.modules["cv2"] = py_module
setattr(py_module, "_native", native_module)
for item_name, item in filter(lambda kv: kv[0] not in ("__file__", "__loader__", "__spec__",
"__name__", "__package__"),
native_module.__dict__.items()):
if item_name not in g_vars:
g_vars[item_name] = item
sys.path = save_sys_path # multiprocessing should start from bootstrap code (https://github.com/opencv/opencv/issues/18502)
try:
del sys.OpenCV_LOADER
except Exception as e:
if DEBUG:
print("Exception during delete OpenCV_LOADER:", e)
if DEBUG: print('OpenCV loader: binary extension... OK')
for submodule in __collect_extra_submodules(DEBUG):
if __load_extra_py_code_for_module("cv2", submodule, DEBUG):
if DEBUG: print("Extra Python code for", submodule, "is loaded")
if DEBUG: print('OpenCV loader: DONE')
bootstrap()

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff