Diffstat (limited to 'tools')
-rwxr-xr-x  tools/dissectors/make-dissector-reg.py | 18
-rw-r--r--  tools/gr-usrptest/CMakeLists.txt | 198
-rw-r--r--  tools/gr-usrptest/MANIFEST.md | 16
-rw-r--r--  tools/gr-usrptest/README.md | 18
-rw-r--r--  tools/gr-usrptest/apps/CMakeLists.txt | 26
-rwxr-xr-x  tools/gr-usrptest/apps/usrp_phasealignment.py | 72
-rwxr-xr-x  tools/gr-usrptest/apps/usrp_selftest.py | 17
-rw-r--r--  tools/gr-usrptest/cmake/Modules/CMakeParseArgumentsCopy.cmake | 138
-rw-r--r--  tools/gr-usrptest/cmake/Modules/FindCppUnit.cmake | 39
-rw-r--r--  tools/gr-usrptest/cmake/Modules/FindGnuradioRuntime.cmake | 36
-rw-r--r--  tools/gr-usrptest/cmake/Modules/GrMiscUtils.cmake | 528
-rw-r--r--  tools/gr-usrptest/cmake/Modules/GrPlatform.cmake | 54
-rw-r--r--  tools/gr-usrptest/cmake/Modules/GrPython.cmake | 241
-rw-r--r--  tools/gr-usrptest/cmake/Modules/GrSwig.cmake | 251
-rw-r--r--  tools/gr-usrptest/cmake/Modules/GrTest.cmake | 143
-rw-r--r--  tools/gr-usrptest/cmake/Modules/UseSWIG.cmake | 304
-rw-r--r--  tools/gr-usrptest/cmake/Modules/usrptestConfig.cmake | 30
-rw-r--r--  tools/gr-usrptest/cmake/cmake_uninstall.cmake.in | 32
-rw-r--r--  tools/gr-usrptest/docs/CMakeLists.txt | 35
-rw-r--r--  tools/gr-usrptest/docs/README.usrptest | 11
-rw-r--r--  tools/gr-usrptest/docs/doxygen/CMakeLists.txt | 52
-rw-r--r--  tools/gr-usrptest/docs/doxygen/Doxyfile.in | 1922
-rw-r--r--  tools/gr-usrptest/docs/doxygen/Doxyfile.swig_doc.in | 1890
-rw-r--r--  tools/gr-usrptest/docs/doxygen/doxyxml/__init__.py | 82
-rw-r--r--  tools/gr-usrptest/docs/doxygen/doxyxml/base.py | 219
-rw-r--r--  tools/gr-usrptest/docs/doxygen/doxyxml/doxyindex.py | 301
-rw-r--r--  tools/gr-usrptest/docs/doxygen/doxyxml/generated/__init__.py | 7
-rw-r--r--  tools/gr-usrptest/docs/doxygen/doxyxml/generated/compound.py | 503
-rw-r--r--  tools/gr-usrptest/docs/doxygen/doxyxml/generated/compoundsuper.py | 8342
-rw-r--r--  tools/gr-usrptest/docs/doxygen/doxyxml/generated/index.py | 77
-rw-r--r--  tools/gr-usrptest/docs/doxygen/doxyxml/generated/indexsuper.py | 523
-rw-r--r--  tools/gr-usrptest/docs/doxygen/doxyxml/text.py | 56
-rw-r--r--  tools/gr-usrptest/docs/doxygen/other/group_defs.dox | 7
-rw-r--r--  tools/gr-usrptest/docs/doxygen/other/main_page.dox | 10
-rw-r--r--  tools/gr-usrptest/docs/doxygen/swig_doc.py | 328
-rwxr-xr-x  tools/gr-usrptest/examples/lv_control_example.py | 81
-rw-r--r--  tools/gr-usrptest/examples/phase_diff_x310_ubx_example.grc | 3296
-rw-r--r--  tools/gr-usrptest/grc/CMakeLists.txt | 23
-rw-r--r--  tools/gr-usrptest/grc/usrptest_measurement_sink_f.xml | 38
-rw-r--r--  tools/gr-usrptest/grc/usrptest_phase_calc_ccf.xml | 20
-rw-r--r--  tools/gr-usrptest/include/usrptest/CMakeLists.txt | 26
-rw-r--r--  tools/gr-usrptest/include/usrptest/api.h | 33
-rw-r--r--  tools/gr-usrptest/include/usrptest/measurement_sink_f.h | 61
-rw-r--r--  tools/gr-usrptest/lib/CMakeLists.txt | 59
-rw-r--r--  tools/gr-usrptest/lib/measurement_sink_f_impl.cc | 124
-rw-r--r--  tools/gr-usrptest/lib/measurement_sink_f_impl.h | 62
-rw-r--r--  tools/gr-usrptest/lib/qa_usrptest.cc | 36
-rw-r--r--  tools/gr-usrptest/lib/qa_usrptest.h | 38
-rw-r--r--  tools/gr-usrptest/lib/test_usrptest.cc | 48
-rw-r--r--  tools/gr-usrptest/python/CMakeLists.txt | 49
-rw-r--r--  tools/gr-usrptest/python/__init__.py | 35
-rw-r--r--  tools/gr-usrptest/python/build_utils.py | 226
-rw-r--r--  tools/gr-usrptest/python/build_utils_codes.py | 52
-rw-r--r--  tools/gr-usrptest/python/flowgraphs/CMakeLists.txt | 28
-rw-r--r--  tools/gr-usrptest/python/flowgraphs/__init__.py | 14
-rw-r--r--  tools/gr-usrptest/python/flowgraphs/phasealignment_fg.py | 119
-rw-r--r--  tools/gr-usrptest/python/flowgraphs/selftest_fg.py | 122
-rw-r--r--  tools/gr-usrptest/python/functions.py | 157
-rw-r--r--  tools/gr-usrptest/python/labview_control/CMakeLists.txt | 28
-rw-r--r--  tools/gr-usrptest/python/labview_control/__init__.py | 14
-rw-r--r--  tools/gr-usrptest/python/labview_control/lv_control.py | 87
-rw-r--r--  tools/gr-usrptest/python/phase_calc_ccf.py | 47
-rwxr-xr-x  tools/gr-usrptest/python/qa_measurement_sink_f.py | 41
-rw-r--r--  tools/gr-usrptest/python/rts_tests/CMakeLists.txt | 33
-rw-r--r--  tools/gr-usrptest/python/rts_tests/__init__.py | 14
-rwxr-xr-x  tools/gr-usrptest/python/rts_tests/test_phasealignment.py | 174
-rw-r--r--  tools/gr-usrptest/python/setup.py | 13
-rw-r--r--  tools/gr-usrptest/swig/CMakeLists.txt | 65
-rw-r--r--  tools/gr-usrptest/swig/usrptest_swig.i | 16
-rw-r--r--  tools/kitchen_sink/kitchen_sink.cpp | 1
70 files changed, 21796 insertions, 10 deletions
diff --git a/tools/dissectors/make-dissector-reg.py b/tools/dissectors/make-dissector-reg.py
index 44972909b..37170a84f 100755
--- a/tools/dissectors/make-dissector-reg.py
+++ b/tools/dissectors/make-dissector-reg.py
@@ -59,7 +59,7 @@ elif registertype == "dissectors":
*/
"""
else:
- print "Unknown output type '%s'" % registertype
+ print("Unknown output type '%s'" % registertype)
sys.exit(1)
@@ -77,7 +77,7 @@ for file in files:
filenames.append(os.path.join(srcdir, file))
if len(filenames) < 1:
- print "No files found"
+ print("No files found")
sys.exit(1)
@@ -118,7 +118,7 @@ if cache_filename:
cache_file = open(cache_filename, 'rb')
cache = pickle.load(cache_file)
cache_file.close()
- if not cache.has_key(VERSION_KEY) or cache[VERSION_KEY] != CUR_VERSION:
+ if (VERSION_KEY not in cache) or cache[VERSION_KEY] != CUR_VERSION:
cache = {VERSION_KEY: CUR_VERSION}
except:
cache = {VERSION_KEY: CUR_VERSION}
@@ -127,10 +127,10 @@ if cache_filename:
for filename in filenames:
file = open(filename)
cur_mtime = os.fstat(file.fileno())[ST_MTIME]
- if cache and cache.has_key(filename):
+ if cache and (filename in cache):
cdict = cache[filename]
if cur_mtime == cdict['mtime']:
-# print "Pulling %s from cache" % (filename)
+# print("Pulling %s from cache" % (filename))
regs['proto_reg'].extend(cdict['proto_reg'])
regs['handoff_reg'].extend(cdict['handoff_reg'])
regs['wtap_register'].extend(cdict['wtap_register'])
@@ -144,7 +144,7 @@ for filename in filenames:
'handoff_reg': [],
'wtap_register': [],
}
-# print "Searching %s" % (filename)
+# print("Searching %s" % (filename))
for line in file.readlines():
for action in patterns:
regex = action[1]
@@ -154,7 +154,7 @@ for filename in filenames:
sym_type = action[0]
regs[sym_type].append(symbol)
if cache is not None:
-# print "Caching %s for %s: %s" % (sym_type, filename, symbol)
+# print("Caching %s for %s: %s" % (sym_type, filename, symbol))
cache[filename][sym_type].append(symbol)
file.close()
@@ -165,7 +165,7 @@ if cache is not None and cache_filename is not None:
# Make sure we actually processed something
if len(regs['proto_reg']) < 1:
- print "No protocol registrations found"
+ print("No protocol registrations found")
sys.exit(1)
# Sort the lists to make them pretty
@@ -252,7 +252,7 @@ register_wtap_module(void)
reg_code.write(line)
reg_code.write("}\n");
- reg_code.write("#endif\n");
+ reg_code.write("#endif\n");
else:
reg_code.write("""
static gulong proto_reg_count(void)
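The hunks above all apply the same Python 3 compatibility idioms: the print statement becomes a function call, and dict.has_key() is replaced by the "in" operator. A minimal standalone sketch of both idioms (example values only, not taken from the patch):

    # Python 3 removes the print statement; print is called as a function:
    registertype = "plugin"            # example value
    print("Unknown output type '%s'" % registertype)

    # dict.has_key() no longer exists; membership is tested with "in":
    VERSION_KEY = '_VERSION'
    CUR_VERSION = '1'
    cache = {}                         # example cache dictionary
    if (VERSION_KEY not in cache) or cache[VERSION_KEY] != CUR_VERSION:
        cache = {VERSION_KEY: CUR_VERSION}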
diff --git a/tools/gr-usrptest/CMakeLists.txt b/tools/gr-usrptest/CMakeLists.txt
new file mode 100644
index 000000000..3eeea5f75
--- /dev/null
+++ b/tools/gr-usrptest/CMakeLists.txt
@@ -0,0 +1,198 @@
+# Copyright 2011,2012,2014,2016 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Project setup
+########################################################################
+cmake_minimum_required(VERSION 2.6)
+project(gr-usrptest CXX C)
+enable_testing()
+
+#install to PyBOMBS target prefix if defined
+if(DEFINED ENV{PYBOMBS_PREFIX})
+ set(CMAKE_INSTALL_PREFIX $ENV{PYBOMBS_PREFIX})
+ message(STATUS "PyBOMBS installed GNU Radio. Setting CMAKE_INSTALL_PREFIX to $ENV{PYBOMBS_PREFIX}")
+endif()
+
+#select the release build type by default to get optimization flags
+if(NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE "Release")
+ message(STATUS "Build type not specified: defaulting to release.")
+endif(NOT CMAKE_BUILD_TYPE)
+set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "")
+
+#make sure our local CMake Modules path comes first
+list(INSERT CMAKE_MODULE_PATH 0 ${CMAKE_SOURCE_DIR}/cmake/Modules)
+
+# Set the version information here
+set(VERSION_INFO_MAJOR_VERSION 1)
+set(VERSION_INFO_API_COMPAT 0)
+set(VERSION_INFO_MINOR_VERSION 0)
+set(VERSION_INFO_MAINT_VERSION git)
+
+########################################################################
+# Compiler specific setup
+########################################################################
+if(CMAKE_COMPILER_IS_GNUCXX AND NOT WIN32)
+ #http://gcc.gnu.org/wiki/Visibility
+ add_definitions(-fvisibility=hidden)
+endif()
+
+########################################################################
+# Find boost
+########################################################################
+if(UNIX AND EXISTS "/usr/lib64")
+ list(APPEND BOOST_LIBRARYDIR "/usr/lib64") #fedora 64-bit fix
+endif(UNIX AND EXISTS "/usr/lib64")
+set(Boost_ADDITIONAL_VERSIONS
+ "1.35.0" "1.35" "1.36.0" "1.36" "1.37.0" "1.37" "1.38.0" "1.38" "1.39.0" "1.39"
+ "1.40.0" "1.40" "1.41.0" "1.41" "1.42.0" "1.42" "1.43.0" "1.43" "1.44.0" "1.44"
+ "1.45.0" "1.45" "1.46.0" "1.46" "1.47.0" "1.47" "1.48.0" "1.48" "1.49.0" "1.49"
+ "1.50.0" "1.50" "1.51.0" "1.51" "1.52.0" "1.52" "1.53.0" "1.53" "1.54.0" "1.54"
+ "1.55.0" "1.55" "1.56.0" "1.56" "1.57.0" "1.57" "1.58.0" "1.58" "1.59.0" "1.59"
+ "1.60.0" "1.60" "1.61.0" "1.61" "1.62.0" "1.62" "1.63.0" "1.63" "1.64.0" "1.64"
+ "1.65.0" "1.65" "1.66.0" "1.66" "1.67.0" "1.67" "1.68.0" "1.68" "1.69.0" "1.69"
+)
+find_package(Boost "1.35" COMPONENTS filesystem system)
+
+if(NOT Boost_FOUND)
+ message(FATAL_ERROR "Boost required to compile usrptest")
+endif()
+
+########################################################################
+# Install directories
+########################################################################
+include(GrPlatform) #define LIB_SUFFIX
+set(GR_RUNTIME_DIR bin)
+set(GR_LIBRARY_DIR lib${LIB_SUFFIX})
+set(GR_INCLUDE_DIR include/usrptest)
+set(GR_DATA_DIR share)
+set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME})
+set(GR_DOC_DIR ${GR_DATA_DIR}/doc)
+set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME})
+set(GR_CONF_DIR etc)
+set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d)
+set(GR_LIBEXEC_DIR libexec)
+set(GR_PKG_LIBEXEC_DIR ${GR_LIBEXEC_DIR}/${CMAKE_PROJECT_NAME})
+set(GRC_BLOCKS_DIR ${GR_PKG_DATA_DIR}/grc/blocks)
+
+########################################################################
+# On Apple only, set install name and use rpath correctly, if not already set
+########################################################################
+if(APPLE)
+ if(NOT CMAKE_INSTALL_NAME_DIR)
+ set(CMAKE_INSTALL_NAME_DIR
+ ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE
+ PATH "Library Install Name Destination Directory" FORCE)
+ endif(NOT CMAKE_INSTALL_NAME_DIR)
+ if(NOT CMAKE_INSTALL_RPATH)
+ set(CMAKE_INSTALL_RPATH
+ ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE
+ PATH "Library Install RPath" FORCE)
+ endif(NOT CMAKE_INSTALL_RPATH)
+ if(NOT CMAKE_BUILD_WITH_INSTALL_RPATH)
+ set(CMAKE_BUILD_WITH_INSTALL_RPATH ON CACHE
+ BOOL "Do Build Using Library Install RPath" FORCE)
+ endif(NOT CMAKE_BUILD_WITH_INSTALL_RPATH)
+endif(APPLE)
+
+########################################################################
+# Find gnuradio build dependencies
+########################################################################
+find_package(CppUnit)
+find_package(Doxygen)
+
+# Search for GNU Radio and its components and versions. Add any
+# components required to the list of GR_REQUIRED_COMPONENTS (in all
+# caps such as FILTER or FFT) and change the version to the minimum
+# API compatible version required.
+set(GR_REQUIRED_COMPONENTS RUNTIME)
+find_package(Gnuradio "3.7.2" REQUIRED)
+list(INSERT CMAKE_MODULE_PATH 0 ${CMAKE_SOURCE_DIR}/cmake/Modules)
+include(GrVersion)
+
+if(NOT CPPUNIT_FOUND)
+ message(FATAL_ERROR "CppUnit required to compile usrptest")
+endif()
+
+########################################################################
+# Setup doxygen option
+########################################################################
+if(DOXYGEN_FOUND)
+ option(ENABLE_DOXYGEN "Build docs using Doxygen" ON)
+else(DOXYGEN_FOUND)
+ option(ENABLE_DOXYGEN "Build docs using Doxygen" OFF)
+endif(DOXYGEN_FOUND)
+
+########################################################################
+# Setup the include and linker paths
+########################################################################
+include_directories(
+ ${CMAKE_SOURCE_DIR}/lib
+ ${CMAKE_SOURCE_DIR}/include
+ ${CMAKE_BINARY_DIR}/lib
+ ${CMAKE_BINARY_DIR}/include
+ ${Boost_INCLUDE_DIRS}
+ ${CPPUNIT_INCLUDE_DIRS}
+ ${GNURADIO_ALL_INCLUDE_DIRS}
+)
+
+link_directories(
+ ${Boost_LIBRARY_DIRS}
+ ${CPPUNIT_LIBRARY_DIRS}
+ ${GNURADIO_RUNTIME_LIBRARY_DIRS}
+)
+
+# Set component parameters
+set(GR_USRPTEST_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include CACHE INTERNAL "" FORCE)
+set(GR_USRPTEST_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/swig CACHE INTERNAL "" FORCE)
+
+########################################################################
+# Create uninstall target
+########################################################################
+configure_file(
+ ${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake
+@ONLY)
+
+add_custom_target(uninstall
+ ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake
+)
+
+########################################################################
+# Add subdirectories
+########################################################################
+add_subdirectory(include/usrptest)
+add_subdirectory(lib)
+add_subdirectory(swig)
+add_subdirectory(python)
+add_subdirectory(grc)
+add_subdirectory(apps)
+add_subdirectory(docs)
+
+########################################################################
+# Install cmake search helper for this library
+########################################################################
+if(NOT CMAKE_MODULES_DIR)
+ set(CMAKE_MODULES_DIR lib${LIB_SUFFIX}/cmake)
+endif(NOT CMAKE_MODULES_DIR)
+
+install(FILES cmake/Modules/usrptestConfig.cmake
+ DESTINATION ${CMAKE_MODULES_DIR}/usrptest
+)
diff --git a/tools/gr-usrptest/MANIFEST.md b/tools/gr-usrptest/MANIFEST.md
new file mode 100644
index 000000000..83cce8174
--- /dev/null
+++ b/tools/gr-usrptest/MANIFEST.md
@@ -0,0 +1,16 @@
+title: The USRPTEST OOT Module
+brief: Short description of gr-usrptest
+tags: # Tags are arbitrary, but look at CGRAN to see what other authors are using
+ - sdr
+author:
+ - Andrej Rode <andrej.rode@ettus.com>
+copyright_owner:
+ - Ettus Research LLC
+license:
+#repo: # Put the URL of the repository here, or leave blank for default
+#website: <module_website> # If you have a separate project website, put it here
+#icon: <icon_url> # Put a URL to a square image here that will be used as an icon on CGRAN
+---
+A longer, multi-line description of gr-usrptest.
+You may use some *basic* Markdown here.
+If left empty, it will try to find a README file instead.
diff --git a/tools/gr-usrptest/README.md b/tools/gr-usrptest/README.md
new file mode 100644
index 000000000..bc8a70c1d
--- /dev/null
+++ b/tools/gr-usrptest/README.md
@@ -0,0 +1,18 @@
+# gr-usrptest OOT-Module
+
+## Usage
+This OOT module is used to run GNU Radio-based tests on various USRPs and daughterboards to catch regressions.
+
+## Structure
+gr-usrptest follows the structure of a regular OOT-Module with some additions.
+The python directory contains a couple of additional submodules:
+ - flowgraphs
+ - Contains dynamically configured GNU Radio flowgraphs.
+ - rts_tests
+ - Contains tests which can be run unsupervised and store their results
+ - labview_control
+ - Contains classes and functions to control a remote LabVIEW instance with python_labview_automation
+
+## Applications
+ - usrp_phasealignment.py
+ - Calculates phase differences between an arbitrary number of USRP devices. Runs the phase difference measurement a specified number of times and retunes the USRP daughterboards to a random frequency between measurements. Prints the average phase difference and standard deviation for every measurement in human-readable format.
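A rough sketch of the band-splitting and random retuning described above, mirroring the logic in apps/usrp_phasealignment.py further down in this diff (the helper name pick_band_freqs and the frequency range are made up for illustration):

    from random import uniform

    def pick_band_freqs(start_freq, stop_freq, freq_bands):
        # Hypothetical helper, not part of gr-usrptest: split the range into
        # freq_bands bands and draw one random test frequency from each band.
        bw = (stop_freq - start_freq) / freq_bands
        return [uniform(start_freq + n * bw, start_freq + (n + 1) * bw)
                for n in range(freq_bands)]

    # e.g. ten measurement runs spread over 70 MHz .. 6 GHz:
    for freq in pick_band_freqs(70e6, 6e9, 10):
        print("Tuning to {:.3f} MHz".format(freq / 1e6))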
diff --git a/tools/gr-usrptest/apps/CMakeLists.txt b/tools/gr-usrptest/apps/CMakeLists.txt
new file mode 100644
index 000000000..a40192924
--- /dev/null
+++ b/tools/gr-usrptest/apps/CMakeLists.txt
@@ -0,0 +1,26 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+include(GrPython)
+
+GR_PYTHON_INSTALL(
+ PROGRAMS
+ usrp_phasealignment.py
+ DESTINATION bin
+)
diff --git a/tools/gr-usrptest/apps/usrp_phasealignment.py b/tools/gr-usrptest/apps/usrp_phasealignment.py
new file mode 100755
index 000000000..928788be4
--- /dev/null
+++ b/tools/gr-usrptest/apps/usrp_phasealignment.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+from gnuradio.uhd.uhd_app import UHDApp
+from usrptest.flowgraphs import phasealignment_fg
+from usrptest.functions import run_test, setup_phase_alignment_parser, setup_tx_phase_alignment_parser, setup_manual_phase_alignment_parser
+import time
+import argparse
+
+
+def plot_results(results):
+ import matplotlib.pyplot as plt
+ ax = plt.axes()
+ ax.set_ylim(-180, 180)
+ for result in results:
+ plt.errorbar(
+ range(len(result['avg'])),
+ result["avg"],
+ result["stddev"],
+ label="{} - {}".format(result["first"], result["second"]),
+ axes=ax)
+ ax.legend(loc='upper left', bbox_to_anchor=(0.0, 0.0))
+ plt.show()
+
+
+def print_results(results):
+ for result in results:
+ print('Results for: {first} - {second}'.format(
+ first=result['first'], second=result['second']))
+ for i, (avg,
+ stddev) in enumerate(zip(result['avg'], result['stddev'])):
+ print('\t {}. run avg: {}, stddev: {}'.format(i + 1, avg, stddev))
+
+
+def main():
+ parser = argparse.ArgumentParser(conflict_handler='resolve')
+ UHDApp.setup_argparser(parser=parser)
+ parser = setup_phase_alignment_parser(parser)
+ parser = setup_tx_phase_alignment_parser(parser)
+ parser = setup_manual_phase_alignment_parser(parser)
+ args = parser.parse_args()
+ test_app = UHDApp(args=args)
+ if args.auto and args.start_freq and args.stop_freq:
+ from random import uniform
+ bw = (args.stop_freq - args.start_freq) / args.freq_bands
+ for nband in range(args.freq_bands):
+ freq1 = args.start_freq + nband * bw
+ new_freq = uniform(freq1, freq1 + bw)
+ test_app.args.freq = new_freq
+ raw_input(
+ "New test frequency: {:f} MHz. Adjust your signal generator and press ENTER to start measurement.".
+ format(new_freq / 1e6))
+ fg = phasealignment_fg.phasealignment_fg(test_app)
+ fg.start()
+ results = run_test(fg, args.runs)
+ fg.stop()
+ fg.wait()
+ if args.plot:
+ plot_results(results)
+ print_results(results)
+ else:
+ fg = phasealignment_fg.phasealignment_fg(test_app)
+ fg.start()
+ results = run_test(fg, args.runs)
+ fg.stop()
+ fg.wait()
+ if args.plot:
+ plot_results(results)
+ print_results(results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/gr-usrptest/apps/usrp_selftest.py b/tools/gr-usrptest/apps/usrp_selftest.py
new file mode 100755
index 000000000..3dcd4e6ca
--- /dev/null
+++ b/tools/gr-usrptest/apps/usrp_selftest.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+import argparse
+from usrptest import parsers
+from usrptest.flowgraphs import selftest_fg
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser = parsers.add_core_args(parser)
+ parser = parsers.add_selftest_args(parser)
+ args = parser.parse_args()
+ my_flowgraph = selftest_fg.selftest_fg(args.frequency, args.samp_rate, args.dphase ,args.devices)
+ results = my_flowgraph.run()
+ print(results)
+if __name__ == '__main__':
+ main()
diff --git a/tools/gr-usrptest/cmake/Modules/CMakeParseArgumentsCopy.cmake b/tools/gr-usrptest/cmake/Modules/CMakeParseArgumentsCopy.cmake
new file mode 100644
index 000000000..7ce4c49ae
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/CMakeParseArgumentsCopy.cmake
@@ -0,0 +1,138 @@
+# CMAKE_PARSE_ARGUMENTS(<prefix> <options> <one_value_keywords> <multi_value_keywords> args...)
+#
+# CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions for
+# parsing the arguments given to that macro or function.
+# It processes the arguments and defines a set of variables which hold the
+# values of the respective options.
+#
+# The <options> argument contains all options for the respective macro,
+# i.e. keywords which can be used when calling the macro without any value
+# following, like e.g. the OPTIONAL keyword of the install() command.
+#
+# The <one_value_keywords> argument contains all keywords for this macro
+# which are followed by one value, like e.g. DESTINATION keyword of the
+# install() command.
+#
+# The <multi_value_keywords> argument contains all keywords for this macro
+# which can be followed by more than one value, like e.g. the TARGETS or
+# FILES keywords of the install() command.
+#
+# When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the
+# keywords listed in <options>, <one_value_keywords> and
+# <multi_value_keywords> a variable composed of the given <prefix>
+# followed by "_" and the name of the respective keyword.
+# These variables will then hold the respective value from the argument list.
+# For the <options> keywords this will be TRUE or FALSE.
+#
+# All remaining arguments are collected in a variable
+# <prefix>_UNPARSED_ARGUMENTS, this can be checked afterwards to see whether
+# your macro was called with unrecognized parameters.
+#
+# As an example here a my_install() macro, which takes similar arguments as the
+# real install() command:
+#
+# function(MY_INSTALL)
+# set(options OPTIONAL FAST)
+# set(oneValueArgs DESTINATION RENAME)
+# set(multiValueArgs TARGETS CONFIGURATIONS)
+# cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+# ...
+#
+# Assume my_install() has been called like this:
+# my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub)
+#
+# After the cmake_parse_arguments() call the macro will have set the following
+# variables:
+# MY_INSTALL_OPTIONAL = TRUE
+# MY_INSTALL_FAST = FALSE (this option was not used when calling my_install())
+# MY_INSTALL_DESTINATION = "bin"
+# MY_INSTALL_RENAME = "" (was not used)
+# MY_INSTALL_TARGETS = "foo;bar"
+# MY_INSTALL_CONFIGURATIONS = "" (was not used)
+# MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL")
+#
+# You can then continue and process these variables.
+#
+# Keywords terminate lists of values, e.g. if directly after a one_value_keyword
+# another recognized keyword follows, this is interpreted as the beginning of
+# the new option.
+# E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would not set
+# MY_INSTALL_DESTINATION to "OPTIONAL": because OPTIONAL is itself a keyword,
+# MY_INSTALL_DESTINATION would be empty and MY_INSTALL_OPTIONAL would
+# therefore be set to TRUE.
+
+#=============================================================================
+# Copyright 2010 Alexander Neundorf <neundorf@kde.org>
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+# License text for the above reference.)
+
+
+if(__CMAKE_PARSE_ARGUMENTS_INCLUDED)
+ return()
+endif()
+set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE)
+
+
+function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames)
+ # first set all result variables to empty/FALSE
+ foreach(arg_name ${_singleArgNames} ${_multiArgNames})
+ set(${prefix}_${arg_name})
+ endforeach(arg_name)
+
+ foreach(option ${_optionNames})
+ set(${prefix}_${option} FALSE)
+ endforeach(option)
+
+ set(${prefix}_UNPARSED_ARGUMENTS)
+
+ set(insideValues FALSE)
+ set(currentArgName)
+
+ # now iterate over all arguments and fill the result variables
+ foreach(currentArg ${ARGN})
+ list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword
+ list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword
+ list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... then this marks the end of the arguments belonging to this keyword
+
+ if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1)
+ if(insideValues)
+ if("${insideValues}" STREQUAL "SINGLE")
+ set(${prefix}_${currentArgName} ${currentArg})
+ set(insideValues FALSE)
+ elseif("${insideValues}" STREQUAL "MULTI")
+ list(APPEND ${prefix}_${currentArgName} ${currentArg})
+ endif()
+ else(insideValues)
+ list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg})
+ endif(insideValues)
+ else()
+ if(NOT ${optionIndex} EQUAL -1)
+ set(${prefix}_${currentArg} TRUE)
+ set(insideValues FALSE)
+ elseif(NOT ${singleArgIndex} EQUAL -1)
+ set(currentArgName ${currentArg})
+ set(${prefix}_${currentArgName})
+ set(insideValues "SINGLE")
+ elseif(NOT ${multiArgIndex} EQUAL -1)
+ set(currentArgName ${currentArg})
+ set(${prefix}_${currentArgName})
+ set(insideValues "MULTI")
+ endif()
+ endif()
+
+ endforeach(currentArg)
+
+ # propagate the result variables to the caller:
+ foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames})
+ set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE)
+ endforeach(arg_name)
+ set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE)
+
+endfunction(CMAKE_PARSE_ARGUMENTS _options _singleArgs _multiArgs)
diff --git a/tools/gr-usrptest/cmake/Modules/FindCppUnit.cmake b/tools/gr-usrptest/cmake/Modules/FindCppUnit.cmake
new file mode 100644
index 000000000..f93ade341
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/FindCppUnit.cmake
@@ -0,0 +1,39 @@
+# http://www.cmake.org/pipermail/cmake/2006-October/011446.html
+# Modified to use pkg config and use standard var names
+
+#
+# Find the CppUnit includes and library
+#
+# This module defines
+# CPPUNIT_INCLUDE_DIR, where to find tiff.h, etc.
+# CPPUNIT_LIBRARIES, the libraries to link against to use CppUnit.
+# CPPUNIT_FOUND, If false, do not try to use CppUnit.
+
+INCLUDE(FindPkgConfig)
+PKG_CHECK_MODULES(PC_CPPUNIT "cppunit")
+
+FIND_PATH(CPPUNIT_INCLUDE_DIRS
+ NAMES cppunit/TestCase.h
+ HINTS ${PC_CPPUNIT_INCLUDE_DIR}
+ ${CMAKE_INSTALL_PREFIX}/include
+ PATHS
+ /usr/local/include
+ /usr/include
+)
+
+FIND_LIBRARY(CPPUNIT_LIBRARIES
+ NAMES cppunit
+ HINTS ${PC_CPPUNIT_LIBDIR}
+ ${CMAKE_INSTALL_PREFIX}/lib
+ ${CMAKE_INSTALL_PREFIX}/lib64
+ PATHS
+ ${CPPUNIT_INCLUDE_DIRS}/../lib
+ /usr/local/lib
+ /usr/lib
+)
+
+LIST(APPEND CPPUNIT_LIBRARIES ${CMAKE_DL_LIBS})
+
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(CPPUNIT DEFAULT_MSG CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
+MARK_AS_ADVANCED(CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS)
diff --git a/tools/gr-usrptest/cmake/Modules/FindGnuradioRuntime.cmake b/tools/gr-usrptest/cmake/Modules/FindGnuradioRuntime.cmake
new file mode 100644
index 000000000..afed684a5
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/FindGnuradioRuntime.cmake
@@ -0,0 +1,36 @@
+INCLUDE(FindPkgConfig)
+PKG_CHECK_MODULES(PC_GNURADIO_RUNTIME gnuradio-runtime)
+
+if(PC_GNURADIO_RUNTIME_FOUND)
+ # look for include files
+ FIND_PATH(
+ GNURADIO_RUNTIME_INCLUDE_DIRS
+ NAMES gnuradio/top_block.h
+ HINTS $ENV{GNURADIO_RUNTIME_DIR}/include
+ ${PC_GNURADIO_RUNTIME_INCLUDE_DIRS}
+ ${CMAKE_INSTALL_PREFIX}/include
+ PATHS /usr/local/include
+ /usr/include
+ )
+
+ # look for libs
+ FIND_LIBRARY(
+ GNURADIO_RUNTIME_LIBRARIES
+ NAMES gnuradio-runtime
+ HINTS $ENV{GNURADIO_RUNTIME_DIR}/lib
+ ${PC_GNURADIO_RUNTIME_LIBDIR}
+ ${CMAKE_INSTALL_PREFIX}/lib/
+ ${CMAKE_INSTALL_PREFIX}/lib64/
+ PATHS /usr/local/lib
+ /usr/local/lib64
+ /usr/lib
+ /usr/lib64
+ )
+
+ set(GNURADIO_RUNTIME_FOUND ${PC_GNURADIO_RUNTIME_FOUND})
+endif(PC_GNURADIO_RUNTIME_FOUND)
+
+INCLUDE(FindPackageHandleStandardArgs)
+# do not check GNURADIO_RUNTIME_INCLUDE_DIRS; it is not set when the default include path is used.
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(GNURADIO_RUNTIME DEFAULT_MSG GNURADIO_RUNTIME_LIBRARIES)
+MARK_AS_ADVANCED(GNURADIO_RUNTIME_LIBRARIES GNURADIO_RUNTIME_INCLUDE_DIRS)
diff --git a/tools/gr-usrptest/cmake/Modules/GrMiscUtils.cmake b/tools/gr-usrptest/cmake/Modules/GrMiscUtils.cmake
new file mode 100644
index 000000000..5bad57c51
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/GrMiscUtils.cmake
@@ -0,0 +1,528 @@
+# Copyright 2010-2011,2014 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_MISC_UTILS_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_MISC_UTILS_CMAKE TRUE)
+
+########################################################################
+# Set global variable macro.
+# Used for subdirectories to export settings.
+# Example: include and library paths.
+########################################################################
+function(GR_SET_GLOBAL var)
+ set(${var} ${ARGN} CACHE INTERNAL "" FORCE)
+endfunction(GR_SET_GLOBAL)
+
+########################################################################
+# Set the pre-processor definition if the condition is true.
+# - def the pre-processor definition to set and condition name
+########################################################################
+function(GR_ADD_COND_DEF def)
+ if(${def})
+ add_definitions(-D${def})
+ endif(${def})
+endfunction(GR_ADD_COND_DEF)
+
+########################################################################
+# Check for a header and conditionally set a compile define.
+# - hdr the relative path to the header file
+# - def the pre-processor definition to set
+########################################################################
+function(GR_CHECK_HDR_N_DEF hdr def)
+ include(CheckIncludeFileCXX)
+ CHECK_INCLUDE_FILE_CXX(${hdr} ${def})
+ GR_ADD_COND_DEF(${def})
+endfunction(GR_CHECK_HDR_N_DEF)
+
+########################################################################
+# Include subdirectory macro.
+# Sets the CMake directory variables,
+# includes the subdirectory CMakeLists.txt,
+# resets the CMake directory variables.
+#
+# This macro includes subdirectories rather than adding them
+# so that the subdirectory can affect variables in the level above.
+# This provides a work-around for the lack of convenience libraries.
+# This way a subdirectory can append to the list of library sources.
+########################################################################
+macro(GR_INCLUDE_SUBDIRECTORY subdir)
+ #insert the current directories on the front of the list
+ list(INSERT _cmake_source_dirs 0 ${CMAKE_CURRENT_SOURCE_DIR})
+ list(INSERT _cmake_binary_dirs 0 ${CMAKE_CURRENT_BINARY_DIR})
+
+ #set the current directories to the names of the subdirs
+ set(CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${subdir})
+ set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${subdir})
+
+ #include the subdirectory CMakeLists to run it
+ file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+ include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt)
+
+ #reset the value of the current directories
+ list(GET _cmake_source_dirs 0 CMAKE_CURRENT_SOURCE_DIR)
+ list(GET _cmake_binary_dirs 0 CMAKE_CURRENT_BINARY_DIR)
+
+ #pop the subdir names of the front of the list
+ list(REMOVE_AT _cmake_source_dirs 0)
+ list(REMOVE_AT _cmake_binary_dirs 0)
+endmacro(GR_INCLUDE_SUBDIRECTORY)
+
+########################################################################
+# Check if a compiler flag works and conditionally set a compile define.
+# - flag the compiler flag to check for
+# - have the variable to set with result
+########################################################################
+macro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE flag have)
+ include(CheckCXXCompilerFlag)
+ CHECK_CXX_COMPILER_FLAG(${flag} ${have})
+ if(${have})
+ if(${CMAKE_VERSION} VERSION_GREATER "2.8.4")
+ STRING(FIND "${CMAKE_CXX_FLAGS}" "${flag}" flag_dup)
+ if(${flag_dup} EQUAL -1)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
+ endif(${flag_dup} EQUAL -1)
+ endif(${CMAKE_VERSION} VERSION_GREATER "2.8.4")
+ endif(${have})
+endmacro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE)
+
+########################################################################
+# Generates the .la libtool file
+# This appears to generate libtool files that cannot be used by auto*.
+# Usage: GR_LIBTOOL(TARGET [target] DESTINATION [dest])
+# Notice: there is no COMPONENT option; these will not get distributed.
+########################################################################
+function(GR_LIBTOOL)
+ if(NOT DEFINED GENERATE_LIBTOOL)
+ set(GENERATE_LIBTOOL OFF) #disabled by default
+ endif()
+
+ if(GENERATE_LIBTOOL)
+ include(CMakeParseArgumentsCopy)
+ CMAKE_PARSE_ARGUMENTS(GR_LIBTOOL "" "TARGET;DESTINATION" "" ${ARGN})
+
+ find_program(LIBTOOL libtool)
+ if(LIBTOOL)
+ include(CMakeMacroLibtoolFile)
+ CREATE_LIBTOOL_FILE(${GR_LIBTOOL_TARGET} /${GR_LIBTOOL_DESTINATION})
+ endif(LIBTOOL)
+ endif(GENERATE_LIBTOOL)
+
+endfunction(GR_LIBTOOL)
+
+########################################################################
+# Do standard things to the library target
+# - set target properties
+# - make install rules
+# Also handle gnuradio custom naming conventions w/ extras mode.
+########################################################################
+function(GR_LIBRARY_FOO target)
+ #parse the arguments for component names
+ include(CMakeParseArgumentsCopy)
+ CMAKE_PARSE_ARGUMENTS(GR_LIBRARY "" "RUNTIME_COMPONENT;DEVEL_COMPONENT" "" ${ARGN})
+
+ #set additional target properties
+ set_target_properties(${target} PROPERTIES SOVERSION ${LIBVER})
+
+ #install the generated files like so...
+ install(TARGETS ${target}
+ LIBRARY DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .so/.dylib file
+ ARCHIVE DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_DEVEL_COMPONENT} # .lib file
+ RUNTIME DESTINATION ${GR_RUNTIME_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .dll file
+ )
+
+ #extras mode enabled automatically on linux
+ if(NOT DEFINED LIBRARY_EXTRAS)
+ set(LIBRARY_EXTRAS ${LINUX})
+ endif()
+
+ #special extras mode to enable alternative naming conventions
+ if(LIBRARY_EXTRAS)
+
+ #create .la file before changing props
+ GR_LIBTOOL(TARGET ${target} DESTINATION ${GR_LIBRARY_DIR})
+
+ #give the library a special name with ultra-zero soversion
+ set_target_properties(${target} PROPERTIES OUTPUT_NAME ${target}-${LIBVER} SOVERSION "0.0.0")
+ set(target_name lib${target}-${LIBVER}.so.0.0.0)
+
+ #custom command to generate symlinks
+ add_custom_command(
+ TARGET ${target}
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so
+ COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0
+ COMMAND ${CMAKE_COMMAND} -E touch ${target_name} #so the symlinks point to something valid so cmake 2.6 will install
+ )
+
+ #and install the extra symlinks
+ install(
+ FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so
+ ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0
+ DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT}
+ )
+
+ endif(LIBRARY_EXTRAS)
+endfunction(GR_LIBRARY_FOO)
+
+########################################################################
+# Create a dummy custom command that depends on other targets.
+# Usage:
+# GR_GEN_TARGET_DEPS(unique_name target_deps <target1> <target2> ...)
+# ADD_CUSTOM_COMMAND(<the usual args> ${target_deps})
+#
+# Custom command cant depend on targets, but can depend on executables,
+# and executables can depend on targets. So this is the process:
+########################################################################
+function(GR_GEN_TARGET_DEPS name var)
+ file(
+ WRITE ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in
+ "int main(void){return 0;}\n"
+ )
+ execute_process(
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in
+ ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp
+ )
+ add_executable(${name} ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp)
+ if(ARGN)
+ add_dependencies(${name} ${ARGN})
+ endif(ARGN)
+
+ if(CMAKE_CROSSCOMPILING)
+ set(${var} "DEPENDS;${name}" PARENT_SCOPE) #cant call command when cross
+ else()
+ set(${var} "DEPENDS;${name};COMMAND;${name}" PARENT_SCOPE)
+ endif()
+endfunction(GR_GEN_TARGET_DEPS)
+
+########################################################################
+# Control use of gr_logger
+# Usage:
+# GR_LOGGING()
+#
+# Will set ENABLE_GR_LOG to 1 by default.
+# Can manually set with -DENABLE_GR_LOG=0|1
+########################################################################
+function(GR_LOGGING)
+ find_package(Log4cpp)
+
+ OPTION(ENABLE_GR_LOG "Use gr_logger" ON)
+ if(ENABLE_GR_LOG)
+ # If gr_logger is enabled, make it usable
+ add_definitions( -DENABLE_GR_LOG )
+
+ # also test LOG4CPP; if we have it, use this version of the logger
+ # otherwise, default to the stdout/stderr model.
+ if(LOG4CPP_FOUND)
+ SET(HAVE_LOG4CPP True CACHE INTERNAL "" FORCE)
+ add_definitions( -DHAVE_LOG4CPP )
+ else(not LOG4CPP_FOUND)
+ SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE)
+ SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE)
+ SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE)
+ SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE)
+ endif(LOG4CPP_FOUND)
+
+ SET(ENABLE_GR_LOG ${ENABLE_GR_LOG} CACHE INTERNAL "" FORCE)
+
+ else(ENABLE_GR_LOG)
+ SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE)
+ SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE)
+ SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE)
+ SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE)
+ endif(ENABLE_GR_LOG)
+
+ message(STATUS "ENABLE_GR_LOG set to ${ENABLE_GR_LOG}.")
+ message(STATUS "HAVE_LOG4CPP set to ${HAVE_LOG4CPP}.")
+ message(STATUS "LOG4CPP_LIBRARIES set to ${LOG4CPP_LIBRARIES}.")
+
+endfunction(GR_LOGGING)
+
+########################################################################
+# Run GRCC to compile .grc files into .py files.
+#
+# Usage: GRCC(filenames, directory)
+# - filenames: list of .grc files to compile
+# - directory: directory for the generated .py files - usually
+# ${CMAKE_CURRENT_BINARY_DIR}
+# - Sets PYFILES: the list of generated Python files.
+########################################################################
+function(GRCC)
+ # Extract directory from list of args, remove it for the list of filenames.
+ list(GET ARGV -1 directory)
+ list(REMOVE_AT ARGV -1)
+ set(filenames ${ARGV})
+ file(MAKE_DIRECTORY ${directory})
+
+ SET(GRCC_COMMAND ${CMAKE_SOURCE_DIR}/gr-utils/python/grcc)
+
+ # GRCC uses some stuff in grc and gnuradio-runtime, so we force
+ # the known paths here
+ list(APPEND PYTHONPATHS
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/gnuradio-runtime/python
+ ${CMAKE_SOURCE_DIR}/gnuradio-runtime/lib/swig
+ ${CMAKE_BINARY_DIR}/gnuradio-runtime/lib/swig
+ )
+
+ if(WIN32)
+ #SWIG generates the python library files into a subdirectory.
+ #Therefore, we must append this subdirectory into PYTHONPATH.
+ #Only do this for the python directories matching the following:
+ foreach(pydir ${PYTHONPATHS})
+ get_filename_component(name ${pydir} NAME)
+ if(name MATCHES "^(swig|lib|src)$")
+ list(APPEND PYTHONPATHS ${pydir}/${CMAKE_BUILD_TYPE})
+ endif()
+ endforeach(pydir)
+ endif(WIN32)
+
+ file(TO_NATIVE_PATH "${PYTHONPATHS}" pypath)
+
+ if(UNIX)
+ list(APPEND pypath "$PYTHONPATH")
+ string(REPLACE ";" ":" pypath "${pypath}")
+ set(ENV{PYTHONPATH} ${pypath})
+ endif(UNIX)
+
+ if(WIN32)
+ list(APPEND pypath "%PYTHONPATH%")
+ string(REPLACE ";" "\\;" pypath "${pypath}")
+ #list(APPEND environs "PYTHONPATH=${pypath}")
+ set(ENV{PYTHONPATH} ${pypath})
+ endif(WIN32)
+
+ foreach(f ${filenames})
+ execute_process(
+ COMMAND ${GRCC_COMMAND} -d ${directory} ${f}
+ )
+ string(REPLACE ".grc" ".py" pyfile "${f}")
+ string(REPLACE "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" pyfile "${pyfile}")
+ list(APPEND pyfiles ${pyfile})
+ endforeach(f)
+
+ set(PYFILES ${pyfiles} PARENT_SCOPE)
+endfunction(GRCC)
+
+########################################################################
+# Check if HAVE_PTHREAD_SETSCHEDPARAM and HAVE_SCHED_SETSCHEDULER
+# should be defined
+########################################################################
+macro(GR_CHECK_LINUX_SCHED_AVAIL)
+set(CMAKE_REQUIRED_LIBRARIES -lpthread)
+ CHECK_CXX_SOURCE_COMPILES("
+ #include <pthread.h>
+ int main(){
+ pthread_t pthread;
+ pthread_setschedparam(pthread, 0, 0);
+ return 0;
+ } " HAVE_PTHREAD_SETSCHEDPARAM
+ )
+ GR_ADD_COND_DEF(HAVE_PTHREAD_SETSCHEDPARAM)
+
+ CHECK_CXX_SOURCE_COMPILES("
+ #include <sched.h>
+ int main(){
+ pid_t pid;
+ sched_setscheduler(pid, 0, 0);
+ return 0;
+ } " HAVE_SCHED_SETSCHEDULER
+ )
+ GR_ADD_COND_DEF(HAVE_SCHED_SETSCHEDULER)
+endmacro(GR_CHECK_LINUX_SCHED_AVAIL)
+
+########################################################################
+# Macros to generate source and header files from template
+########################################################################
+macro(GR_EXPAND_X_H component root)
+
+ include(GrPython)
+
+ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
+"#!${PYTHON_EXECUTABLE}
+
+import sys, os, re
+sys.path.append('${GR_RUNTIME_PYTHONPATH}')
+sys.path.append('${CMAKE_SOURCE_DIR}/python')
+os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}'
+os.chdir('${CMAKE_CURRENT_BINARY_DIR}')
+
+if __name__ == '__main__':
+ import build_utils
+ root, inp = sys.argv[1:3]
+ for sig in sys.argv[3:]:
+ name = re.sub ('X+', sig, root)
+ d = build_utils.standard_dict2(name, sig, '${component}')
+ build_utils.expand_template(d, inp)
+")
+
+ #make a list of all the generated headers
+ unset(expanded_files_h)
+ foreach(sig ${ARGN})
+ string(REGEX REPLACE "X+" ${sig} name ${root})
+ list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h)
+ endforeach(sig)
+ unset(name)
+
+ #create a command to generate the headers
+ add_custom_command(
+ OUTPUT ${expanded_files_h}
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t
+ COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
+ ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
+ ${root} ${root}.h.t ${ARGN}
+ )
+
+ #install rules for the generated headers
+ list(APPEND generated_includes ${expanded_files_h})
+
+endmacro(GR_EXPAND_X_H)
+
+macro(GR_EXPAND_X_CC_H component root)
+
+ include(GrPython)
+
+ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
+"#!${PYTHON_EXECUTABLE}
+
+import sys, os, re
+sys.path.append('${GR_RUNTIME_PYTHONPATH}')
+sys.path.append('${CMAKE_SOURCE_DIR}/python')
+os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}'
+os.chdir('${CMAKE_CURRENT_BINARY_DIR}')
+
+if __name__ == '__main__':
+ import build_utils
+ root, inp = sys.argv[1:3]
+ for sig in sys.argv[3:]:
+ name = re.sub ('X+', sig, root)
+ d = build_utils.standard_impl_dict2(name, sig, '${component}')
+ build_utils.expand_template(d, inp)
+")
+
+ #make a list of all the generated files
+ unset(expanded_files_cc)
+ unset(expanded_files_h)
+ foreach(sig ${ARGN})
+ string(REGEX REPLACE "X+" ${sig} name ${root})
+ list(APPEND expanded_files_cc ${CMAKE_CURRENT_BINARY_DIR}/${name}.cc)
+ list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h)
+ endforeach(sig)
+ unset(name)
+
+ #create a command to generate the source files
+ add_custom_command(
+ OUTPUT ${expanded_files_cc}
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.cc.t
+ COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
+ ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
+ ${root} ${root}.cc.t ${ARGN}
+ )
+
+ #create a command to generate the header files
+ add_custom_command(
+ OUTPUT ${expanded_files_h}
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t
+ COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
+ ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
+ ${root} ${root}.h.t ${ARGN}
+ )
+
+ #make source files depends on headers to force generation
+ set_source_files_properties(${expanded_files_cc}
+ PROPERTIES OBJECT_DEPENDS "${expanded_files_h}"
+ )
+
+ #install rules for the generated files
+ list(APPEND generated_sources ${expanded_files_cc})
+ list(APPEND generated_headers ${expanded_files_h})
+
+endmacro(GR_EXPAND_X_CC_H)
+
+macro(GR_EXPAND_X_CC_H_IMPL component root)
+
+ include(GrPython)
+
+ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
+"#!${PYTHON_EXECUTABLE}
+
+import sys, os, re
+sys.path.append('${GR_RUNTIME_PYTHONPATH}')
+sys.path.append('${CMAKE_SOURCE_DIR}/python')
+os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}'
+os.chdir('${CMAKE_CURRENT_BINARY_DIR}')
+
+if __name__ == '__main__':
+ import build_utils
+ root, inp = sys.argv[1:3]
+ for sig in sys.argv[3:]:
+ name = re.sub ('X+', sig, root)
+ d = build_utils.standard_dict(name, sig, '${component}')
+ build_utils.expand_template(d, inp, '_impl')
+")
+
+ #make a list of all the generated files
+ unset(expanded_files_cc_impl)
+ unset(expanded_files_h_impl)
+ unset(expanded_files_h)
+ foreach(sig ${ARGN})
+ string(REGEX REPLACE "X+" ${sig} name ${root})
+ list(APPEND expanded_files_cc_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.cc)
+ list(APPEND expanded_files_h_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.h)
+ list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/../include/gnuradio/${component}/${name}.h)
+ endforeach(sig)
+ unset(name)
+
+ #create a command to generate the _impl.cc files
+ add_custom_command(
+ OUTPUT ${expanded_files_cc_impl}
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.cc.t
+ COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
+ ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
+ ${root} ${root}_impl.cc.t ${ARGN}
+ )
+
+ #create a command to generate the _impl.h files
+ add_custom_command(
+ OUTPUT ${expanded_files_h_impl}
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.h.t
+ COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
+ ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py
+ ${root} ${root}_impl.h.t ${ARGN}
+ )
+
+ #make _impl.cc source files depend on _impl.h to force generation
+ set_source_files_properties(${expanded_files_cc_impl}
+ PROPERTIES OBJECT_DEPENDS "${expanded_files_h_impl}"
+ )
+
+ #make _impl.h source files depend on headers to force generation
+ set_source_files_properties(${expanded_files_h_impl}
+ PROPERTIES OBJECT_DEPENDS "${expanded_files_h}"
+ )
+
+ #install rules for the generated files
+ list(APPEND generated_sources ${expanded_files_cc_impl})
+ list(APPEND generated_headers ${expanded_files_h_impl})
+
+endmacro(GR_EXPAND_X_CC_H_IMPL)
diff --git a/tools/gr-usrptest/cmake/Modules/GrPlatform.cmake b/tools/gr-usrptest/cmake/Modules/GrPlatform.cmake
new file mode 100644
index 000000000..fbbea5fee
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/GrPlatform.cmake
@@ -0,0 +1,54 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_PLATFORM_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_PLATFORM_CMAKE TRUE)
+
+########################################################################
+# Setup additional defines for OS types
+########################################################################
+if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ set(LINUX TRUE)
+endif()
+
+if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/debian_version")
+ set(DEBIAN TRUE)
+endif()
+
+if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/redhat-release")
+ set(REDHAT TRUE)
+endif()
+
+if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/slackware-version")
+ set(SLACKWARE TRUE)
+endif()
+
+########################################################################
+# when the library suffix should be 64 (applies to redhat linux family)
+########################################################################
+if (REDHAT OR SLACKWARE)
+ set(LIB64_CONVENTION TRUE)
+endif()
+
+if(NOT DEFINED LIB_SUFFIX AND LIB64_CONVENTION AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$")
+ set(LIB_SUFFIX 64)
+endif()
+set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix")
diff --git a/tools/gr-usrptest/cmake/Modules/GrPython.cmake b/tools/gr-usrptest/cmake/Modules/GrPython.cmake
new file mode 100644
index 000000000..06e061e21
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/GrPython.cmake
@@ -0,0 +1,241 @@
+# Copyright 2010-2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_PYTHON_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_PYTHON_CMAKE TRUE)
+
+########################################################################
+# Setup the python interpreter:
+# This allows the user to specify a specific interpreter,
+# or finds the interpreter via the built-in cmake module.
+########################################################################
+#this allows the user to override PYTHON_EXECUTABLE
+if(PYTHON_EXECUTABLE)
+
+ set(PYTHONINTERP_FOUND TRUE)
+
+#otherwise if not set, try to automatically find it
+else(PYTHON_EXECUTABLE)
+
+ #use the built-in find script
+ find_package(PythonInterp 2)
+
+ #and if that fails use the find program routine
+ if(NOT PYTHONINTERP_FOUND)
+ find_program(PYTHON_EXECUTABLE NAMES python python2 python2.7 python2.6 python2.5)
+ if(PYTHON_EXECUTABLE)
+ set(PYTHONINTERP_FOUND TRUE)
+ endif(PYTHON_EXECUTABLE)
+ endif(NOT PYTHONINTERP_FOUND)
+
+endif(PYTHON_EXECUTABLE)
+
+if (CMAKE_CROSSCOMPILING)
+ set(QA_PYTHON_EXECUTABLE "/usr/bin/python")
+else (CMAKE_CROSSCOMPILING)
+ set(QA_PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE})
+endif(CMAKE_CROSSCOMPILING)
+
+#make the path to the executable appear in the cmake gui
+set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter")
+set(QA_PYTHON_EXECUTABLE ${QA_PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter for QA tests")
+
+#make sure we can use -B with python (introduced in 2.6)
+if(PYTHON_EXECUTABLE)
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -B -c ""
+ OUTPUT_QUIET ERROR_QUIET
+ RESULT_VARIABLE PYTHON_HAS_DASH_B_RESULT
+ )
+ if(PYTHON_HAS_DASH_B_RESULT EQUAL 0)
+ set(PYTHON_DASH_B "-B")
+ endif()
+endif(PYTHON_EXECUTABLE)
+
+########################################################################
+# Check for the existence of a python module:
+# - desc a string description of the check
+# - mod the name of the module to import
+# - cmd an additional command to run
+# - have the result variable to set
+########################################################################
+macro(GR_PYTHON_CHECK_MODULE desc mod cmd have)
+ message(STATUS "")
+ message(STATUS "Python checking for ${desc}")
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c "
+#########################################
+try:
+ import ${mod}
+ assert ${cmd}
+except (ImportError, AssertionError): exit(-1)
+except: pass
+#########################################"
+ RESULT_VARIABLE ${have}
+ )
+ if(${have} EQUAL 0)
+ message(STATUS "Python checking for ${desc} - found")
+ set(${have} TRUE)
+ else(${have} EQUAL 0)
+ message(STATUS "Python checking for ${desc} - not found")
+ set(${have} FALSE)
+ endif(${have} EQUAL 0)
+endmacro(GR_PYTHON_CHECK_MODULE)
+
+########################################################################
+# Sets the python installation directory GR_PYTHON_DIR
+########################################################################
+if(NOT DEFINED GR_PYTHON_DIR)
+execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "
+from distutils import sysconfig
+print sysconfig.get_python_lib(plat_specific=True, prefix='')
+" OUTPUT_VARIABLE GR_PYTHON_DIR OUTPUT_STRIP_TRAILING_WHITESPACE
+)
+endif()
+file(TO_CMAKE_PATH ${GR_PYTHON_DIR} GR_PYTHON_DIR)
+
+########################################################################
+# Create an always-built target with a unique name
+# Usage: GR_UNIQUE_TARGET(<description> <dependencies list>)
+########################################################################
+function(GR_UNIQUE_TARGET desc)
+ file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+ execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib
+unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5]
+print(re.sub('\\W', '_', '${desc} ${reldir} ' + unique))"
+ OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE)
+ add_custom_target(${_target} ALL DEPENDS ${ARGN})
+endfunction(GR_UNIQUE_TARGET)
+
+########################################################################
+# Install python sources (also builds and installs byte-compiled python)
+########################################################################
+function(GR_PYTHON_INSTALL)
+ include(CMakeParseArgumentsCopy)
+ CMAKE_PARSE_ARGUMENTS(GR_PYTHON_INSTALL "" "DESTINATION;COMPONENT" "FILES;PROGRAMS" ${ARGN})
+
+ ####################################################################
+ if(GR_PYTHON_INSTALL_FILES)
+ ####################################################################
+ install(${ARGN}) #installs regular python files
+
+ #create a list of all generated files
+ unset(pysrcfiles)
+ unset(pycfiles)
+ unset(pyofiles)
+ foreach(pyfile ${GR_PYTHON_INSTALL_FILES})
+ get_filename_component(pyfile ${pyfile} ABSOLUTE)
+ list(APPEND pysrcfiles ${pyfile})
+
+ #determine if this file is in the source or binary directory
+ file(RELATIVE_PATH source_rel_path ${CMAKE_CURRENT_SOURCE_DIR} ${pyfile})
+ string(LENGTH "${source_rel_path}" source_rel_path_len)
+ file(RELATIVE_PATH binary_rel_path ${CMAKE_CURRENT_BINARY_DIR} ${pyfile})
+ string(LENGTH "${binary_rel_path}" binary_rel_path_len)
+
+ #and set the generated path appropriately
+ if(${source_rel_path_len} GREATER ${binary_rel_path_len})
+ set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${binary_rel_path})
+ else()
+ set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${source_rel_path})
+ endif()
+ list(APPEND pycfiles ${pygenfile}c)
+ list(APPEND pyofiles ${pygenfile}o)
+
+ #ensure generation path exists
+ get_filename_component(pygen_path ${pygenfile} PATH)
+ file(MAKE_DIRECTORY ${pygen_path})
+
+ endforeach(pyfile)
+
+ #the command to generate the pyc files
+ add_custom_command(
+ DEPENDS ${pysrcfiles} OUTPUT ${pycfiles}
+ COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pycfiles}
+ )
+
+ #the command to generate the pyo files
+ add_custom_command(
+ DEPENDS ${pysrcfiles} OUTPUT ${pyofiles}
+ COMMAND ${PYTHON_EXECUTABLE} -O ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pyofiles}
+ )
+
+ #create install rule and add generated files to target list
+ set(python_install_gen_targets ${pycfiles} ${pyofiles})
+ install(FILES ${python_install_gen_targets}
+ DESTINATION ${GR_PYTHON_INSTALL_DESTINATION}
+ COMPONENT ${GR_PYTHON_INSTALL_COMPONENT}
+ )
+
+ ####################################################################
+ elseif(GR_PYTHON_INSTALL_PROGRAMS)
+ ####################################################################
+ file(TO_NATIVE_PATH ${PYTHON_EXECUTABLE} pyexe_native)
+
+ if (CMAKE_CROSSCOMPILING)
+ set(pyexe_native "/usr/bin/env python")
+ endif()
+
+ foreach(pyfile ${GR_PYTHON_INSTALL_PROGRAMS})
+ get_filename_component(pyfile_name ${pyfile} NAME)
+ get_filename_component(pyfile ${pyfile} ABSOLUTE)
+ string(REPLACE "${CMAKE_SOURCE_DIR}" "${CMAKE_BINARY_DIR}" pyexefile "${pyfile}.exe")
+ list(APPEND python_install_gen_targets ${pyexefile})
+
+ get_filename_component(pyexefile_path ${pyexefile} PATH)
+ file(MAKE_DIRECTORY ${pyexefile_path})
+
+ add_custom_command(
+ OUTPUT ${pyexefile} DEPENDS ${pyfile}
+ COMMAND ${PYTHON_EXECUTABLE} -c
+ "import re; R=re.compile('^\#!.*$\\n',flags=re.MULTILINE); open('${pyexefile}','w').write('\#!${pyexe_native}\\n'+R.sub('',open('${pyfile}','r').read()))"
+            COMMENT "Shebanging ${pyfile_name}"
+ VERBATIM
+ )
+
+ #on windows, python files need an extension to execute
+ get_filename_component(pyfile_ext ${pyfile} EXT)
+ if(WIN32 AND NOT pyfile_ext)
+ set(pyfile_name "${pyfile_name}.py")
+ endif()
+
+ install(PROGRAMS ${pyexefile} RENAME ${pyfile_name}
+ DESTINATION ${GR_PYTHON_INSTALL_DESTINATION}
+ COMPONENT ${GR_PYTHON_INSTALL_COMPONENT}
+ )
+ endforeach(pyfile)
+
+ endif()
+
+ GR_UNIQUE_TARGET("pygen" ${python_install_gen_targets})
+
+endfunction(GR_PYTHON_INSTALL)
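+
+# Illustrative usage for an out-of-tree module; the file names and the
+# destination below are examples, not fixed by this function:
+#
+#   GR_PYTHON_INSTALL(
+#       FILES __init__.py functions.py
+#       DESTINATION ${GR_PYTHON_DIR}/usrptest
+#   )
+#
+# PROGRAMS works the same way, but rewrites the shebang line and installs
+# the scripts as executables.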
+
+########################################################################
+# Write the python helper script that generates byte code files
+########################################################################
+file(WRITE ${CMAKE_BINARY_DIR}/python_compile_helper.py "
+import sys, py_compile
+files = sys.argv[1:]
+srcs, gens = files[:len(files)/2], files[len(files)/2:]
+for src, gen in zip(srcs, gens):
+ py_compile.compile(file=src, cfile=gen, doraise=True)
+")
diff --git a/tools/gr-usrptest/cmake/Modules/GrSwig.cmake b/tools/gr-usrptest/cmake/Modules/GrSwig.cmake
new file mode 100644
index 000000000..abf4dc461
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/GrSwig.cmake
@@ -0,0 +1,251 @@
+# Copyright 2010-2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_SWIG_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_SWIG_CMAKE TRUE)
+
+include(GrPython)
+
+########################################################################
+# Builds a swig documentation file to be generated into python docstrings
+# Usage: GR_SWIG_MAKE_DOCS(output_file input_path input_path....)
+#
+# Set the following variable to specify extra dependent targets:
+# - GR_SWIG_DOCS_SOURCE_DEPS
+# - GR_SWIG_DOCS_TARGET_DEPS
+########################################################################
+function(GR_SWIG_MAKE_DOCS output_file)
+ if(ENABLE_DOXYGEN)
+
+    #setup the input files variable list, quote formatted
+ set(input_files)
+ unset(INPUT_PATHS)
+ foreach(input_path ${ARGN})
+ if(IS_DIRECTORY ${input_path}) #when input path is a directory
+ file(GLOB input_path_h_files ${input_path}/*.h)
+        else() #otherwise it's just a file, no glob
+ set(input_path_h_files ${input_path})
+ endif()
+ list(APPEND input_files ${input_path_h_files})
+ set(INPUT_PATHS "${INPUT_PATHS} \"${input_path}\"")
+ endforeach(input_path)
+
+ #determine the output directory
+ get_filename_component(name ${output_file} NAME_WE)
+ get_filename_component(OUTPUT_DIRECTORY ${output_file} PATH)
+ set(OUTPUT_DIRECTORY ${OUTPUT_DIRECTORY}/${name}_swig_docs)
+ make_directory(${OUTPUT_DIRECTORY})
+
+ #generate the Doxyfile used by doxygen
+ configure_file(
+ ${CMAKE_SOURCE_DIR}/docs/doxygen/Doxyfile.swig_doc.in
+ ${OUTPUT_DIRECTORY}/Doxyfile
+ @ONLY)
+
+ #Create a dummy custom command that depends on other targets
+ include(GrMiscUtils)
+ GR_GEN_TARGET_DEPS(_${name}_tag tag_deps ${GR_SWIG_DOCS_TARGET_DEPS})
+
+ #call doxygen on the Doxyfile + input headers
+ add_custom_command(
+ OUTPUT ${OUTPUT_DIRECTORY}/xml/index.xml
+ DEPENDS ${input_files} ${GR_SWIG_DOCS_SOURCE_DEPS} ${tag_deps}
+ COMMAND ${DOXYGEN_EXECUTABLE} ${OUTPUT_DIRECTORY}/Doxyfile
+ COMMENT "Generating doxygen xml for ${name} docs"
+ )
+
+ #call the swig_doc script on the xml files
+ add_custom_command(
+ OUTPUT ${output_file}
+ DEPENDS ${input_files} ${stamp-file} ${OUTPUT_DIRECTORY}/xml/index.xml
+ COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B}
+ ${CMAKE_SOURCE_DIR}/docs/doxygen/swig_doc.py
+ ${OUTPUT_DIRECTORY}/xml
+ ${output_file}
+ COMMENT "Generating python docstrings for ${name}"
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docs/doxygen
+ )
+
+ else(ENABLE_DOXYGEN)
+ file(WRITE ${output_file} "\n") #no doxygen -> empty file
+ endif(ENABLE_DOXYGEN)
+endfunction(GR_SWIG_MAKE_DOCS)
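+
+# Illustrative call (the output path and header directory are examples):
+#
+#   GR_SWIG_MAKE_DOCS(
+#       ${CMAKE_CURRENT_BINARY_DIR}/usrptest_swig_doc.i
+#       ${CMAKE_SOURCE_DIR}/include
+#   )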
+
+########################################################################
+# Build a swig target for the common gnuradio use case. Usage:
+# GR_SWIG_MAKE(target ifile ifile ifile...)
+#
+# Set the following variables before calling:
+# - GR_SWIG_FLAGS
+# - GR_SWIG_INCLUDE_DIRS
+# - GR_SWIG_LIBRARIES
+# - GR_SWIG_SOURCE_DEPS
+# - GR_SWIG_TARGET_DEPS
+# - GR_SWIG_DOC_FILE
+# - GR_SWIG_DOC_DIRS
+########################################################################
+macro(GR_SWIG_MAKE name)
+ set(ifiles ${ARGN})
+
+ # Shimming this in here to take care of a SWIG bug with handling
+ # vector<size_t> and vector<unsigned int> (on 32-bit machines) and
+ # vector<long unsigned int> (on 64-bit machines). Use this to test
+ # the size of size_t, then set SIZE_T_32 if it's a 32-bit machine
+ # or not if it's 64-bit. The logic in gr_type.i handles the rest.
+ INCLUDE(CheckTypeSize)
+ CHECK_TYPE_SIZE("size_t" SIZEOF_SIZE_T)
+ CHECK_TYPE_SIZE("unsigned int" SIZEOF_UINT)
+ if(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT})
+ list(APPEND GR_SWIG_FLAGS -DSIZE_T_32)
+ endif(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT})
+
+ #do swig doc generation if specified
+ if(GR_SWIG_DOC_FILE)
+ set(GR_SWIG_DOCS_SOURCE_DEPS ${GR_SWIG_SOURCE_DEPS})
+ list(APPEND GR_SWIG_DOCS_TARGET_DEPS ${GR_SWIG_TARGET_DEPS})
+ GR_SWIG_MAKE_DOCS(${GR_SWIG_DOC_FILE} ${GR_SWIG_DOC_DIRS})
+ add_custom_target(${name}_swig_doc DEPENDS ${GR_SWIG_DOC_FILE})
+ list(APPEND GR_SWIG_TARGET_DEPS ${name}_swig_doc ${GR_RUNTIME_SWIG_DOC_FILE})
+ endif()
+
+ #append additional include directories
+ find_package(PythonLibs 2)
+ list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_PATH}) #deprecated name (now dirs)
+ list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS})
+
+ #prepend local swig directories
+ list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_SOURCE_DIR})
+ list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_BINARY_DIR})
+
+ #determine include dependencies for swig file
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE}
+ ${CMAKE_BINARY_DIR}/get_swig_deps.py
+ "${ifiles}" "${GR_SWIG_INCLUDE_DIRS}"
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ OUTPUT_VARIABLE SWIG_MODULE_${name}_EXTRA_DEPS
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+ )
+
+ #Create a dummy custom command that depends on other targets
+ include(GrMiscUtils)
+ GR_GEN_TARGET_DEPS(_${name}_swig_tag tag_deps ${GR_SWIG_TARGET_DEPS})
+ set(tag_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.tag)
+ add_custom_command(
+ OUTPUT ${tag_file}
+ DEPENDS ${GR_SWIG_SOURCE_DEPS} ${tag_deps}
+ COMMAND ${CMAKE_COMMAND} -E touch ${tag_file}
+ )
+
+ #append the specified include directories
+ include_directories(${GR_SWIG_INCLUDE_DIRS})
+ list(APPEND SWIG_MODULE_${name}_EXTRA_DEPS ${tag_file})
+
+ #setup the swig flags with flags and include directories
+ set(CMAKE_SWIG_FLAGS -fvirtual -modern -keyword -w511 -module ${name} ${GR_SWIG_FLAGS})
+ foreach(dir ${GR_SWIG_INCLUDE_DIRS})
+ list(APPEND CMAKE_SWIG_FLAGS "-I${dir}")
+ endforeach(dir)
+
+ #set the C++ property on the swig .i file so it builds
+ set_source_files_properties(${ifiles} PROPERTIES CPLUSPLUS ON)
+
+ #setup the actual swig library target to be built
+ include(UseSWIG)
+ SWIG_ADD_MODULE(${name} python ${ifiles})
+ SWIG_LINK_LIBRARIES(${name} ${PYTHON_LIBRARIES} ${GR_SWIG_LIBRARIES})
+ if(${name} STREQUAL "runtime_swig")
+ SET_TARGET_PROPERTIES(${SWIG_MODULE_runtime_swig_REAL_NAME} PROPERTIES DEFINE_SYMBOL "gnuradio_runtime_EXPORTS")
+ endif(${name} STREQUAL "runtime_swig")
+
+endmacro(GR_SWIG_MAKE)
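+
+# Illustrative usage for a typical out-of-tree module; the variable values,
+# target name and .i file below are examples, not mandated by this macro:
+#
+#   set(GR_SWIG_INCLUDE_DIRS ${CMAKE_SOURCE_DIR}/include)
+#   set(GR_SWIG_LIBRARIES gnuradio-usrptest)
+#   set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/usrptest_swig_doc.i)
+#   set(GR_SWIG_DOC_DIRS ${CMAKE_SOURCE_DIR}/include)
+#   GR_SWIG_MAKE(usrptest_swig usrptest_swig.i)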
+
+########################################################################
+# Install swig targets generated by GR_SWIG_MAKE. Usage:
+# GR_SWIG_INSTALL(
+# TARGETS target target target...
+# [DESTINATION destination]
+# [COMPONENT component]
+# )
+########################################################################
+macro(GR_SWIG_INSTALL)
+
+ include(CMakeParseArgumentsCopy)
+ CMAKE_PARSE_ARGUMENTS(GR_SWIG_INSTALL "" "DESTINATION;COMPONENT" "TARGETS" ${ARGN})
+
+ foreach(name ${GR_SWIG_INSTALL_TARGETS})
+ install(TARGETS ${SWIG_MODULE_${name}_REAL_NAME}
+ DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
+ COMPONENT ${GR_SWIG_INSTALL_COMPONENT}
+ )
+
+ include(GrPython)
+ GR_PYTHON_INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${name}.py
+ DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
+ COMPONENT ${GR_SWIG_INSTALL_COMPONENT}
+ )
+
+ GR_LIBTOOL(
+ TARGET ${SWIG_MODULE_${name}_REAL_NAME}
+ DESTINATION ${GR_SWIG_INSTALL_DESTINATION}
+ )
+
+ endforeach(name)
+
+endmacro(GR_SWIG_INSTALL)
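+
+# Illustrative usage, continuing the GR_SWIG_MAKE example above (the
+# destination path is an example):
+#
+#   GR_SWIG_INSTALL(
+#       TARGETS usrptest_swig
+#       DESTINATION ${GR_PYTHON_DIR}/usrptest
+#   )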
+
+########################################################################
+# Generate a python file that can determine swig dependencies.
+# Used by the make macro above to determine extra dependencies.
+# When you build C++, CMake figures out the header dependencies.
+# This code essentially performs that logic for swig includes.
+########################################################################
+file(WRITE ${CMAKE_BINARY_DIR}/get_swig_deps.py "
+
+import os, sys, re
+
+i_include_matcher = re.compile('%(include|import)\\s*[<|\"](.*)[>|\"]')
+h_include_matcher = re.compile('#(include)\\s*[<|\"](.*)[>|\"]')
+include_dirs = sys.argv[2].split(';')
+
+def get_swig_incs(file_path):
+ if file_path.endswith('.i'): matcher = i_include_matcher
+ else: matcher = h_include_matcher
+ file_contents = open(file_path, 'r').read()
+    #note: a compiled pattern's findall() takes a start position, not flags,
+    #as its second argument, so no extra argument is passed here
+    return matcher.findall(file_contents)
+
+def get_swig_deps(file_path, level):
+ deps = [file_path]
+ if level == 0: return deps
+ for keyword, inc_file in get_swig_incs(file_path):
+ for inc_dir in include_dirs:
+ inc_path = os.path.join(inc_dir, inc_file)
+ if not os.path.exists(inc_path): continue
+ deps.extend(get_swig_deps(inc_path, level-1))
+            break #found; we don't search lower-priority include dirs
+ return deps
+
+if __name__ == '__main__':
+ ifiles = sys.argv[1].split(';')
+ deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], [])
+ #sys.stderr.write(';'.join(set(deps)) + '\\n\\n')
+ print(';'.join(set(deps)))
+")
diff --git a/tools/gr-usrptest/cmake/Modules/GrTest.cmake b/tools/gr-usrptest/cmake/Modules/GrTest.cmake
new file mode 100644
index 000000000..62caab4b5
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/GrTest.cmake
@@ -0,0 +1,143 @@
+# Copyright 2010-2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+if(DEFINED __INCLUDED_GR_TEST_CMAKE)
+ return()
+endif()
+set(__INCLUDED_GR_TEST_CMAKE TRUE)
+
+########################################################################
+# Add a unit test and setup the environment for a unit test.
+# Takes the same arguments as the ADD_TEST function.
+#
+# Before calling set the following variables:
+# GR_TEST_TARGET_DEPS - built targets for the library path
+# GR_TEST_LIBRARY_DIRS - directories for the library path
+# GR_TEST_PYTHON_DIRS - directories for the python path
+# GR_TEST_ENVIRONS - other environment key/value pairs
+########################################################################
+function(GR_ADD_TEST test_name)
+
+ #Ensure that the build exe also appears in the PATH.
+ list(APPEND GR_TEST_TARGET_DEPS ${ARGN})
+
+ #In the land of windows, all libraries must be in the PATH.
+ #Since the dependent libraries are not yet installed,
+ #we must manually set them in the PATH to run tests.
+ #The following appends the path of a target dependency.
+ foreach(target ${GR_TEST_TARGET_DEPS})
+ get_target_property(location ${target} LOCATION)
+ if(location)
+ get_filename_component(path ${location} PATH)
+ string(REGEX REPLACE "\\$\\(.*\\)" ${CMAKE_BUILD_TYPE} path ${path})
+ list(APPEND GR_TEST_LIBRARY_DIRS ${path})
+ endif(location)
+ endforeach(target)
+
+ if(WIN32)
+ #SWIG generates the python library files into a subdirectory.
+ #Therefore, we must append this subdirectory into PYTHONPATH.
+ #Only do this for the python directories matching the following:
+ foreach(pydir ${GR_TEST_PYTHON_DIRS})
+ get_filename_component(name ${pydir} NAME)
+ if(name MATCHES "^(swig|lib|src)$")
+ list(APPEND GR_TEST_PYTHON_DIRS ${pydir}/${CMAKE_BUILD_TYPE})
+ endif()
+ endforeach(pydir)
+ endif(WIN32)
+
+ file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR} srcdir)
+ file(TO_NATIVE_PATH "${GR_TEST_LIBRARY_DIRS}" libpath) #ok to use on dir list?
+ file(TO_NATIVE_PATH "${GR_TEST_PYTHON_DIRS}" pypath) #ok to use on dir list?
+
+ set(environs "VOLK_GENERIC=1" "GR_DONT_LOAD_PREFS=1" "srcdir=${srcdir}")
+ list(APPEND environs ${GR_TEST_ENVIRONS})
+
+ #http://www.cmake.org/pipermail/cmake/2009-May/029464.html
+ #Replaced this add test + set environs code with the shell script generation.
+    #It's nicer to be able to manually run the shell script to diagnose problems.
+ #ADD_TEST(${ARGV})
+ #SET_TESTS_PROPERTIES(${test_name} PROPERTIES ENVIRONMENT "${environs}")
+
+ if(UNIX)
+ set(LD_PATH_VAR "LD_LIBRARY_PATH")
+ if(APPLE)
+ set(LD_PATH_VAR "DYLD_LIBRARY_PATH")
+ endif()
+
+ set(binpath "${CMAKE_CURRENT_BINARY_DIR}:$PATH")
+ list(APPEND libpath "$${LD_PATH_VAR}")
+ list(APPEND pypath "$PYTHONPATH")
+
+ #replace list separator with the path separator
+ string(REPLACE ";" ":" libpath "${libpath}")
+ string(REPLACE ";" ":" pypath "${pypath}")
+ list(APPEND environs "PATH=${binpath}" "${LD_PATH_VAR}=${libpath}" "PYTHONPATH=${pypath}")
+
+        #generate a shell script that sets the environment and runs the test
+ if (CMAKE_CROSSCOMPILING)
+ set(SHELL "/bin/sh")
+ else(CMAKE_CROSSCOMPILING)
+ find_program(SHELL sh)
+ endif(CMAKE_CROSSCOMPILING)
+ set(sh_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.sh)
+ file(WRITE ${sh_file} "#!${SHELL}\n")
+ #each line sets an environment variable
+ foreach(environ ${environs})
+ file(APPEND ${sh_file} "export ${environ}\n")
+ endforeach(environ)
+ #load the command to run with its arguments
+ foreach(arg ${ARGN})
+ file(APPEND ${sh_file} "${arg} ")
+ endforeach(arg)
+ file(APPEND ${sh_file} "\n")
+
+ #make the shell file executable
+ execute_process(COMMAND chmod +x ${sh_file})
+
+ add_test(${test_name} ${SHELL} ${sh_file})
+
+ endif(UNIX)
+
+ if(WIN32)
+ list(APPEND libpath ${DLL_PATHS} "%PATH%")
+ list(APPEND pypath "%PYTHONPATH%")
+
+ #replace list separator with the path separator (escaped)
+ string(REPLACE ";" "\\;" libpath "${libpath}")
+ string(REPLACE ";" "\\;" pypath "${pypath}")
+ list(APPEND environs "PATH=${libpath}" "PYTHONPATH=${pypath}")
+
+ #generate a bat file that sets the environment and runs the test
+ set(bat_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.bat)
+ file(WRITE ${bat_file} "@echo off\n")
+ #each line sets an environment variable
+ foreach(environ ${environs})
+ file(APPEND ${bat_file} "SET ${environ}\n")
+ endforeach(environ)
+ #load the command to run with its arguments
+ foreach(arg ${ARGN})
+ file(APPEND ${bat_file} "${arg} ")
+ endforeach(arg)
+ file(APPEND ${bat_file} "\n")
+
+ add_test(${test_name} ${bat_file})
+ endif(WIN32)
+
+endfunction(GR_ADD_TEST)
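+
+# Illustrative usage (the dependency, directories and QA script name are
+# examples): run a python QA script against the not-yet-installed build:
+#
+#   set(GR_TEST_TARGET_DEPS gnuradio-usrptest)
+#   set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig ${CMAKE_SOURCE_DIR}/python)
+#   GR_ADD_TEST(qa_measurement_sink_f ${PYTHON_EXECUTABLE}
+#       ${CMAKE_CURRENT_SOURCE_DIR}/qa_measurement_sink_f.py)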
diff --git a/tools/gr-usrptest/cmake/Modules/UseSWIG.cmake b/tools/gr-usrptest/cmake/Modules/UseSWIG.cmake
new file mode 100644
index 000000000..c0f172870
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/UseSWIG.cmake
@@ -0,0 +1,304 @@
+# - SWIG module for CMake
+# Defines the following macros:
+# SWIG_ADD_MODULE(name language [ files ])
+# - Define swig module with given name and specified language
+# SWIG_LINK_LIBRARIES(name [ libraries ])
+# - Link libraries to swig module
+# All other macros are for internal use only.
+# To get the actual name of the swig module,
+# use: ${SWIG_MODULE_${name}_REAL_NAME}.
+# Set Source files properties such as CPLUSPLUS and SWIG_FLAGS to specify
+# special behavior of SWIG. Also global CMAKE_SWIG_FLAGS can be used to add
+# special flags to all swig calls.
+# Another special variable is CMAKE_SWIG_OUTDIR; it allows one to specify
+# where to write all the swig generated modules (swig -outdir option).
+# The name-specific variable SWIG_MODULE_<name>_EXTRA_DEPS may be used
+# to specify extra dependencies for the generated modules.
+# If the source file generated by swig needs some special flag you can use
+# set_source_files_properties( ${swig_generated_file_fullname}
+# PROPERTIES COMPILE_FLAGS "-bla")
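+#
+# Example (module, file and library names are illustrative; in this tree the
+# file is pulled in via include(UseSWIG) from GrSwig.cmake):
+#   set_source_files_properties(example.i PROPERTIES CPLUSPLUS ON)
+#   SWIG_ADD_MODULE(example python example.i)
+#   SWIG_LINK_LIBRARIES(example ${PYTHON_LIBRARIES})
+#   # the resulting library target is ${SWIG_MODULE_example_REAL_NAME}, i.e. _example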
+
+
+#=============================================================================
+# Copyright 2004-2009 Kitware, Inc.
+# Copyright 2009 Mathieu Malaterre <mathieu.malaterre@gmail.com>
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+# License text for the above reference.)
+
+set(SWIG_CXX_EXTENSION "cxx")
+set(SWIG_EXTRA_LIBRARIES "")
+
+set(SWIG_PYTHON_EXTRA_FILE_EXTENSION "py")
+
+#
+# For given swig module initialize variables associated with it
+#
+macro(SWIG_MODULE_INITIALIZE name language)
+ string(TOUPPER "${language}" swig_uppercase_language)
+ string(TOLOWER "${language}" swig_lowercase_language)
+ set(SWIG_MODULE_${name}_LANGUAGE "${swig_uppercase_language}")
+ set(SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG "${swig_lowercase_language}")
+
+ set(SWIG_MODULE_${name}_REAL_NAME "${name}")
+ if("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "UNKNOWN")
+ message(FATAL_ERROR "SWIG Error: Language \"${language}\" not found")
+ elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PYTHON")
+ # when swig is used without the -interface it will produce in the module.py
+    # an 'import _modulename' statement, which implies having a corresponding
+ # _modulename.so (*NIX), _modulename.pyd (Win32).
+ set(SWIG_MODULE_${name}_REAL_NAME "_${name}")
+ elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PERL")
+ set(SWIG_MODULE_${name}_EXTRA_FLAGS "-shadow")
+ endif()
+endmacro()
+
+#
+# For a given language, input file, and output file, determine extra files that
+# will be generated. This is internal swig macro.
+#
+
+macro(SWIG_GET_EXTRA_OUTPUT_FILES language outfiles generatedpath infile)
+ set(${outfiles} "")
+ get_source_file_property(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename
+ ${infile} SWIG_MODULE_NAME)
+ if(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename STREQUAL "NOTFOUND")
+ get_filename_component(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename "${infile}" NAME_WE)
+ endif()
+ foreach(it ${SWIG_${language}_EXTRA_FILE_EXTENSION})
+ set(${outfiles} ${${outfiles}}
+ "${generatedpath}/${SWIG_GET_EXTRA_OUTPUT_FILES_module_basename}.${it}")
+ endforeach()
+endmacro()
+
+#
+# Take swig (*.i) file and add proper custom commands for it
+#
+macro(SWIG_ADD_SOURCE_TO_MODULE name outfiles infile)
+ set(swig_full_infile ${infile})
+ get_filename_component(swig_source_file_path "${infile}" PATH)
+ get_filename_component(swig_source_file_name_we "${infile}" NAME_WE)
+ get_source_file_property(swig_source_file_generated ${infile} GENERATED)
+ get_source_file_property(swig_source_file_cplusplus ${infile} CPLUSPLUS)
+ get_source_file_property(swig_source_file_flags ${infile} SWIG_FLAGS)
+ if("${swig_source_file_flags}" STREQUAL "NOTFOUND")
+ set(swig_source_file_flags "")
+ endif()
+ set(swig_source_file_fullname "${infile}")
+ if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_SOURCE_DIR}")
+ string(REGEX REPLACE
+ "^${CMAKE_CURRENT_SOURCE_DIR}" ""
+ swig_source_file_relative_path
+ "${swig_source_file_path}")
+ else()
+ if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_BINARY_DIR}")
+ string(REGEX REPLACE
+ "^${CMAKE_CURRENT_BINARY_DIR}" ""
+ swig_source_file_relative_path
+ "${swig_source_file_path}")
+ set(swig_source_file_generated 1)
+ else()
+ set(swig_source_file_relative_path "${swig_source_file_path}")
+ if(swig_source_file_generated)
+ set(swig_source_file_fullname "${CMAKE_CURRENT_BINARY_DIR}/${infile}")
+ else()
+ set(swig_source_file_fullname "${CMAKE_CURRENT_SOURCE_DIR}/${infile}")
+ endif()
+ endif()
+ endif()
+
+ set(swig_generated_file_fullname
+ "${CMAKE_CURRENT_BINARY_DIR}")
+ if(swig_source_file_relative_path)
+ set(swig_generated_file_fullname
+ "${swig_generated_file_fullname}/${swig_source_file_relative_path}")
+ endif()
+ # If CMAKE_SWIG_OUTDIR was specified then pass it to -outdir
+ if(CMAKE_SWIG_OUTDIR)
+ set(swig_outdir ${CMAKE_SWIG_OUTDIR})
+ else()
+ set(swig_outdir ${CMAKE_CURRENT_BINARY_DIR})
+ endif()
+ SWIG_GET_EXTRA_OUTPUT_FILES(${SWIG_MODULE_${name}_LANGUAGE}
+ swig_extra_generated_files
+ "${swig_outdir}"
+ "${infile}")
+ set(swig_generated_file_fullname
+ "${swig_generated_file_fullname}/${swig_source_file_name_we}")
+ # add the language into the name of the file (i.e. TCL_wrap)
+ # this allows for the same .i file to be wrapped into different languages
+ set(swig_generated_file_fullname
+ "${swig_generated_file_fullname}${SWIG_MODULE_${name}_LANGUAGE}_wrap")
+
+ if(swig_source_file_cplusplus)
+ set(swig_generated_file_fullname
+ "${swig_generated_file_fullname}.${SWIG_CXX_EXTENSION}")
+ else()
+ set(swig_generated_file_fullname
+ "${swig_generated_file_fullname}.c")
+ endif()
+
+ # Shut up some warnings from poor SWIG code generation that we
+ # can do nothing about, when this flag is available
+ include(CheckCXXCompilerFlag)
+ check_cxx_compiler_flag("-Wno-unused-but-set-variable" HAVE_WNO_UNUSED_BUT_SET_VARIABLE)
+ if(HAVE_WNO_UNUSED_BUT_SET_VARIABLE)
+ set_source_files_properties(${swig_generated_file_fullname}
+ PROPERTIES COMPILE_FLAGS "-Wno-unused-but-set-variable")
+ endif(HAVE_WNO_UNUSED_BUT_SET_VARIABLE)
+
+ get_directory_property(cmake_include_directories INCLUDE_DIRECTORIES)
+ set(swig_include_dirs)
+ foreach(it ${cmake_include_directories})
+ set(swig_include_dirs ${swig_include_dirs} "-I${it}")
+ endforeach()
+
+ set(swig_special_flags)
+ # default is c, so add c++ flag if it is c++
+ if(swig_source_file_cplusplus)
+ set(swig_special_flags ${swig_special_flags} "-c++")
+ endif()
+ set(swig_extra_flags)
+ if(SWIG_MODULE_${name}_EXTRA_FLAGS)
+ set(swig_extra_flags ${swig_extra_flags} ${SWIG_MODULE_${name}_EXTRA_FLAGS})
+ endif()
+
+ # hack to work around CMake bug in add_custom_command with multiple OUTPUT files
+
+ file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib
+unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5]
+print(re.sub('\\W', '_', '${name} ${reldir} ' + unique))"
+ OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+
+ file(
+ WRITE ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in
+ "int main(void){return 0;}\n"
+ )
+
+ # create dummy dependencies
+ add_custom_command(
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in
+ ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp
+ DEPENDS "${swig_source_file_fullname}" ${SWIG_MODULE_${name}_EXTRA_DEPS}
+ COMMENT ""
+ )
+
+ # create the dummy target
+ add_executable(${_target} ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp)
+
+ # add a custom command to the dummy target
+ add_custom_command(
+ TARGET ${_target}
+ # Let's create the ${swig_outdir} at execution time, in case dir contains $(OutDir)
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${swig_outdir}
+ COMMAND "${SWIG_EXECUTABLE}"
+ ARGS "-${SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG}"
+ ${swig_source_file_flags}
+ ${CMAKE_SWIG_FLAGS}
+ -outdir ${swig_outdir}
+ ${swig_special_flags}
+ ${swig_extra_flags}
+ ${swig_include_dirs}
+ -o "${swig_generated_file_fullname}"
+ "${swig_source_file_fullname}"
+ COMMENT "Swig source"
+ )
+
+ #add dummy independent dependencies from the _target to each file
+ #that will be generated by the SWIG command above
+
+ set(${outfiles} "${swig_generated_file_fullname}" ${swig_extra_generated_files})
+
+ foreach(swig_gen_file ${${outfiles}})
+ add_custom_command(
+ OUTPUT ${swig_gen_file}
+ COMMAND ""
+ DEPENDS ${_target}
+ COMMENT ""
+ )
+ endforeach()
+
+ set_source_files_properties(
+ ${outfiles} PROPERTIES GENERATED 1
+ )
+
+endmacro()
+
+#
+# Create Swig module
+#
+macro(SWIG_ADD_MODULE name language)
+ SWIG_MODULE_INITIALIZE(${name} ${language})
+ set(swig_dot_i_sources)
+ set(swig_other_sources)
+ foreach(it ${ARGN})
+ if(${it} MATCHES ".*\\.i$")
+ set(swig_dot_i_sources ${swig_dot_i_sources} "${it}")
+ else()
+ set(swig_other_sources ${swig_other_sources} "${it}")
+ endif()
+ endforeach()
+
+ set(swig_generated_sources)
+ foreach(it ${swig_dot_i_sources})
+ SWIG_ADD_SOURCE_TO_MODULE(${name} swig_generated_source ${it})
+ set(swig_generated_sources ${swig_generated_sources} "${swig_generated_source}")
+ endforeach()
+ get_directory_property(swig_extra_clean_files ADDITIONAL_MAKE_CLEAN_FILES)
+ set_directory_properties(PROPERTIES
+ ADDITIONAL_MAKE_CLEAN_FILES "${swig_extra_clean_files};${swig_generated_sources}")
+ add_library(${SWIG_MODULE_${name}_REAL_NAME}
+ MODULE
+ ${swig_generated_sources}
+ ${swig_other_sources})
+ string(TOLOWER "${language}" swig_lowercase_language)
+ if ("${swig_lowercase_language}" STREQUAL "java")
+ if (APPLE)
+ # In java you want:
+ # System.loadLibrary("LIBRARY");
+ # then JNI will look for a library whose name is platform dependent, namely
+ # MacOS : libLIBRARY.jnilib
+ # Windows: LIBRARY.dll
+ # Linux : libLIBRARY.so
+ set_target_properties (${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".jnilib")
+ endif ()
+ endif ()
+ if ("${swig_lowercase_language}" STREQUAL "python")
+ # this is only needed for the python case where a _modulename.so is generated
+ set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES PREFIX "")
+ # Python extension modules on Windows must have the extension ".pyd"
+ # instead of ".dll" as of Python 2.5. Older python versions do support
+ # this suffix.
+ # http://docs.python.org/whatsnew/ports.html#SECTION0001510000000000000000
+ # <quote>
+ # Windows: .dll is no longer supported as a filename extension for extension modules.
+ # .pyd is now the only filename extension that will be searched for.
+ # </quote>
+ if(WIN32 AND NOT CYGWIN)
+ set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".pyd")
+ endif()
+ endif ()
+endmacro()
+
+#
+# Like TARGET_LINK_LIBRARIES but for swig modules
+#
+macro(SWIG_LINK_LIBRARIES name)
+ if(SWIG_MODULE_${name}_REAL_NAME)
+ target_link_libraries(${SWIG_MODULE_${name}_REAL_NAME} ${ARGN})
+ else()
+ message(SEND_ERROR "Cannot find Swig library \"${name}\".")
+ endif()
+endmacro()
diff --git a/tools/gr-usrptest/cmake/Modules/usrptestConfig.cmake b/tools/gr-usrptest/cmake/Modules/usrptestConfig.cmake
new file mode 100644
index 000000000..5244d466e
--- /dev/null
+++ b/tools/gr-usrptest/cmake/Modules/usrptestConfig.cmake
@@ -0,0 +1,30 @@
+INCLUDE(FindPkgConfig)
+PKG_CHECK_MODULES(PC_USRPTEST usrptest)
+
+FIND_PATH(
+ USRPTEST_INCLUDE_DIRS
+ NAMES usrptest/api.h
+ HINTS $ENV{USRPTEST_DIR}/include
+ ${PC_USRPTEST_INCLUDEDIR}
+ PATHS ${CMAKE_INSTALL_PREFIX}/include
+ /usr/local/include
+ /usr/include
+)
+
+FIND_LIBRARY(
+ USRPTEST_LIBRARIES
+ NAMES gnuradio-usrptest
+ HINTS $ENV{USRPTEST_DIR}/lib
+ ${PC_USRPTEST_LIBDIR}
+ PATHS ${CMAKE_INSTALL_PREFIX}/lib
+ ${CMAKE_INSTALL_PREFIX}/lib64
+ /usr/local/lib
+ /usr/local/lib64
+ /usr/lib
+ /usr/lib64
+)
+
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(USRPTEST DEFAULT_MSG USRPTEST_LIBRARIES USRPTEST_INCLUDE_DIRS)
+MARK_AS_ADVANCED(USRPTEST_LIBRARIES USRPTEST_INCLUDE_DIRS)
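+
+# Typical consumer usage (illustrative; "my_app" is a placeholder target):
+#   FIND_PACKAGE(usrptest)
+#   INCLUDE_DIRECTORIES(${USRPTEST_INCLUDE_DIRS})
+#   TARGET_LINK_LIBRARIES(my_app ${USRPTEST_LIBRARIES})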
+
diff --git a/tools/gr-usrptest/cmake/cmake_uninstall.cmake.in b/tools/gr-usrptest/cmake/cmake_uninstall.cmake.in
new file mode 100644
index 000000000..9ae1ae4bd
--- /dev/null
+++ b/tools/gr-usrptest/cmake/cmake_uninstall.cmake.in
@@ -0,0 +1,32 @@
+# http://www.vtk.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F
+
+IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
+ MESSAGE(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"")
+ENDIF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
+
+FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files)
+STRING(REGEX REPLACE "\n" ";" files "${files}")
+FOREACH(file ${files})
+ MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"")
+ IF(EXISTS "$ENV{DESTDIR}${file}")
+ EXEC_PROGRAM(
+ "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval
+ )
+ IF(NOT "${rm_retval}" STREQUAL 0)
+ MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"")
+ ENDIF(NOT "${rm_retval}" STREQUAL 0)
+ ELSEIF(IS_SYMLINK "$ENV{DESTDIR}${file}")
+ EXEC_PROGRAM(
+ "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval
+ )
+ IF(NOT "${rm_retval}" STREQUAL 0)
+ MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"")
+ ENDIF(NOT "${rm_retval}" STREQUAL 0)
+ ELSE(EXISTS "$ENV{DESTDIR}${file}")
+ MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.")
+ ENDIF(EXISTS "$ENV{DESTDIR}${file}")
+ENDFOREACH(file)
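+
+# This template is normally wired up from the top-level CMakeLists.txt as
+# described in the FAQ entry linked above; "uninstall" is the conventional
+# target name:
+#
+#   CONFIGURE_FILE(
+#     "${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
+#     "${CMAKE_BINARY_DIR}/cmake_uninstall.cmake"
+#     IMMEDIATE @ONLY)
+#   ADD_CUSTOM_TARGET(uninstall
+#     "${CMAKE_COMMAND}" -P "${CMAKE_BINARY_DIR}/cmake_uninstall.cmake")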
diff --git a/tools/gr-usrptest/docs/CMakeLists.txt b/tools/gr-usrptest/docs/CMakeLists.txt
new file mode 100644
index 000000000..f16fbf6db
--- /dev/null
+++ b/tools/gr-usrptest/docs/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Setup dependencies
+########################################################################
+find_package(Doxygen)
+
+########################################################################
+# Begin conditional configuration
+########################################################################
+if(ENABLE_DOXYGEN)
+
+########################################################################
+# Add subdirectories
+########################################################################
+add_subdirectory(doxygen)
+
+endif(ENABLE_DOXYGEN)
diff --git a/tools/gr-usrptest/docs/README.usrptest b/tools/gr-usrptest/docs/README.usrptest
new file mode 100644
index 000000000..512e3334b
--- /dev/null
+++ b/tools/gr-usrptest/docs/README.usrptest
@@ -0,0 +1,11 @@
+This is the usrptest-write-a-block package meant as a guide to building
+out-of-tree packages. To use the usrptest blocks, the Python namespace
+is 'usrptest', which is imported as:
+
+ import usrptest
+
+See the Doxygen documentation for details about the blocks available
+in this package. A quick listing of the details can be found in Python
+after importing by using:
+
+ help(usrptest)
diff --git a/tools/gr-usrptest/docs/doxygen/CMakeLists.txt b/tools/gr-usrptest/docs/doxygen/CMakeLists.txt
new file mode 100644
index 000000000..1b4479929
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/CMakeLists.txt
@@ -0,0 +1,52 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Create the doxygen configuration file
+########################################################################
+file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} top_srcdir)
+file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} top_builddir)
+file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} abs_top_srcdir)
+file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir)
+
+set(HAVE_DOT ${DOXYGEN_DOT_FOUND})
+set(enable_html_docs YES)
+set(enable_latex_docs NO)
+set(enable_xml_docs YES)
+
+configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
+ ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
+@ONLY)
+
+set(BUILT_DIRS ${CMAKE_CURRENT_BINARY_DIR}/xml ${CMAKE_CURRENT_BINARY_DIR}/html)
+
+########################################################################
+# Make and install doxygen docs
+########################################################################
+add_custom_command(
+ OUTPUT ${BUILT_DIRS}
+ COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMENT "Generating documentation with doxygen"
+)
+
+add_custom_target(doxygen_target ALL DEPENDS ${BUILT_DIRS})
+
+install(DIRECTORY ${BUILT_DIRS} DESTINATION ${GR_PKG_DOC_DIR})
diff --git a/tools/gr-usrptest/docs/doxygen/Doxyfile.in b/tools/gr-usrptest/docs/doxygen/Doxyfile.in
new file mode 100644
index 000000000..2becf4b28
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/Doxyfile.in
@@ -0,0 +1,1922 @@
+# Doxyfile 1.8.4
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed
+# in front of the TAG it is preceding.
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or sequence of words) that should
+# identify the project. Note that if you do not use Doxywizard you need
+# to put quotes around the project name if it contains spaces.
+
+PROJECT_NAME = "GNU Radio's USRPTEST Package"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer
+# a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian,
+# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic,
+# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = YES
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip. Note that you specify absolute paths here, but also
+# relative paths, which will be relative from the directory where doxygen is
+# started.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = YES
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding
+# "class=itcl::class" will allow you to use the command class in the
+# itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension,
+# and language is one of the parsers supported by doxygen: IDL, Java,
+# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
+# C++. For instance to make doxygen treat .inc files as Fortran files (default
+# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
+# that for custom extensions you also need to set FILE_PATTERNS otherwise the
+# files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
+# comments according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you
+# can mix doxygen, HTML, and XML commands with Markdown formatting.
+# Disable only in case of backward compatibilities issues.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = YES
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES (the
+# default) will make doxygen replace the get and set methods by a property in
+# the documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
+# unions are shown inside the group in which they are included (e.g. using
+# @ingroup) instead of on a separate page (for HTML and Man pages) or
+# section (for LaTeX and RTF).
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
+# unions with only public data fields or simple typedef fields will be shown
+# inline in the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO (the default), structs, classes, and unions are shown on a separate
+# page (for HTML and Man pages) or section (for LaTeX and RTF).
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can
+# be an expensive process and often the same symbol appears multiple times in
+# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too
+# small doxygen will become slower. If the cache is too large, memory is wasted.
+# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid
+# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536
+# symbols.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
+# do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even
+# if there is only one candidate or it is obvious which candidate to choose
+# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = NO
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = NO
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = NO
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= NO
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if section-label ... \endif
+# and \cond section-label ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files
+# containing the references data. This must be a list of .bib files. The
+# .bib extension is automatically appended if omitted. Using this command
+# requires the bibtex tool to be installed. See also
+# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
+# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
+# feature you need bibtex and perl available in the search path. Do not use
+# file names with spaces, bibtex cannot handle them.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text "
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = @top_srcdir@ \
+ @top_builddir@
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS = *.h \
+ *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE = @abs_top_builddir@/docs/doxygen/html \
+ @abs_top_builddir@/docs/doxygen/xml \
+ @abs_top_builddir@/docs/doxygen/other/doxypy.py \
+ @abs_top_builddir@/_CPack_Packages \
+ @abs_top_srcdir@/cmake
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = */.deps/* \
+ */.libs/* \
+ */.svn/* \
+ */CVS/* \
+ */__init__.py \
+ */qa_*.cc \
+ */qa_*.h \
+ */qa_*.py
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS = ad9862 \
+ numpy \
+ *swig* \
+ *Swig* \
+ *my_top_block* \
+ *my_graph* \
+ *app_top_block* \
+ *am_rx_graph* \
+ *_queue_watcher_thread* \
+ *parse* \
+ *MyFrame* \
+ *MyApp* \
+ *PyObject* \
+ *wfm_rx_block* \
+ *_sptr* \
+ *debug* \
+ *wfm_rx_sca_block* \
+ *tv_rx_block* \
+ *wxapt_rx_block* \
+ *example_signal*
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be ignored.
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS = *.py=@top_srcdir@/doc/doxygen/other/doxypy.py
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERNS (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on, for instance, GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C, C++ and Fortran comments will always remain visible.
+
+STRIP_CODE_COMMENTS = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = @enable_html_docs@
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header. Note that when using a custom header you are responsible
+# for the proper inclusion of any scripts and style sheets that doxygen
+# needs, which is dependent on the configuration options used.
+# It is advised to generate a default header using "doxygen -w html
+# header.html footer.html stylesheet.css YourConfigFile" and then modify
+# that header. Note that the header is subject to change so you typically
+# have to redo this when upgrading to a newer version of doxygen or when
+# changing the value of configuration settings such as GENERATE_TREEVIEW!
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If left blank doxygen will
+# generate a default style sheet. Note that it is recommended to use
+# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
+# tag will in the future become obsolete.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
+# user-defined cascading style sheet that is included after the standard
+# style sheets created by doxygen. Using this option one can overrule
+# certain style aspects. This is preferred over using HTML_STYLESHEET
+# since it does not replace the standard style sheet and is therefore more
+# robust against future updates. Doxygen will copy the style sheet file to
+# the output directory.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that
+# the files will be copied as-is; there are no commands or markers available.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the style sheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
+# entries shown in the various tree structured indices initially; the user
+# can expand and collapse entries dynamically later on. Doxygen will expand
+# the tree to such a level that at most the specified number of entries are
+# visible (unless a fully collapsed tree already exceeds this amount).
+# So setting the number of entries to 1 will produce a fully collapsed tree by
+# default. 0 is a special value representing an infinite number of entries
+# and will result in a fully expanded tree by default.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
+# identify the documentation publisher. This should be a reverse domain-name
+# style string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or whether
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = YES
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the virtual folder to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
+# at top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. Since the tabs have the same information as the
+# navigation tree you can set this option to YES if you already set
+# GENERATE_TREEVIEW to YES.
+
+DISABLE_INDEX = YES
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+# Since the tree basically has the same information as the tab index you
+# could consider setting DISABLE_INDEX to YES when enabling this option.
+
+GENERATE_TREEVIEW = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
+# (range [0,1..20]) that doxygen will group on one line in the generated HTML
+# documentation. Note that a value of 0 will completely suppress the enum
+# values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 180
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you may also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and
+# SVG. The default value is HTML-CSS, which is slower, but has the best
+# compatibility.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to
+# the MathJax Content Delivery Network so you can quickly see the result without
+# installing MathJax.
+# However, it is strongly recommended to install a local
+# copy of MathJax from http://www.mathjax.org before deployment.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax extension
+# names that should be enabled during MathJax rendering.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript
+# pieces of code that will be used on startup of the MathJax code.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow; enabling SERVER_BASED_SEARCH may then provide a better solution.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript.
+# There are two flavours of web server based search depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools.
+# See the manual for details.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain
+# the search results. Doxygen ships with an example indexer (doxyindexer) and
+# search engine (doxysearch.cgi) which are based on the open source search
+# engine library Xapian. See the manual for configuration details.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+# Doxygen ships with an example search engine (doxysearch) which is based on
+# the open source search engine library Xapian. See the manual for configuration
+# details.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# of a project to a relative location where the documentation can be found.
+# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ...
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = @enable_latex_docs@
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4 will be used.
+
+PAPER_TYPE = letter
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
+# the generated latex document. The footer should contain everything after
+# the last chapter. If it is left blank doxygen will generate a
+# standard footer. Notice: only use this tag if you know what you are doing!
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images
+# or other source files which should be copied to the LaTeX output directory.
+# Note that the files will be copied as-is; there are no commands or markers
+# available.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
+# http://en.wikipedia.org/wiki/BibTeX for more info.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load style sheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES Doxygen will
+# generate man pages.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = @enable_xml_docs@
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files
+# that can be used to generate PDF.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it. If left blank docbook will be used as the default path.
+
+DOCBOOK_OUTPUT = docbook
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# pointed to by INCLUDE_PATH will be searched when a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that
+# overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. For each
+# tag file the location of the external documentation should be added. The
+# format of a tag file without this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths
+# or URLs. Note that each tag file must have a unique name (where the name does
+# NOT include the path). If a tag file is not located in the directory in which
+# doxygen is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed
+# in the related pages index. If set to NO, only the current project's
+# pages will be listed.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = @HAVE_DOT@
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will use the Helvetica font for all dot files that
+# doxygen generates. When you want a differently looking font you can specify
+# the font name using DOT_FONTNAME. You need to make sure dot is able to find
+# the font, which can be done by putting it in a standard location or by setting
+# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the Helvetica font.
+# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
+# set the path where dot can find it.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = NO
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside
+# the class node. If there are many fields or methods and many nodes the
+# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
+# threshold limits the number of items for each type to make the size more
+# manageable. Set this to 0 for no limit. Note that the threshold may be
+# exceeded by 50% before the limit is enforced.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are svg, png, jpg, or gif.
+# If left blank png will be used. If you choose svg you need to set
+# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible in IE 9+ (other browsers do not have this requirement).
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+# Note that this requires a modern browser other than Internet Explorer.
+# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
+# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible. Older versions of IE do not have SVG support.
+
+INTERACTIVE_SVG = NO
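+# As an illustrative sketch only (not the values used in this file): zoomable
+# vector graphs would combine the two options above with an xhtml extension,
+# e.g.
+#   DOT_IMAGE_FORMAT    = svg
+#   INTERACTIVE_SVG     = YES
+#   HTML_FILE_EXTENSION = .xhtml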
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the number of
+# direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/tools/gr-usrptest/docs/doxygen/Doxyfile.swig_doc.in b/tools/gr-usrptest/docs/doxygen/Doxyfile.swig_doc.in
new file mode 100644
index 000000000..57736d7d0
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/Doxyfile.swig_doc.in
@@ -0,0 +1,1890 @@
+# Doxyfile 1.8.4
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed
+# in front of the TAG it precedes.
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or sequence of words) that should
+# identify the project. Note that if you do not use Doxywizard you need
+# to put quotes around the project name if it contains spaces.
+
+PROJECT_NAME = @CPACK_PACKAGE_NAME@
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give the viewer
+# a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = @OUTPUT_DIRECTORY@
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian,
+# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic,
+# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
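+# For illustration only (the tag is left blank here): restating part of the
+# stock abbreviation list quoted above would look like
+#   ABBREVIATE_BRIEF = "The $name class" "The $name widget" "The $name file"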
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before each file name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip. Note that you can specify absolute paths here, but also
+# relative paths, which will be relative to the directory where doxygen is
+# started.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that act
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
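+# As a sketch only (no alias is actually defined here): the \sideeffect
+# example from the comment above would be written as
+#   ALIASES = "sideeffect=\par Side Effects:\n"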
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding
+# "class=itcl::class" will allow you to use the command class in the
+# itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension,
+# and language is one of the parsers supported by doxygen: IDL, Java,
+# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
+# C++. For instance to make doxygen treat .inc files as Fortran files (default
+# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
+# that for custom extensions you also need to set FILE_PATTERNS otherwise the
+# files are not read by doxygen.
+
+EXTENSION_MAPPING =
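+# Illustrative only (the mapping is left empty here): the example from the
+# comment above would read
+#   EXTENSION_MAPPING = inc=Fortran f=C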
+
+# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
+# comments according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you
+# can mix doxygen, HTML, and XML commands with Markdown formatting.
+# Disable only in case of backward compatibility issues.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = YES
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES (the
+# default) will make doxygen replace the get and set methods by a property in
+# the documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
+# unions are shown inside the group in which they are included (e.g. using
+# @ingroup) instead of on a separate page (for HTML and Man pages) or
+# section (for LaTeX and RTF).
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
+# unions with only public data fields or simple typedef fields will be shown
+# inline in the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO (the default), structs, classes, and unions are shown on a separate
+# page (for HTML and Man pages) or section (for LaTeX and RTF).
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can
+# be an expensive process and often the same symbol appears multiple times in
+# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too
+# small doxygen will become slower. If the cache is too large, memory is wasted.
+# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid
+# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536
+# symbols.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags, respectively, are set to YES.
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface, are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
+# do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even
+# if there is only one candidate or it is obvious which candidate to choose
+# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if section-label ... \endif
+# and \cond section-label ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# commands in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files
+# containing the references data. This must be a list of .bib files. The
+# .bib extension is automatically appended if omitted. Using this command
+# requires the bibtex tool to be installed. See also
+# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
+# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
+# feature you need bibtex and perl available in the search path. Do not use
+# file names with spaces, bibtex cannot handle them.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
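+# A hedged variant, not used here: if FILE_VERSION_FILTER were configured, the
+# optional $version tag could be appended as well, e.g.
+#   WARN_FORMAT = "$file:$line: $text (version $version)"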
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = @INPUT_PATHS@
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS = *.h
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
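+# For illustration only (nothing is excluded here): the example from the
+# comment above, excluding all test directories, would be
+#   EXCLUDE_PATTERNS = */test/*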
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
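+# As an example only (no symbols are excluded here): using the sample symbols
+# from the comment above, the tag could read
+#   EXCLUDE_SYMBOLS = ANamespace::*Test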
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be ignored.
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
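+# Illustrative only (no filter is configured here): using the placeholder
+# filter name from the comment above, a per-pattern filter would read
+#   FILTER_PATTERNS = *.cpp=my_cpp_filter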
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C, C++ and Fortran comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = NO
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header. Note that when using a custom header you are responsible
+# for the proper inclusion of any scripts and style sheets that doxygen
+# needs, which is dependent on the configuration options used.
+# It is advised to generate a default header using "doxygen -w html
+# header.html footer.html stylesheet.css YourConfigFile" and then modify
+# that header. Note that the header is subject to change so you typically
+# have to redo this when upgrading to a newer version of doxygen or when
+# changing the value of configuration settings such as GENERATE_TREEVIEW!
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If left blank doxygen will
+# generate a default style sheet. Note that it is recommended to use
+# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
+# tag will in the future become obsolete.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
+# user-defined cascading style sheet that is included after the standard
+# style sheets created by doxygen. Using this option one can overrule
+# certain style aspects. This is preferred over using HTML_STYLESHEET
+# since it does not replace the standard style sheet and is therefore more
+# robust against future updates. Doxygen will copy the style sheet file to
+# the output directory.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that
+# the files will be copied as-is; there are no commands or markers available.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the style sheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
+# entries shown in the various tree structured indices initially; the user
+# can expand and collapse entries dynamically later on. Doxygen will expand
+# the tree to such a level that at most the specified number of entries are
+# visible (unless a fully collapsed tree already exceeds this amount).
+# So setting the number of entries to 1 will produce a fully collapsed tree by
+# default. 0 is a special value representing an infinite number of entries
+# and will result in a fully expanded tree by default.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
+# identify the documentation publisher. This should be a reverse domain-name
+# style string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or whether
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files need to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying, Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
+# at top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. Since the tabs have the same information as the
+# navigation tree you can set this option to YES if you already set
+# GENERATE_TREEVIEW to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+# Since the tree basically has the same information as the tab index you
+# could consider setting DISABLE_INDEX to YES when enabling this option.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
+# (range [0,1..20]) that doxygen will group on one line in the generated HTML
+# documentation. Note that a value of 0 will completely suppress the enum
+# values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes take effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you may also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and
+# SVG. The default value is HTML-CSS, which is slower, but has the best
+# compatibility.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to
+# the MathJax Content Delivery Network so you can quickly see the result without
+# installing MathJax.
+# However, it is strongly recommended to install a local
+# copy of MathJax from http://www.mathjax.org before deployment.
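+# As a hypothetical example, with a local MathJax copy placed at the same
+# level as the HTML output directory, this would become:
+# MATHJAX_RELPATH = ../mathjax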
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax extension
+# names that should be enabled during MathJax rendering.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript
+# pieces of code that will be used on startup of the MathJax code.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow; enabling SERVER_BASED_SEARCH may then provide a better solution.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript.
+# There are two flavours of web server based search depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools.
+# See the manual for details.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain
+# the search results. Doxygen ships with an example indexer (doxyindexer) and
+# search engine (doxysearch.cgi) which are based on the open source search
+# engine library Xapian. See the manual for configuration details.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+# Doxygen ships with an example search engine (doxysearch) which is based on
+# the open source search engine library Xapian. See the manual for configuration
+# details.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found.
+# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ...
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4 will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
+# the generated latex document. The footer should contain everything after
+# the last chapter. If it is left blank doxygen will generate a
+# standard footer. Notice: only use this tag if you know what you are doing!
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images
+# or other source files which should be copied to the LaTeX output directory.
+# Note that the files will be copied as-is; there are no commands or markers
+# available.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
+# http://en.wikipedia.org/wiki/BibTeX for more info.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load style sheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES Doxygen will
+# generate man pages.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = YES
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files
+# that can be used to generate PDF.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it. If left blank docbook will be used as the default path.
+
+DOCBOOK_OUTPUT = docbook
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader.
+# This is useful
+# if you want to understand what is going on.
+# On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# pointed to by INCLUDE_PATH will be searched when a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
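+# A hypothetical example using both forms described above would be:
+# PREDEFINED = USRPTEST_API= DEBUG_LEVEL=1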
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that
+# overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. For each
+# tag file the location of the external documentation should be added. The
+# format of a tag file without this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths
+# or URLs. Note that each tag file must have a unique name (where the name does
+# NOT include the path). If a tag file is not located in the directory in which
+# doxygen is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed
+# in the related pages index. If set to NO, only the current project's
+# pages will be listed.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will use the Helvetica font for all dot files that
+# doxygen generates. When you want a differently looking font you can specify
+# the font name using DOT_FONTNAME. You need to make sure dot is able to find
+# the font, which can be done by putting it in a standard location or by setting
+# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the Helvetica font.
+# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
+# set the path where dot can find it.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct dependencies between groups.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside
+# the class node. If there are many fields or methods and many nodes the
+# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
+# threshold limits the number of items for each type to make the size more
+# manageable. Set this to 0 for no limit. Note that the threshold may be
+# exceeded by 50% before the limit is enforced.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are svg, png, jpg, or gif.
+# If left blank png will be used. If you choose svg you need to set
+# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible in IE 9+ (other browsers do not have this requirement).
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+# Note that this requires a modern browser other than Internet Explorer.
+# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
+# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible. Older versions of IE do not have SVG support.
+
+INTERACTIVE_SVG = NO
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/tools/gr-usrptest/docs/doxygen/doxyxml/__init__.py b/tools/gr-usrptest/docs/doxygen/doxyxml/__init__.py
new file mode 100644
index 000000000..5cd0b3c6c
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/doxyxml/__init__.py
@@ -0,0 +1,82 @@
+#
+# Copyright 2010 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+Python interface to contents of doxygen xml documentation.
+
+Example use:
+See the contents of the example folder for the C++ and
+doxygen-generated xml used in this example.
+
+>>> # Parse the doxygen docs.
+>>> import os
+>>> this_dir = os.path.dirname(globals()['__file__'])
+>>> xml_path = this_dir + "/example/xml/"
+>>> di = DoxyIndex(xml_path)
+
+Get a list of all top-level objects.
+
+>>> print([mem.name() for mem in di.members()])
+[u'Aadvark', u'aadvarky_enough', u'main']
+
+Get all functions.
+
+>>> print([mem.name() for mem in di.in_category(DoxyFunction)])
+[u'aadvarky_enough', u'main']
+
+Check if an object is present.
+
+>>> di.has_member(u'Aadvark')
+True
+>>> di.has_member(u'Fish')
+False
+
+Get an item by name and check its properties.
+
+>>> aad = di.get_member(u'Aadvark')
+>>> print(aad.brief_description)
+Models the mammal Aadvark.
+>>> print(aad.detailed_description)
+Sadly the model is incomplete and cannot capture all aspects of an aadvark yet.
+<BLANKLINE>
+This line is uninformative and is only to test line breaks in the comments.
+>>> [mem.name() for mem in aad.members()]
+[u'aadvarkness', u'print', u'Aadvark', u'get_aadvarkness']
+>>> aad.get_member(u'print').brief_description
+u'Outputs the vital aadvark statistics.'
+
+"""
+
+from doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther
+
+def _test():
+ import os
+ this_dir = os.path.dirname(globals()['__file__'])
+ xml_path = this_dir + "/example/xml/"
+ di = DoxyIndex(xml_path)
+ # Get the Aadvark class
+ aad = di.get_member('Aadvark')
+ aad.brief_description
+ import doctest
+ return doctest.testmod()
+
+if __name__ == "__main__":
+ _test()
+
diff --git a/tools/gr-usrptest/docs/doxygen/doxyxml/base.py b/tools/gr-usrptest/docs/doxygen/doxyxml/base.py
new file mode 100644
index 000000000..e8f026ab9
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/doxyxml/base.py
@@ -0,0 +1,219 @@
+#
+# Copyright 2010 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+Provides the base class for the doxyxml wrapper classes.
+
+Classes based upon this are used to make more user-friendly interfaces
+to the doxygen xml docs than the generated classes provide.
+"""
+
+import os
+import pdb
+
+from xml.parsers.expat import ExpatError
+
+from generated import compound
+
+
+class Base(object):
+
+ class Duplicate(StandardError):
+ pass
+
+ class NoSuchMember(StandardError):
+ pass
+
+ class ParsingError(StandardError):
+ pass
+
+ def __init__(self, parse_data, top=None):
+ self._parsed = False
+ self._error = False
+ self._parse_data = parse_data
+ self._members = []
+ self._dict_members = {}
+ self._in_category = {}
+ self._data = {}
+ if top is not None:
+ self._xml_path = top._xml_path
+ # Set up holder of references
+ else:
+ top = self
+ self._refs = {}
+ self._xml_path = parse_data
+ self.top = top
+
+ @classmethod
+ def from_refid(cls, refid, top=None):
+ """ Instantiate class from a refid rather than parsing object. """
+        # First check to see if it's already been instantiated.
+ if top is not None and refid in top._refs:
+ return top._refs[refid]
+ # Otherwise create a new instance and set refid.
+ inst = cls(None, top=top)
+ inst.refid = refid
+ inst.add_ref(inst)
+ return inst
+
+ @classmethod
+ def from_parse_data(cls, parse_data, top=None):
+ refid = getattr(parse_data, 'refid', None)
+ if refid is not None and top is not None and refid in top._refs:
+ return top._refs[refid]
+ inst = cls(parse_data, top=top)
+ if refid is not None:
+ inst.refid = refid
+ inst.add_ref(inst)
+ return inst
+
+ def add_ref(self, obj):
+ if hasattr(obj, 'refid'):
+ self.top._refs[obj.refid] = obj
+
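+    # Registry of wrapper classes. The concrete Doxy* classes in doxyindex.py
+    # append themselves here so that get_cls()/convert_mem() below can pick
+    # the right wrapper for each parsed doxygen element via can_parse().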
+ mem_classes = []
+
+ def get_cls(self, mem):
+ for cls in self.mem_classes:
+ if cls.can_parse(mem):
+ return cls
+ raise StandardError(("Did not find a class for object '%s'." \
+ % (mem.get_name())))
+
+ def convert_mem(self, mem):
+ try:
+ cls = self.get_cls(mem)
+ converted = cls.from_parse_data(mem, self.top)
+ if converted is None:
+ raise StandardError('No class matched this object.')
+ self.add_ref(converted)
+ return converted
+ except StandardError, e:
+ print e
+
+ @classmethod
+ def includes(cls, inst):
+ return isinstance(inst, cls)
+
+ @classmethod
+ def can_parse(cls, obj):
+ return False
+
+ def _parse(self):
+ self._parsed = True
+
+ def _get_dict_members(self, cat=None):
+ """
+        For a given category a dictionary is returned mapping member names to
+        members of that category. Names that are duplicated are mapped to the
+        Duplicate exception class instead.
+ """
+ self.confirm_no_error()
+ if cat not in self._dict_members:
+ new_dict = {}
+ for mem in self.in_category(cat):
+ if mem.name() not in new_dict:
+ new_dict[mem.name()] = mem
+ else:
+ new_dict[mem.name()] = self.Duplicate
+ self._dict_members[cat] = new_dict
+ return self._dict_members[cat]
+
+ def in_category(self, cat):
+ self.confirm_no_error()
+ if cat is None:
+ return self._members
+ if cat not in self._in_category:
+ self._in_category[cat] = [mem for mem in self._members
+ if cat.includes(mem)]
+ return self._in_category[cat]
+
+ def get_member(self, name, cat=None):
+ self.confirm_no_error()
+ # Check if it's in a namespace or class.
+ bits = name.split('::')
+ first = bits[0]
+ rest = '::'.join(bits[1:])
+ member = self._get_dict_members(cat).get(first, self.NoSuchMember)
+ # Raise any errors that are returned.
+ if member in set([self.NoSuchMember, self.Duplicate]):
+ raise member()
+ if rest:
+ return member.get_member(rest, cat=cat)
+ return member
+
+ def has_member(self, name, cat=None):
+ try:
+ mem = self.get_member(name, cat=cat)
+ return True
+ except self.NoSuchMember:
+ return False
+
+ def data(self):
+ self.confirm_no_error()
+ return self._data
+
+ def members(self):
+ self.confirm_no_error()
+ return self._members
+
+ def process_memberdefs(self):
+ mdtss = []
+ for sec in self._retrieved_data.compounddef.sectiondef:
+ mdtss += sec.memberdef
+ # At the moment we lose all information associated with sections.
+ # Sometimes a memberdef is in several sectiondef.
+ # We make sure we don't get duplicates here.
+ uniques = set([])
+ for mem in mdtss:
+ converted = self.convert_mem(mem)
+ pair = (mem.name, mem.__class__)
+ if pair not in uniques:
+ uniques.add(pair)
+ self._members.append(converted)
+
+ def retrieve_data(self):
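+        # Each compound's full description lives in '<refid>.xml' alongside
+        # index.xml in the doxygen xml output directory.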
+ filename = os.path.join(self._xml_path, self.refid + '.xml')
+ try:
+ self._retrieved_data = compound.parse(filename)
+ except ExpatError:
+ print('Error in xml in file %s' % filename)
+ self._error = True
+ self._retrieved_data = None
+
+ def check_parsed(self):
+ if not self._parsed:
+ self._parse()
+
+ def confirm_no_error(self):
+ self.check_parsed()
+ if self._error:
+ raise self.ParsingError()
+
+ def error(self):
+ self.check_parsed()
+ return self._error
+
+ def name(self):
+ # first see if we can do it without processing.
+ if self._parse_data is not None:
+ return self._parse_data.name
+ self.check_parsed()
+ return self._retrieved_data.compounddef.name
diff --git a/tools/gr-usrptest/docs/doxygen/doxyxml/doxyindex.py b/tools/gr-usrptest/docs/doxygen/doxyxml/doxyindex.py
new file mode 100644
index 000000000..78e815376
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/doxyxml/doxyindex.py
@@ -0,0 +1,301 @@
+#
+# Copyright 2010 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+Classes providing more user-friendly interfaces to the doxygen xml
+docs than the generated classes provide.
+"""
+
+import os
+
+from generated import index
+from base import Base
+from text import description
+
+class DoxyIndex(Base):
+ """
+ Parses a doxygen xml directory.
+ """
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyIndex, self)._parse()
+ self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
+ for mem in self._root.compound:
+ converted = self.convert_mem(mem)
+ # For files and namespaces we want the contents to be
+ # accessible directly from the parent rather than having
+ # to go through the file object.
+ if self.get_cls(mem) == DoxyFile:
+ if mem.name.endswith('.h'):
+ self._members += converted.members()
+ self._members.append(converted)
+ elif self.get_cls(mem) == DoxyNamespace:
+ self._members += converted.members()
+ self._members.append(converted)
+ else:
+ self._members.append(converted)
+
+
+def generate_swig_doc_i(self):
+ """
+ %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
+ Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
+ """
+ pass
+
+
+class DoxyCompMem(Base):
+
+
+ kind = None
+
+ def __init__(self, *args, **kwargs):
+ super(DoxyCompMem, self).__init__(*args, **kwargs)
+
+ @classmethod
+ def can_parse(cls, obj):
+ return obj.kind == cls.kind
+
+ def set_descriptions(self, parse_data):
+ bd = description(getattr(parse_data, 'briefdescription', None))
+ dd = description(getattr(parse_data, 'detaileddescription', None))
+ self._data['brief_description'] = bd
+ self._data['detailed_description'] = dd
+
+ def set_parameters(self, data):
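+        # Collect every parameterlist found in the detailed description and
+        # wrap each of its parameteritem entries in a DoxyParameterItem.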
+ vs = [ddc.value for ddc in data.detaileddescription.content_]
+ pls = []
+ for v in vs:
+ if hasattr(v, 'parameterlist'):
+ pls += v.parameterlist
+ pis = []
+ for pl in pls:
+ pis += pl.parameteritem
+ dpis = []
+ for pi in pis:
+ dpi = DoxyParameterItem(pi)
+ dpi._parse()
+ dpis.append(dpi)
+ self._data['params'] = dpis
+
+
+class DoxyCompound(DoxyCompMem):
+ pass
+
+class DoxyMember(DoxyCompMem):
+ pass
+
+class DoxyFunction(DoxyMember):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'function'
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyFunction, self)._parse()
+ self.set_descriptions(self._parse_data)
+ self.set_parameters(self._parse_data)
+ if not self._data['params']:
+ # If the params weren't set by a comment then just grab the names.
+ self._data['params'] = []
+ prms = self._parse_data.param
+ for prm in prms:
+ self._data['params'].append(DoxyParam(prm))
+
+ brief_description = property(lambda self: self.data()['brief_description'])
+ detailed_description = property(lambda self: self.data()['detailed_description'])
+ params = property(lambda self: self.data()['params'])
+
+Base.mem_classes.append(DoxyFunction)
+
+
+class DoxyParam(DoxyMember):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyParam, self)._parse()
+ self.set_descriptions(self._parse_data)
+ self._data['declname'] = self._parse_data.declname
+
+ @property
+ def description(self):
+ descriptions = []
+ if self.brief_description:
+ descriptions.append(self.brief_description)
+ if self.detailed_description:
+ descriptions.append(self.detailed_description)
+ return '\n\n'.join(descriptions)
+
+ brief_description = property(lambda self: self.data()['brief_description'])
+ detailed_description = property(lambda self: self.data()['detailed_description'])
+ name = property(lambda self: self.data()['declname'])
+
+class DoxyParameterItem(DoxyMember):
+ """A different representation of a parameter in Doxygen."""
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyParameterItem, self)._parse()
+ names = []
+ for nl in self._parse_data.parameternamelist:
+ for pn in nl.parametername:
+ names.append(description(pn))
+ # Just take first name
+ self._data['name'] = names[0]
+ # Get description
+ pd = description(self._parse_data.get_parameterdescription())
+ self._data['description'] = pd
+
+ description = property(lambda self: self.data()['description'])
+ name = property(lambda self: self.data()['name'])
+
+
+class DoxyClass(DoxyCompound):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'class'
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyClass, self)._parse()
+ self.retrieve_data()
+ if self._error:
+ return
+ self.set_descriptions(self._retrieved_data.compounddef)
+ self.set_parameters(self._retrieved_data.compounddef)
+        # Sectiondef.kind indicates whether members are private or public.
+        # We just ignore this for now.
+ self.process_memberdefs()
+
+ brief_description = property(lambda self: self.data()['brief_description'])
+ detailed_description = property(lambda self: self.data()['detailed_description'])
+ params = property(lambda self: self.data()['params'])
+
+Base.mem_classes.append(DoxyClass)
+
+
+class DoxyFile(DoxyCompound):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'file'
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyFile, self)._parse()
+ self.retrieve_data()
+ self.set_descriptions(self._retrieved_data.compounddef)
+ if self._error:
+ return
+ self.process_memberdefs()
+
+ brief_description = property(lambda self: self.data()['brief_description'])
+ detailed_description = property(lambda self: self.data()['detailed_description'])
+
+Base.mem_classes.append(DoxyFile)
+
+
+class DoxyNamespace(DoxyCompound):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'namespace'
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyNamespace, self)._parse()
+ self.retrieve_data()
+ self.set_descriptions(self._retrieved_data.compounddef)
+ if self._error:
+ return
+ self.process_memberdefs()
+
+Base.mem_classes.append(DoxyNamespace)
+
+
+class DoxyGroup(DoxyCompound):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'group'
+
+ def _parse(self):
+ if self._parsed:
+ return
+ super(DoxyGroup, self)._parse()
+ self.retrieve_data()
+ if self._error:
+ return
+ cdef = self._retrieved_data.compounddef
+ self._data['title'] = description(cdef.title)
+ # Process inner groups
+ grps = cdef.innergroup
+ for grp in grps:
+ converted = DoxyGroup.from_refid(grp.refid, top=self.top)
+ self._members.append(converted)
+ # Process inner classes
+ klasses = cdef.innerclass
+ for kls in klasses:
+ converted = DoxyClass.from_refid(kls.refid, top=self.top)
+ self._members.append(converted)
+ # Process normal members
+ self.process_memberdefs()
+
+ title = property(lambda self: self.data()['title'])
+
+
+Base.mem_classes.append(DoxyGroup)
+
+
+class DoxyFriend(DoxyMember):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kind = 'friend'
+
+Base.mem_classes.append(DoxyFriend)
+
+
+class DoxyOther(Base):
+
+ __module__ = "gnuradio.utils.doxyxml"
+
+ kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum',
+ 'dir', 'page', 'signal', 'slot', 'property'])
+
+ @classmethod
+ def can_parse(cls, obj):
+ return obj.kind in cls.kinds
+
+Base.mem_classes.append(DoxyOther)
diff --git a/tools/gr-usrptest/docs/doxygen/doxyxml/generated/__init__.py b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/__init__.py
new file mode 100644
index 000000000..39823979f
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/__init__.py
@@ -0,0 +1,7 @@
+"""
+Contains generated files produced by generateDS.py.
+
+These do the real work of parsing the doxygen xml files, but the
+resultant classes are not very friendly to navigate, so the rest of the
+doxyxml module processes them further.
+"""
diff --git a/tools/gr-usrptest/docs/doxygen/doxyxml/generated/compound.py b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/compound.py
new file mode 100644
index 000000000..1522ac23f
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/compound.py
@@ -0,0 +1,503 @@
+#!/usr/bin/env python
+
+"""
+Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
+"""
+
+from string import lower as str_lower
+from xml.dom import minidom
+from xml.dom import Node
+
+import sys
+
+import compoundsuper as supermod
+from compoundsuper import MixedContainer
+
+
+class DoxygenTypeSub(supermod.DoxygenType):
+ def __init__(self, version=None, compounddef=None):
+ supermod.DoxygenType.__init__(self, version, compounddef)
+
+ def find(self, details):
+
+ return self.compounddef.find(details)
+
+supermod.DoxygenType.subclass = DoxygenTypeSub
+# end class DoxygenTypeSub
+
+
+class compounddefTypeSub(supermod.compounddefType):
+ def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
+ supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
+
+ def find(self, details):
+
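+        # 'details' carries the refid being looked up; match this compound
+        # directly or recurse into its section definitions.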
+ if self.id == details.refid:
+ return self
+
+ for sectiondef in self.sectiondef:
+ result = sectiondef.find(details)
+ if result:
+ return result
+
+
+supermod.compounddefType.subclass = compounddefTypeSub
+# end class compounddefTypeSub
+
+
+class listofallmembersTypeSub(supermod.listofallmembersType):
+ def __init__(self, member=None):
+ supermod.listofallmembersType.__init__(self, member)
+supermod.listofallmembersType.subclass = listofallmembersTypeSub
+# end class listofallmembersTypeSub
+
+
+class memberRefTypeSub(supermod.memberRefType):
+ def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
+ supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
+supermod.memberRefType.subclass = memberRefTypeSub
+# end class memberRefTypeSub
+
+
+class compoundRefTypeSub(supermod.compoundRefType):
+ def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.compoundRefType.__init__(self, mixedclass_, content_)
+supermod.compoundRefType.subclass = compoundRefTypeSub
+# end class compoundRefTypeSub
+
+
+class reimplementTypeSub(supermod.reimplementType):
+ def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.reimplementType.__init__(self, mixedclass_, content_)
+supermod.reimplementType.subclass = reimplementTypeSub
+# end class reimplementTypeSub
+
+
+class incTypeSub(supermod.incType):
+ def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.incType.__init__(self, mixedclass_, content_)
+supermod.incType.subclass = incTypeSub
+# end class incTypeSub
+
+
+class refTypeSub(supermod.refType):
+ def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.refType.__init__(self, mixedclass_, content_)
+supermod.refType.subclass = refTypeSub
+# end class refTypeSub
+
+
+
+class refTextTypeSub(supermod.refTextType):
+ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.refTextType.__init__(self, mixedclass_, content_)
+
+supermod.refTextType.subclass = refTextTypeSub
+# end class refTextTypeSub
+
+class sectiondefTypeSub(supermod.sectiondefType):
+
+
+ def __init__(self, kind=None, header='', description=None, memberdef=None):
+ supermod.sectiondefType.__init__(self, kind, header, description, memberdef)
+
+ def find(self, details):
+
+ for memberdef in self.memberdef:
+ if memberdef.id == details.refid:
+ return memberdef
+
+ return None
+
+
+supermod.sectiondefType.subclass = sectiondefTypeSub
+# end class sectiondefTypeSub
+
+
+class memberdefTypeSub(supermod.memberdefType):
+ def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
+ supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
+supermod.memberdefType.subclass = memberdefTypeSub
+# end class memberdefTypeSub
+
+
+class descriptionTypeSub(supermod.descriptionType):
+ def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
+ supermod.descriptionType.__init__(self, mixedclass_, content_)
+supermod.descriptionType.subclass = descriptionTypeSub
+# end class descriptionTypeSub
+
+
+class enumvalueTypeSub(supermod.enumvalueType):
+ def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
+ supermod.enumvalueType.__init__(self, mixedclass_, content_)
+supermod.enumvalueType.subclass = enumvalueTypeSub
+# end class enumvalueTypeSub
+
+
+class templateparamlistTypeSub(supermod.templateparamlistType):
+ def __init__(self, param=None):
+ supermod.templateparamlistType.__init__(self, param)
+supermod.templateparamlistType.subclass = templateparamlistTypeSub
+# end class templateparamlistTypeSub
+
+
+class paramTypeSub(supermod.paramType):
+ def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None):
+ supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
+supermod.paramType.subclass = paramTypeSub
+# end class paramTypeSub
+
+
+class linkedTextTypeSub(supermod.linkedTextType):
+ def __init__(self, ref=None, mixedclass_=None, content_=None):
+ supermod.linkedTextType.__init__(self, mixedclass_, content_)
+supermod.linkedTextType.subclass = linkedTextTypeSub
+# end class linkedTextTypeSub
+
+
+class graphTypeSub(supermod.graphType):
+ def __init__(self, node=None):
+ supermod.graphType.__init__(self, node)
+supermod.graphType.subclass = graphTypeSub
+# end class graphTypeSub
+
+
+class nodeTypeSub(supermod.nodeType):
+ def __init__(self, id=None, label='', link=None, childnode=None):
+ supermod.nodeType.__init__(self, id, label, link, childnode)
+supermod.nodeType.subclass = nodeTypeSub
+# end class nodeTypeSub
+
+
+class childnodeTypeSub(supermod.childnodeType):
+ def __init__(self, relation=None, refid=None, edgelabel=None):
+ supermod.childnodeType.__init__(self, relation, refid, edgelabel)
+supermod.childnodeType.subclass = childnodeTypeSub
+# end class childnodeTypeSub
+
+
+class linkTypeSub(supermod.linkType):
+ def __init__(self, refid=None, external=None, valueOf_=''):
+ supermod.linkType.__init__(self, refid, external)
+supermod.linkType.subclass = linkTypeSub
+# end class linkTypeSub
+
+
+class listingTypeSub(supermod.listingType):
+ def __init__(self, codeline=None):
+ supermod.listingType.__init__(self, codeline)
+supermod.listingType.subclass = listingTypeSub
+# end class listingTypeSub
+
+
+class codelineTypeSub(supermod.codelineType):
+ def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
+ supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
+supermod.codelineType.subclass = codelineTypeSub
+# end class codelineTypeSub
+
+
+class highlightTypeSub(supermod.highlightType):
+ def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
+ supermod.highlightType.__init__(self, mixedclass_, content_)
+supermod.highlightType.subclass = highlightTypeSub
+# end class highlightTypeSub
+
+
+class referenceTypeSub(supermod.referenceType):
+ def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.referenceType.__init__(self, mixedclass_, content_)
+supermod.referenceType.subclass = referenceTypeSub
+# end class referenceTypeSub
+
+
+class locationTypeSub(supermod.locationType):
+ def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
+ supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
+supermod.locationType.subclass = locationTypeSub
+# end class locationTypeSub
+
+
+class docSect1TypeSub(supermod.docSect1Type):
+ def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
+ supermod.docSect1Type.__init__(self, mixedclass_, content_)
+supermod.docSect1Type.subclass = docSect1TypeSub
+# end class docSect1TypeSub
+
+
+class docSect2TypeSub(supermod.docSect2Type):
+ def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
+ supermod.docSect2Type.__init__(self, mixedclass_, content_)
+supermod.docSect2Type.subclass = docSect2TypeSub
+# end class docSect2TypeSub
+
+
+class docSect3TypeSub(supermod.docSect3Type):
+ def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
+ supermod.docSect3Type.__init__(self, mixedclass_, content_)
+supermod.docSect3Type.subclass = docSect3TypeSub
+# end class docSect3TypeSub
+
+
+class docSect4TypeSub(supermod.docSect4Type):
+ def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None):
+ supermod.docSect4Type.__init__(self, mixedclass_, content_)
+supermod.docSect4Type.subclass = docSect4TypeSub
+# end class docSect4TypeSub
+
+
+class docInternalTypeSub(supermod.docInternalType):
+ def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
+ supermod.docInternalType.__init__(self, mixedclass_, content_)
+supermod.docInternalType.subclass = docInternalTypeSub
+# end class docInternalTypeSub
+
+
+class docInternalS1TypeSub(supermod.docInternalS1Type):
+ def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
+ supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
+supermod.docInternalS1Type.subclass = docInternalS1TypeSub
+# end class docInternalS1TypeSub
+
+
+class docInternalS2TypeSub(supermod.docInternalS2Type):
+ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
+ supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
+supermod.docInternalS2Type.subclass = docInternalS2TypeSub
+# end class docInternalS2TypeSub
+
+
+class docInternalS3TypeSub(supermod.docInternalS3Type):
+ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
+ supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
+supermod.docInternalS3Type.subclass = docInternalS3TypeSub
+# end class docInternalS3TypeSub
+
+
+class docInternalS4TypeSub(supermod.docInternalS4Type):
+ def __init__(self, para=None, mixedclass_=None, content_=None):
+ supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
+supermod.docInternalS4Type.subclass = docInternalS4TypeSub
+# end class docInternalS4TypeSub
+
+
+class docURLLinkSub(supermod.docURLLink):
+ def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docURLLink.__init__(self, mixedclass_, content_)
+supermod.docURLLink.subclass = docURLLinkSub
+# end class docURLLinkSub
+
+
+class docAnchorTypeSub(supermod.docAnchorType):
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docAnchorType.__init__(self, mixedclass_, content_)
+supermod.docAnchorType.subclass = docAnchorTypeSub
+# end class docAnchorTypeSub
+
+
+class docFormulaTypeSub(supermod.docFormulaType):
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docFormulaType.__init__(self, mixedclass_, content_)
+supermod.docFormulaType.subclass = docFormulaTypeSub
+# end class docFormulaTypeSub
+
+
+class docIndexEntryTypeSub(supermod.docIndexEntryType):
+ def __init__(self, primaryie='', secondaryie=''):
+ supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
+supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
+# end class docIndexEntryTypeSub
+
+
+class docListTypeSub(supermod.docListType):
+ def __init__(self, listitem=None):
+ supermod.docListType.__init__(self, listitem)
+supermod.docListType.subclass = docListTypeSub
+# end class docListTypeSub
+
+
+class docListItemTypeSub(supermod.docListItemType):
+ def __init__(self, para=None):
+ supermod.docListItemType.__init__(self, para)
+supermod.docListItemType.subclass = docListItemTypeSub
+# end class docListItemTypeSub
+
+
+class docSimpleSectTypeSub(supermod.docSimpleSectType):
+ def __init__(self, kind=None, title=None, para=None):
+ supermod.docSimpleSectType.__init__(self, kind, title, para)
+supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
+# end class docSimpleSectTypeSub
+
+
+class docVarListEntryTypeSub(supermod.docVarListEntryType):
+ def __init__(self, term=None):
+ supermod.docVarListEntryType.__init__(self, term)
+supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
+# end class docVarListEntryTypeSub
+
+
+class docRefTextTypeSub(supermod.docRefTextType):
+ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docRefTextType.__init__(self, mixedclass_, content_)
+supermod.docRefTextType.subclass = docRefTextTypeSub
+# end class docRefTextTypeSub
+
+
+class docTableTypeSub(supermod.docTableType):
+ def __init__(self, rows=None, cols=None, row=None, caption=None):
+ supermod.docTableType.__init__(self, rows, cols, row, caption)
+supermod.docTableType.subclass = docTableTypeSub
+# end class docTableTypeSub
+
+
+class docRowTypeSub(supermod.docRowType):
+ def __init__(self, entry=None):
+ supermod.docRowType.__init__(self, entry)
+supermod.docRowType.subclass = docRowTypeSub
+# end class docRowTypeSub
+
+
+class docEntryTypeSub(supermod.docEntryType):
+ def __init__(self, thead=None, para=None):
+ supermod.docEntryType.__init__(self, thead, para)
+supermod.docEntryType.subclass = docEntryTypeSub
+# end class docEntryTypeSub
+
+
+class docHeadingTypeSub(supermod.docHeadingType):
+ def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docHeadingType.__init__(self, mixedclass_, content_)
+supermod.docHeadingType.subclass = docHeadingTypeSub
+# end class docHeadingTypeSub
+
+
+class docImageTypeSub(supermod.docImageType):
+ def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docImageType.__init__(self, mixedclass_, content_)
+supermod.docImageType.subclass = docImageTypeSub
+# end class docImageTypeSub
+
+
+class docDotFileTypeSub(supermod.docDotFileType):
+ def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docDotFileType.__init__(self, mixedclass_, content_)
+supermod.docDotFileType.subclass = docDotFileTypeSub
+# end class docDotFileTypeSub
+
+
+class docTocItemTypeSub(supermod.docTocItemType):
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ supermod.docTocItemType.__init__(self, mixedclass_, content_)
+supermod.docTocItemType.subclass = docTocItemTypeSub
+# end class docTocItemTypeSub
+
+
+class docTocListTypeSub(supermod.docTocListType):
+ def __init__(self, tocitem=None):
+ supermod.docTocListType.__init__(self, tocitem)
+supermod.docTocListType.subclass = docTocListTypeSub
+# end class docTocListTypeSub
+
+
+class docLanguageTypeSub(supermod.docLanguageType):
+ def __init__(self, langid=None, para=None):
+ supermod.docLanguageType.__init__(self, langid, para)
+supermod.docLanguageType.subclass = docLanguageTypeSub
+# end class docLanguageTypeSub
+
+
+class docParamListTypeSub(supermod.docParamListType):
+ def __init__(self, kind=None, parameteritem=None):
+ supermod.docParamListType.__init__(self, kind, parameteritem)
+supermod.docParamListType.subclass = docParamListTypeSub
+# end class docParamListTypeSub
+
+
+class docParamListItemSub(supermod.docParamListItem):
+ def __init__(self, parameternamelist=None, parameterdescription=None):
+ supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
+supermod.docParamListItem.subclass = docParamListItemSub
+# end class docParamListItemSub
+
+
+class docParamNameListSub(supermod.docParamNameList):
+ def __init__(self, parametername=None):
+ supermod.docParamNameList.__init__(self, parametername)
+supermod.docParamNameList.subclass = docParamNameListSub
+# end class docParamNameListSub
+
+
+class docParamNameSub(supermod.docParamName):
+ def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
+ supermod.docParamName.__init__(self, mixedclass_, content_)
+supermod.docParamName.subclass = docParamNameSub
+# end class docParamNameSub
+
+
+class docXRefSectTypeSub(supermod.docXRefSectType):
+ def __init__(self, id=None, xreftitle=None, xrefdescription=None):
+ supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
+supermod.docXRefSectType.subclass = docXRefSectTypeSub
+# end class docXRefSectTypeSub
+
+
+class docCopyTypeSub(supermod.docCopyType):
+ def __init__(self, link=None, para=None, sect1=None, internal=None):
+ supermod.docCopyType.__init__(self, link, para, sect1, internal)
+supermod.docCopyType.subclass = docCopyTypeSub
+# end class docCopyTypeSub
+
+
+class docCharTypeSub(supermod.docCharType):
+ def __init__(self, char=None, valueOf_=''):
+ supermod.docCharType.__init__(self, char)
+supermod.docCharType.subclass = docCharTypeSub
+# end class docCharTypeSub
+
+class docParaTypeSub(supermod.docParaType):
+ def __init__(self, char=None, valueOf_=''):
+ supermod.docParaType.__init__(self, char)
+
+ self.parameterlist = []
+ self.simplesects = []
+ self.content = []
+
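+ # Override buildChildren to sort this paragraph's children: plain text
+ # and <ref> elements go into self.content, <parameterlist> children into
+ # self.parameterlist and <simplesect> children into self.simplesects, so
+ # callers can separate parameter/return documentation from ordinary text.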
+ def buildChildren(self, child_, nodeName_):
+ supermod.docParaType.buildChildren(self, child_, nodeName_)
+
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == "ref":
+ obj_ = supermod.docRefTextType.factory()
+ obj_.build(child_)
+ self.content.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parameterlist':
+ obj_ = supermod.docParamListType.factory()
+ obj_.build(child_)
+ self.parameterlist.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'simplesect':
+ obj_ = supermod.docSimpleSectType.factory()
+ obj_.build(child_)
+ self.simplesects.append(obj_)
+
+
+supermod.docParaType.subclass = docParaTypeSub
+# end class docParaTypeSub
+
+
+
+def parse(inFilename):
+ doc = minidom.parse(inFilename)
+ rootNode = doc.documentElement
+ rootObj = supermod.DoxygenType.factory()
+ rootObj.build(rootNode)
+ return rootObj
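+
+# Hypothetical usage sketch (the file name below is illustrative only):
+#   root = parse('xml/classexample.xml')
+#   compounddef = root.get_compounddef()
+#   print compounddef.get_compoundname()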
+
+
diff --git a/tools/gr-usrptest/docs/doxygen/doxyxml/generated/compoundsuper.py b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/compoundsuper.py
new file mode 100644
index 000000000..6255dda16
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/compoundsuper.py
@@ -0,0 +1,8342 @@
+#!/usr/bin/env python
+
+#
+# Generated Thu Jun 11 18:44:25 2009 by generateDS.py.
+#
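+# This module holds the generated data-representation classes for Doxygen's
+# compound XML output; the subclasses in compound.py build on these classes
+# through their `supermod` references and add project-specific behaviour.
+#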
+
+import sys
+import getopt
+from string import lower as str_lower
+from xml.dom import minidom
+from xml.dom import Node
+
+#
+# User methods
+#
+# Calls to the methods in these classes are generated by generateDS.py.
+# You can replace these methods by re-implementing the following class
+# in a module named generatedssuper.py.
+
+try:
+ from generatedssuper import GeneratedsSuper
+except ImportError, exp:
+
+ class GeneratedsSuper:
+ def format_string(self, input_data, input_name=''):
+ return input_data
+ def format_integer(self, input_data, input_name=''):
+ return '%d' % input_data
+ def format_float(self, input_data, input_name=''):
+ return '%f' % input_data
+ def format_double(self, input_data, input_name=''):
+ return '%e' % input_data
+ def format_boolean(self, input_data, input_name=''):
+ return '%s' % input_data
+
+
+#
+# If you have installed IPython you can uncomment and use the following.
+# IPython is available from http://ipython.scipy.org/.
+#
+
+## from IPython.Shell import IPShellEmbed
+## args = ''
+## ipshell = IPShellEmbed(args,
+## banner = 'Dropping into IPython',
+## exit_msg = 'Leaving Interpreter, back to program.')
+
+# Then use the following line where and when you want to drop into the
+# IPython shell:
+# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
+
+#
+# Globals
+#
+
+ExternalEncoding = 'ascii'
+
+#
+# Support/utility functions.
+#
+
+def showIndent(outfile, level):
+ for idx in range(level):
+ outfile.write(' ')
+
+def quote_xml(inStr):
+ s1 = (isinstance(inStr, basestring) and inStr or
+ '%s' % inStr)
+ s1 = s1.replace('&', '&amp;')
+ s1 = s1.replace('<', '&lt;')
+ s1 = s1.replace('>', '&gt;')
+ return s1
+
+def quote_attrib(inStr):
+ s1 = (isinstance(inStr, basestring) and inStr or
+ '%s' % inStr)
+ s1 = s1.replace('&', '&amp;')
+ s1 = s1.replace('<', '&lt;')
+ s1 = s1.replace('>', '&gt;')
+ if '"' in s1:
+ if "'" in s1:
+ s1 = '"%s"' % s1.replace('"', "&quot;")
+ else:
+ s1 = "'%s'" % s1
+ else:
+ s1 = '"%s"' % s1
+ return s1
+
+def quote_python(inStr):
+ s1 = inStr
+ if s1.find("'") == -1:
+ if s1.find('\n') == -1:
+ return "'%s'" % s1
+ else:
+ return "'''%s'''" % s1
+ else:
+ if s1.find('"') != -1:
+ s1 = s1.replace('"', '\\"')
+ if s1.find('\n') == -1:
+ return '"%s"' % s1
+ else:
+ return '"""%s"""' % s1
+
+
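+# MixedContainer wraps a single piece of mixed XML content (plain text, a
+# simple typed value, or a nested complex element) together with category
+# and type constants, so mixed-content elements can keep their children in
+# document order and re-export them.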
+class MixedContainer:
+ # Constants for category:
+ CategoryNone = 0
+ CategoryText = 1
+ CategorySimple = 2
+ CategoryComplex = 3
+ # Constants for content_type:
+ TypeNone = 0
+ TypeText = 1
+ TypeString = 2
+ TypeInteger = 3
+ TypeFloat = 4
+ TypeDecimal = 5
+ TypeDouble = 6
+ TypeBoolean = 7
+ def __init__(self, category, content_type, name, value):
+ self.category = category
+ self.content_type = content_type
+ self.name = name
+ self.value = value
+ def getCategory(self):
+ return self.category
+ def getContenttype(self, content_type):
+ return self.content_type
+ def getValue(self):
+ return self.value
+ def getName(self):
+ return self.name
+ def export(self, outfile, level, name, namespace):
+ if self.category == MixedContainer.CategoryText:
+ outfile.write(self.value)
+ elif self.category == MixedContainer.CategorySimple:
+ self.exportSimple(outfile, level, name)
+ else: # category == MixedContainer.CategoryComplex
+ self.value.export(outfile, level, namespace,name)
+ def exportSimple(self, outfile, level, name):
+ if self.content_type == MixedContainer.TypeString:
+ outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeInteger or \
+ self.content_type == MixedContainer.TypeBoolean:
+ outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeFloat or \
+ self.content_type == MixedContainer.TypeDecimal:
+ outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeDouble:
+ outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+ def exportLiteral(self, outfile, level, name):
+ if self.category == MixedContainer.CategoryText:
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+ (self.category, self.content_type, self.name, self.value))
+ elif self.category == MixedContainer.CategorySimple:
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+ (self.category, self.content_type, self.name, self.value))
+ else: # category == MixedContainer.CategoryComplex
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s",\n' % \
+ (self.category, self.content_type, self.name,))
+ self.value.exportLiteral(outfile, level + 1)
+ showIndent(outfile, level)
+ outfile.write(')\n')
+
+
+class _MemberSpec(object):
+ def __init__(self, name='', data_type='', container=0):
+ self.name = name
+ self.data_type = data_type
+ self.container = container
+ def set_name(self, name): self.name = name
+ def get_name(self): return self.name
+ def set_data_type(self, data_type): self.data_type = data_type
+ def get_data_type(self): return self.data_type
+ def set_container(self, container): self.container = container
+ def get_container(self): return self.container
+
+
+#
+# Data representation classes.
+#
+
+class DoxygenType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, version=None, compounddef=None):
+ self.version = version
+ self.compounddef = compounddef
+ def factory(*args_, **kwargs_):
+ if DoxygenType.subclass:
+ return DoxygenType.subclass(*args_, **kwargs_)
+ else:
+ return DoxygenType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_compounddef(self): return self.compounddef
+ def set_compounddef(self, compounddef): self.compounddef = compounddef
+ def get_version(self): return self.version
+ def set_version(self, version): self.version = version
+ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
+ outfile.write(' version=%s' % (quote_attrib(self.version), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
+ if self.compounddef:
+ self.compounddef.export(outfile, level, namespace_, name_='compounddef')
+ def hasContent_(self):
+ if (
+ self.compounddef is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='DoxygenType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.version is not None:
+ showIndent(outfile, level)
+ outfile.write('version = "%s",\n' % (self.version,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.compounddef:
+ showIndent(outfile, level)
+ outfile.write('compounddef=model_.compounddefType(\n')
+ self.compounddef.exportLiteral(outfile, level, name_='compounddef')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('version'):
+ self.version = attrs.get('version').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'compounddef':
+ obj_ = compounddefType.factory()
+ obj_.build(child_)
+ self.set_compounddef(obj_)
+# end class DoxygenType
+
+
+class compounddefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
+ self.kind = kind
+ self.prot = prot
+ self.id = id
+ self.compoundname = compoundname
+ self.title = title
+ if basecompoundref is None:
+ self.basecompoundref = []
+ else:
+ self.basecompoundref = basecompoundref
+ if derivedcompoundref is None:
+ self.derivedcompoundref = []
+ else:
+ self.derivedcompoundref = derivedcompoundref
+ if includes is None:
+ self.includes = []
+ else:
+ self.includes = includes
+ if includedby is None:
+ self.includedby = []
+ else:
+ self.includedby = includedby
+ self.incdepgraph = incdepgraph
+ self.invincdepgraph = invincdepgraph
+ if innerdir is None:
+ self.innerdir = []
+ else:
+ self.innerdir = innerdir
+ if innerfile is None:
+ self.innerfile = []
+ else:
+ self.innerfile = innerfile
+ if innerclass is None:
+ self.innerclass = []
+ else:
+ self.innerclass = innerclass
+ if innernamespace is None:
+ self.innernamespace = []
+ else:
+ self.innernamespace = innernamespace
+ if innerpage is None:
+ self.innerpage = []
+ else:
+ self.innerpage = innerpage
+ if innergroup is None:
+ self.innergroup = []
+ else:
+ self.innergroup = innergroup
+ self.templateparamlist = templateparamlist
+ if sectiondef is None:
+ self.sectiondef = []
+ else:
+ self.sectiondef = sectiondef
+ self.briefdescription = briefdescription
+ self.detaileddescription = detaileddescription
+ self.inheritancegraph = inheritancegraph
+ self.collaborationgraph = collaborationgraph
+ self.programlisting = programlisting
+ self.location = location
+ self.listofallmembers = listofallmembers
+ def factory(*args_, **kwargs_):
+ if compounddefType.subclass:
+ return compounddefType.subclass(*args_, **kwargs_)
+ else:
+ return compounddefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_compoundname(self): return self.compoundname
+ def set_compoundname(self, compoundname): self.compoundname = compoundname
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_basecompoundref(self): return self.basecompoundref
+ def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref
+ def add_basecompoundref(self, value): self.basecompoundref.append(value)
+ def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value
+ def get_derivedcompoundref(self): return self.derivedcompoundref
+ def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref
+ def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value)
+ def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value
+ def get_includes(self): return self.includes
+ def set_includes(self, includes): self.includes = includes
+ def add_includes(self, value): self.includes.append(value)
+ def insert_includes(self, index, value): self.includes[index] = value
+ def get_includedby(self): return self.includedby
+ def set_includedby(self, includedby): self.includedby = includedby
+ def add_includedby(self, value): self.includedby.append(value)
+ def insert_includedby(self, index, value): self.includedby[index] = value
+ def get_incdepgraph(self): return self.incdepgraph
+ def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph
+ def get_invincdepgraph(self): return self.invincdepgraph
+ def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph
+ def get_innerdir(self): return self.innerdir
+ def set_innerdir(self, innerdir): self.innerdir = innerdir
+ def add_innerdir(self, value): self.innerdir.append(value)
+ def insert_innerdir(self, index, value): self.innerdir[index] = value
+ def get_innerfile(self): return self.innerfile
+ def set_innerfile(self, innerfile): self.innerfile = innerfile
+ def add_innerfile(self, value): self.innerfile.append(value)
+ def insert_innerfile(self, index, value): self.innerfile[index] = value
+ def get_innerclass(self): return self.innerclass
+ def set_innerclass(self, innerclass): self.innerclass = innerclass
+ def add_innerclass(self, value): self.innerclass.append(value)
+ def insert_innerclass(self, index, value): self.innerclass[index] = value
+ def get_innernamespace(self): return self.innernamespace
+ def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace
+ def add_innernamespace(self, value): self.innernamespace.append(value)
+ def insert_innernamespace(self, index, value): self.innernamespace[index] = value
+ def get_innerpage(self): return self.innerpage
+ def set_innerpage(self, innerpage): self.innerpage = innerpage
+ def add_innerpage(self, value): self.innerpage.append(value)
+ def insert_innerpage(self, index, value): self.innerpage[index] = value
+ def get_innergroup(self): return self.innergroup
+ def set_innergroup(self, innergroup): self.innergroup = innergroup
+ def add_innergroup(self, value): self.innergroup.append(value)
+ def insert_innergroup(self, index, value): self.innergroup[index] = value
+ def get_templateparamlist(self): return self.templateparamlist
+ def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
+ def get_sectiondef(self): return self.sectiondef
+ def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef
+ def add_sectiondef(self, value): self.sectiondef.append(value)
+ def insert_sectiondef(self, index, value): self.sectiondef[index] = value
+ def get_briefdescription(self): return self.briefdescription
+ def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def get_detaileddescription(self): return self.detaileddescription
+ def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def get_inheritancegraph(self): return self.inheritancegraph
+ def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph
+ def get_collaborationgraph(self): return self.collaborationgraph
+ def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph
+ def get_programlisting(self): return self.programlisting
+ def set_programlisting(self, programlisting): self.programlisting = programlisting
+ def get_location(self): return self.location
+ def set_location(self, location): self.location = location
+ def get_listofallmembers(self): return self.listofallmembers
+ def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='compounddefType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'):
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'):
+ if self.compoundname is not None:
+ showIndent(outfile, level)
+ outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_))
+ if self.title is not None:
+ showIndent(outfile, level)
+ outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_))
+ for basecompoundref_ in self.basecompoundref:
+ basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref')
+ for derivedcompoundref_ in self.derivedcompoundref:
+ derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref')
+ for includes_ in self.includes:
+ includes_.export(outfile, level, namespace_, name_='includes')
+ for includedby_ in self.includedby:
+ includedby_.export(outfile, level, namespace_, name_='includedby')
+ if self.incdepgraph:
+ self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph')
+ if self.invincdepgraph:
+ self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph')
+ for innerdir_ in self.innerdir:
+ innerdir_.export(outfile, level, namespace_, name_='innerdir')
+ for innerfile_ in self.innerfile:
+ innerfile_.export(outfile, level, namespace_, name_='innerfile')
+ for innerclass_ in self.innerclass:
+ innerclass_.export(outfile, level, namespace_, name_='innerclass')
+ for innernamespace_ in self.innernamespace:
+ innernamespace_.export(outfile, level, namespace_, name_='innernamespace')
+ for innerpage_ in self.innerpage:
+ innerpage_.export(outfile, level, namespace_, name_='innerpage')
+ for innergroup_ in self.innergroup:
+ innergroup_.export(outfile, level, namespace_, name_='innergroup')
+ if self.templateparamlist:
+ self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
+ for sectiondef_ in self.sectiondef:
+ sectiondef_.export(outfile, level, namespace_, name_='sectiondef')
+ if self.briefdescription:
+ self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ if self.detaileddescription:
+ self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
+ if self.inheritancegraph:
+ self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph')
+ if self.collaborationgraph:
+ self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph')
+ if self.programlisting:
+ self.programlisting.export(outfile, level, namespace_, name_='programlisting')
+ if self.location:
+ self.location.export(outfile, level, namespace_, name_='location')
+ if self.listofallmembers:
+ self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers')
+ def hasContent_(self):
+ if (
+ self.compoundname is not None or
+ self.title is not None or
+ self.basecompoundref is not None or
+ self.derivedcompoundref is not None or
+ self.includes is not None or
+ self.includedby is not None or
+ self.incdepgraph is not None or
+ self.invincdepgraph is not None or
+ self.innerdir is not None or
+ self.innerfile is not None or
+ self.innerclass is not None or
+ self.innernamespace is not None or
+ self.innerpage is not None or
+ self.innergroup is not None or
+ self.templateparamlist is not None or
+ self.sectiondef is not None or
+ self.briefdescription is not None or
+ self.detaileddescription is not None or
+ self.inheritancegraph is not None or
+ self.collaborationgraph is not None or
+ self.programlisting is not None or
+ self.location is not None or
+ self.listofallmembers is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='compounddefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding))
+ if self.title:
+ showIndent(outfile, level)
+ outfile.write('title=model_.xsd_string(\n')
+ self.title.exportLiteral(outfile, level, name_='title')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('basecompoundref=[\n')
+ level += 1
+ for basecompoundref in self.basecompoundref:
+ showIndent(outfile, level)
+ outfile.write('model_.basecompoundref(\n')
+ basecompoundref.exportLiteral(outfile, level, name_='basecompoundref')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('derivedcompoundref=[\n')
+ level += 1
+ for derivedcompoundref in self.derivedcompoundref:
+ showIndent(outfile, level)
+ outfile.write('model_.derivedcompoundref(\n')
+ derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('includes=[\n')
+ level += 1
+ for includes in self.includes:
+ showIndent(outfile, level)
+ outfile.write('model_.includes(\n')
+ includes.exportLiteral(outfile, level, name_='includes')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('includedby=[\n')
+ level += 1
+ for includedby in self.includedby:
+ showIndent(outfile, level)
+ outfile.write('model_.includedby(\n')
+ includedby.exportLiteral(outfile, level, name_='includedby')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.incdepgraph:
+ showIndent(outfile, level)
+ outfile.write('incdepgraph=model_.graphType(\n')
+ self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.invincdepgraph:
+ showIndent(outfile, level)
+ outfile.write('invincdepgraph=model_.graphType(\n')
+ self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('innerdir=[\n')
+ level += 1
+ for innerdir in self.innerdir:
+ showIndent(outfile, level)
+ outfile.write('model_.innerdir(\n')
+ innerdir.exportLiteral(outfile, level, name_='innerdir')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innerfile=[\n')
+ level += 1
+ for innerfile in self.innerfile:
+ showIndent(outfile, level)
+ outfile.write('model_.innerfile(\n')
+ innerfile.exportLiteral(outfile, level, name_='innerfile')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innerclass=[\n')
+ level += 1
+ for innerclass in self.innerclass:
+ showIndent(outfile, level)
+ outfile.write('model_.innerclass(\n')
+ innerclass.exportLiteral(outfile, level, name_='innerclass')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innernamespace=[\n')
+ level += 1
+ for innernamespace in self.innernamespace:
+ showIndent(outfile, level)
+ outfile.write('model_.innernamespace(\n')
+ innernamespace.exportLiteral(outfile, level, name_='innernamespace')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innerpage=[\n')
+ level += 1
+ for innerpage in self.innerpage:
+ showIndent(outfile, level)
+ outfile.write('model_.innerpage(\n')
+ innerpage.exportLiteral(outfile, level, name_='innerpage')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('innergroup=[\n')
+ level += 1
+ for innergroup in self.innergroup:
+ showIndent(outfile, level)
+ outfile.write('model_.innergroup(\n')
+ innergroup.exportLiteral(outfile, level, name_='innergroup')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.templateparamlist:
+ showIndent(outfile, level)
+ outfile.write('templateparamlist=model_.templateparamlistType(\n')
+ self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('sectiondef=[\n')
+ level += 1
+ for sectiondef in self.sectiondef:
+ showIndent(outfile, level)
+ outfile.write('model_.sectiondef(\n')
+ sectiondef.exportLiteral(outfile, level, name_='sectiondef')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.briefdescription:
+ showIndent(outfile, level)
+ outfile.write('briefdescription=model_.descriptionType(\n')
+ self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.detaileddescription:
+ showIndent(outfile, level)
+ outfile.write('detaileddescription=model_.descriptionType(\n')
+ self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.inheritancegraph:
+ showIndent(outfile, level)
+ outfile.write('inheritancegraph=model_.graphType(\n')
+ self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.collaborationgraph:
+ showIndent(outfile, level)
+ outfile.write('collaborationgraph=model_.graphType(\n')
+ self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.programlisting:
+ showIndent(outfile, level)
+ outfile.write('programlisting=model_.listingType(\n')
+ self.programlisting.exportLiteral(outfile, level, name_='programlisting')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.location:
+ showIndent(outfile, level)
+ outfile.write('location=model_.locationType(\n')
+ self.location.exportLiteral(outfile, level, name_='location')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.listofallmembers:
+ showIndent(outfile, level)
+ outfile.write('listofallmembers=model_.listofallmembersType(\n')
+ self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'compoundname':
+ compoundname_ = ''
+ for text__content_ in child_.childNodes:
+ compoundname_ += text__content_.nodeValue
+ self.compoundname = compoundname_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ obj_ = docTitleType.factory()
+ obj_.build(child_)
+ self.set_title(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'basecompoundref':
+ obj_ = compoundRefType.factory()
+ obj_.build(child_)
+ self.basecompoundref.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'derivedcompoundref':
+ obj_ = compoundRefType.factory()
+ obj_.build(child_)
+ self.derivedcompoundref.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'includes':
+ obj_ = incType.factory()
+ obj_.build(child_)
+ self.includes.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'includedby':
+ obj_ = incType.factory()
+ obj_.build(child_)
+ self.includedby.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'incdepgraph':
+ obj_ = graphType.factory()
+ obj_.build(child_)
+ self.set_incdepgraph(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'invincdepgraph':
+ obj_ = graphType.factory()
+ obj_.build(child_)
+ self.set_invincdepgraph(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innerdir':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innerdir.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innerfile':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innerfile.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innerclass':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innerclass.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innernamespace':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innernamespace.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innerpage':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innerpage.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'innergroup':
+ obj_ = refType.factory()
+ obj_.build(child_)
+ self.innergroup.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'templateparamlist':
+ obj_ = templateparamlistType.factory()
+ obj_.build(child_)
+ self.set_templateparamlist(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sectiondef':
+ obj_ = sectiondefType.factory()
+ obj_.build(child_)
+ self.sectiondef.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'briefdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_briefdescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'detaileddescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_detaileddescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'inheritancegraph':
+ obj_ = graphType.factory()
+ obj_.build(child_)
+ self.set_inheritancegraph(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'collaborationgraph':
+ obj_ = graphType.factory()
+ obj_.build(child_)
+ self.set_collaborationgraph(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'programlisting':
+ obj_ = listingType.factory()
+ obj_.build(child_)
+ self.set_programlisting(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'location':
+ obj_ = locationType.factory()
+ obj_.build(child_)
+ self.set_location(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'listofallmembers':
+ obj_ = listofallmembersType.factory()
+ obj_.build(child_)
+ self.set_listofallmembers(obj_)
+# end class compounddefType
+
+
+class listofallmembersType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, member=None):
+ if member is None:
+ self.member = []
+ else:
+ self.member = member
+ def factory(*args_, **kwargs_):
+ if listofallmembersType.subclass:
+ return listofallmembersType.subclass(*args_, **kwargs_)
+ else:
+ return listofallmembersType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_member(self): return self.member
+ def set_member(self, member): self.member = member
+ def add_member(self, value): self.member.append(value)
+ def insert_member(self, index, value): self.member[index] = value
+ def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'):
+ for member_ in self.member:
+ member_.export(outfile, level, namespace_, name_='member')
+ def hasContent_(self):
+ if (
+ self.member is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='listofallmembersType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('member=[\n')
+ level += 1
+ for member in self.member:
+ showIndent(outfile, level)
+ outfile.write('model_.member(\n')
+ member.exportLiteral(outfile, level, name_='member')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'member':
+ obj_ = memberRefType.factory()
+ obj_.build(child_)
+ self.member.append(obj_)
+# end class listofallmembersType
+
+
+class memberRefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None):
+ self.virt = virt
+ self.prot = prot
+ self.refid = refid
+ self.ambiguityscope = ambiguityscope
+ self.scope = scope
+ self.name = name
+ def factory(*args_, **kwargs_):
+ if memberRefType.subclass:
+ return memberRefType.subclass(*args_, **kwargs_)
+ else:
+ return memberRefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_scope(self): return self.scope
+ def set_scope(self, scope): self.scope = scope
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_virt(self): return self.virt
+ def set_virt(self, virt): self.virt = virt
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_ambiguityscope(self): return self.ambiguityscope
+ def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope
+ def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='memberRefType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'):
+ if self.virt is not None:
+ outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.ambiguityscope is not None:
+ outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'):
+ if self.scope is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_))
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ def hasContent_(self):
+ if (
+ self.scope is not None or
+ self.name is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='memberRefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.virt is not None:
+ showIndent(outfile, level)
+ outfile.write('virt = "%s",\n' % (self.virt,))
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.ambiguityscope is not None:
+ showIndent(outfile, level)
+ outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('virt'):
+ self.virt = attrs.get('virt').value
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('ambiguityscope'):
+ self.ambiguityscope = attrs.get('ambiguityscope').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'scope':
+ scope_ = ''
+ for text__content_ in child_.childNodes:
+ scope_ += text__content_.nodeValue
+ self.scope = scope_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ name_ = ''
+ for text__content_ in child_.childNodes:
+ name_ += text__content_.nodeValue
+ self.name = name_
+# end class memberRefType
+
+
+class scope(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if scope.subclass:
+ return scope.subclass(*args_, **kwargs_)
+ else:
+ return scope(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='scope')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='scope'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='scope'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='scope'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class scope
+
+
+class name(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if name.subclass:
+ return name.subclass(*args_, **kwargs_)
+ else:
+ return name(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='name')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='name'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='name'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='name'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class name
+
+
+class compoundRefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ self.virt = virt
+ self.prot = prot
+ self.refid = refid
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if compoundRefType.subclass:
+ return compoundRefType.subclass(*args_, **kwargs_)
+ else:
+ return compoundRefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_virt(self): return self.virt
+ def set_virt(self, virt): self.virt = virt
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='compoundRefType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'):
+ if self.virt is not None:
+ outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='compoundRefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.virt is not None:
+ showIndent(outfile, level)
+ outfile.write('virt = "%s",\n' % (self.virt,))
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('virt'):
+ self.virt = attrs.get('virt').value
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class compoundRefType
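
Editor's note: a minimal usage sketch of the mixed-content classes above, to make the build/get pattern easier to follow. The module import name (compoundsuper) and the XML fragment are illustrative assumptions, not part of this patch; in the tree these classes are normally driven by doxyindex.py.

    import sys
    from xml.dom import minidom
    import compoundsuper  # hypothetical import path for this generated module

    xml_text = '<basecompoundref refid="classfoo" prot="public" virt="non-virtual">foo</basecompoundref>'
    node = minidom.parseString(xml_text).documentElement
    ref = compoundsuper.compoundRefType.factory()
    ref.build(node)   # fills virt/prot/refid and accumulates the text into valueOf_
    sys.stdout.write('%s %s %s\n' % (ref.get_refid(), ref.get_prot(), ref.getValueOf_()))
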
+
+
+class reimplementType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ self.refid = refid
+ self.valueOf_ = valueOf_
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if reimplementType.subclass:
+ return reimplementType.subclass(*args_, **kwargs_)
+ else:
+ return reimplementType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='reimplementType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'):
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='reimplementType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class reimplementType
+
+
+class incType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ self.local = local
+ self.refid = refid
+ self.valueOf_ = valueOf_
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if incType.subclass:
+ return incType.subclass(*args_, **kwargs_)
+ else:
+ return incType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_local(self): return self.local
+ def set_local(self, local): self.local = local
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='incType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='incType'):
+ if self.local is not None:
+ outfile.write(' local=%s' % (quote_attrib(self.local), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='incType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='incType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.local is not None:
+ showIndent(outfile, level)
+ outfile.write('local = "%s",\n' % (self.local,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('local'):
+ self.local = attrs.get('local').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class incType
+
+
+class refType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
+ self.prot = prot
+ self.refid = refid
+ self.valueOf_ = valueOf_
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if refType.subclass:
+ return refType.subclass(*args_, **kwargs_)
+ else:
+ return refType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='refType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='refType'):
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='refType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='refType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class refType
+
+
+class refTextType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
+ self.refid = refid
+ self.kindref = kindref
+ self.external = external
+ self.valueOf_ = valueOf_
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if refTextType.subclass:
+ return refTextType.subclass(*args_, **kwargs_)
+ else:
+ return refTextType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_kindref(self): return self.kindref
+ def set_kindref(self, kindref): self.kindref = kindref
+ def get_external(self): return self.external
+ def set_external(self, external): self.external = external
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='refTextType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'):
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.kindref is not None:
+ outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
+ if self.external is not None:
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='refTextType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='refTextType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.kindref is not None:
+ showIndent(outfile, level)
+ outfile.write('kindref = "%s",\n' % (self.kindref,))
+ if self.external is not None:
+ showIndent(outfile, level)
+ outfile.write('external = %s,\n' % (self.external,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('kindref'):
+ self.kindref = attrs.get('kindref').value
+ if attrs.get('external'):
+ self.external = attrs.get('external').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class refTextType
+
+
+class sectiondefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, header=None, description=None, memberdef=None):
+ self.kind = kind
+ self.header = header
+ self.description = description
+ if memberdef is None:
+ self.memberdef = []
+ else:
+ self.memberdef = memberdef
+ def factory(*args_, **kwargs_):
+ if sectiondefType.subclass:
+ return sectiondefType.subclass(*args_, **kwargs_)
+ else:
+ return sectiondefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_header(self): return self.header
+ def set_header(self, header): self.header = header
+ def get_description(self): return self.description
+ def set_description(self, description): self.description = description
+ def get_memberdef(self): return self.memberdef
+ def set_memberdef(self, memberdef): self.memberdef = memberdef
+ def add_memberdef(self, value): self.memberdef.append(value)
+ def insert_memberdef(self, index, value): self.memberdef[index] = value
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='sectiondefType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'):
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'):
+ if self.header is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_))
+ if self.description:
+ self.description.export(outfile, level, namespace_, name_='description')
+ for memberdef_ in self.memberdef:
+ memberdef_.export(outfile, level, namespace_, name_='memberdef')
+ def hasContent_(self):
+ if (
+ self.header is not None or
+ self.description is not None or
+ self.memberdef is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='sectiondefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding))
+ if self.description:
+ showIndent(outfile, level)
+ outfile.write('description=model_.descriptionType(\n')
+ self.description.exportLiteral(outfile, level, name_='description')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('memberdef=[\n')
+ level += 1
+ for memberdef in self.memberdef:
+ showIndent(outfile, level)
+ outfile.write('model_.memberdef(\n')
+ memberdef.exportLiteral(outfile, level, name_='memberdef')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'header':
+ header_ = ''
+ for text__content_ in child_.childNodes:
+ header_ += text__content_.nodeValue
+ self.header = header_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'description':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_description(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'memberdef':
+ obj_ = memberdefType.factory()
+ obj_.build(child_)
+ self.memberdef.append(obj_)
+# end class sectiondefType
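
Editor's note: sectiondefType.build() above dispatches on child element names and collects each <memberdef> into a list. A small sketch of that behaviour; the import name and XML fragment are assumptions, as before.

    import sys
    from xml.dom import minidom
    import compoundsuper  # hypothetical import path

    xml_text = (
        '<sectiondef kind="public-func">'
        '<header>Public member functions</header>'
        '<memberdef kind="function" id="m1" prot="public" static="no">'
        '<name>work</name>'
        '</memberdef>'
        '</sectiondef>')
    sect = compoundsuper.sectiondefType.factory()
    sect.build(minidom.parseString(xml_text).documentElement)
    # header text plus one memberdefType instance with its <name> populated
    sys.stdout.write('%s: %d member(s), first is %s\n'
                     % (sect.get_header(), len(sect.get_memberdef()),
                        sect.get_memberdef()[0].get_name()))
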
+
+
+class memberdefType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
+ self.initonly = initonly
+ self.kind = kind
+ self.volatile = volatile
+ self.const = const
+ self.raisexx = raisexx
+ self.virt = virt
+ self.readable = readable
+ self.prot = prot
+ self.explicit = explicit
+ self.new = new
+ self.final = final
+ self.writable = writable
+ self.add = add
+ self.static = static
+ self.remove = remove
+ self.sealed = sealed
+ self.mutable = mutable
+ self.gettable = gettable
+ self.inline = inline
+ self.settable = settable
+ self.id = id
+ self.templateparamlist = templateparamlist
+ self.type_ = type_
+ self.definition = definition
+ self.argsstring = argsstring
+ self.name = name
+ self.read = read
+ self.write = write
+ self.bitfield = bitfield
+ if reimplements is None:
+ self.reimplements = []
+ else:
+ self.reimplements = reimplements
+ if reimplementedby is None:
+ self.reimplementedby = []
+ else:
+ self.reimplementedby = reimplementedby
+ if param is None:
+ self.param = []
+ else:
+ self.param = param
+ if enumvalue is None:
+ self.enumvalue = []
+ else:
+ self.enumvalue = enumvalue
+ self.initializer = initializer
+ self.exceptions = exceptions
+ self.briefdescription = briefdescription
+ self.detaileddescription = detaileddescription
+ self.inbodydescription = inbodydescription
+ self.location = location
+ if references is None:
+ self.references = []
+ else:
+ self.references = references
+ if referencedby is None:
+ self.referencedby = []
+ else:
+ self.referencedby = referencedby
+ def factory(*args_, **kwargs_):
+ if memberdefType.subclass:
+ return memberdefType.subclass(*args_, **kwargs_)
+ else:
+ return memberdefType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_templateparamlist(self): return self.templateparamlist
+ def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
+ def get_type(self): return self.type_
+ def set_type(self, type_): self.type_ = type_
+ def get_definition(self): return self.definition
+ def set_definition(self, definition): self.definition = definition
+ def get_argsstring(self): return self.argsstring
+ def set_argsstring(self, argsstring): self.argsstring = argsstring
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_read(self): return self.read
+ def set_read(self, read): self.read = read
+ def get_write(self): return self.write
+ def set_write(self, write): self.write = write
+ def get_bitfield(self): return self.bitfield
+ def set_bitfield(self, bitfield): self.bitfield = bitfield
+ def get_reimplements(self): return self.reimplements
+ def set_reimplements(self, reimplements): self.reimplements = reimplements
+ def add_reimplements(self, value): self.reimplements.append(value)
+ def insert_reimplements(self, index, value): self.reimplements[index] = value
+ def get_reimplementedby(self): return self.reimplementedby
+ def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby
+ def add_reimplementedby(self, value): self.reimplementedby.append(value)
+ def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value
+ def get_param(self): return self.param
+ def set_param(self, param): self.param = param
+ def add_param(self, value): self.param.append(value)
+ def insert_param(self, index, value): self.param[index] = value
+ def get_enumvalue(self): return self.enumvalue
+ def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue
+ def add_enumvalue(self, value): self.enumvalue.append(value)
+ def insert_enumvalue(self, index, value): self.enumvalue[index] = value
+ def get_initializer(self): return self.initializer
+ def set_initializer(self, initializer): self.initializer = initializer
+ def get_exceptions(self): return self.exceptions
+ def set_exceptions(self, exceptions): self.exceptions = exceptions
+ def get_briefdescription(self): return self.briefdescription
+ def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def get_detaileddescription(self): return self.detaileddescription
+ def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def get_inbodydescription(self): return self.inbodydescription
+ def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription
+ def get_location(self): return self.location
+ def set_location(self, location): self.location = location
+ def get_references(self): return self.references
+ def set_references(self, references): self.references = references
+ def add_references(self, value): self.references.append(value)
+ def insert_references(self, index, value): self.references[index] = value
+ def get_referencedby(self): return self.referencedby
+ def set_referencedby(self, referencedby): self.referencedby = referencedby
+ def add_referencedby(self, value): self.referencedby.append(value)
+ def insert_referencedby(self, index, value): self.referencedby[index] = value
+ def get_initonly(self): return self.initonly
+ def set_initonly(self, initonly): self.initonly = initonly
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def get_volatile(self): return self.volatile
+ def set_volatile(self, volatile): self.volatile = volatile
+ def get_const(self): return self.const
+ def set_const(self, const): self.const = const
+ def get_raise(self): return self.raisexx
+ def set_raise(self, raisexx): self.raisexx = raisexx
+ def get_virt(self): return self.virt
+ def set_virt(self, virt): self.virt = virt
+ def get_readable(self): return self.readable
+ def set_readable(self, readable): self.readable = readable
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_explicit(self): return self.explicit
+ def set_explicit(self, explicit): self.explicit = explicit
+ def get_new(self): return self.new
+ def set_new(self, new): self.new = new
+ def get_final(self): return self.final
+ def set_final(self, final): self.final = final
+ def get_writable(self): return self.writable
+ def set_writable(self, writable): self.writable = writable
+ def get_add(self): return self.add
+ def set_add(self, add): self.add = add
+ def get_static(self): return self.static
+ def set_static(self, static): self.static = static
+ def get_remove(self): return self.remove
+ def set_remove(self, remove): self.remove = remove
+ def get_sealed(self): return self.sealed
+ def set_sealed(self, sealed): self.sealed = sealed
+ def get_mutable(self): return self.mutable
+ def set_mutable(self, mutable): self.mutable = mutable
+ def get_gettable(self): return self.gettable
+ def set_gettable(self, gettable): self.gettable = gettable
+ def get_inline(self): return self.inline
+ def set_inline(self, inline): self.inline = inline
+ def get_settable(self): return self.settable
+ def set_settable(self, settable): self.settable = settable
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='memberdefType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'):
+ if self.initonly is not None:
+ outfile.write(' initonly=%s' % (quote_attrib(self.initonly), ))
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ if self.volatile is not None:
+ outfile.write(' volatile=%s' % (quote_attrib(self.volatile), ))
+ if self.const is not None:
+ outfile.write(' const=%s' % (quote_attrib(self.const), ))
+ if self.raisexx is not None:
+ outfile.write(' raise=%s' % (quote_attrib(self.raisexx), ))
+ if self.virt is not None:
+ outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
+ if self.readable is not None:
+ outfile.write(' readable=%s' % (quote_attrib(self.readable), ))
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.explicit is not None:
+ outfile.write(' explicit=%s' % (quote_attrib(self.explicit), ))
+ if self.new is not None:
+ outfile.write(' new=%s' % (quote_attrib(self.new), ))
+ if self.final is not None:
+ outfile.write(' final=%s' % (quote_attrib(self.final), ))
+ if self.writable is not None:
+ outfile.write(' writable=%s' % (quote_attrib(self.writable), ))
+ if self.add is not None:
+ outfile.write(' add=%s' % (quote_attrib(self.add), ))
+ if self.static is not None:
+ outfile.write(' static=%s' % (quote_attrib(self.static), ))
+ if self.remove is not None:
+ outfile.write(' remove=%s' % (quote_attrib(self.remove), ))
+ if self.sealed is not None:
+ outfile.write(' sealed=%s' % (quote_attrib(self.sealed), ))
+ if self.mutable is not None:
+ outfile.write(' mutable=%s' % (quote_attrib(self.mutable), ))
+ if self.gettable is not None:
+ outfile.write(' gettable=%s' % (quote_attrib(self.gettable), ))
+ if self.inline is not None:
+ outfile.write(' inline=%s' % (quote_attrib(self.inline), ))
+ if self.settable is not None:
+ outfile.write(' settable=%s' % (quote_attrib(self.settable), ))
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'):
+ if self.templateparamlist:
+ self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
+ if self.type_:
+ self.type_.export(outfile, level, namespace_, name_='type')
+ if self.definition is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_))
+ if self.argsstring is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_))
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ if self.read is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_))
+ if self.write is not None:
+ showIndent(outfile, level)
+ outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_))
+ if self.bitfield is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_))
+ for reimplements_ in self.reimplements:
+ reimplements_.export(outfile, level, namespace_, name_='reimplements')
+ for reimplementedby_ in self.reimplementedby:
+ reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby')
+ for param_ in self.param:
+ param_.export(outfile, level, namespace_, name_='param')
+ for enumvalue_ in self.enumvalue:
+ enumvalue_.export(outfile, level, namespace_, name_='enumvalue')
+ if self.initializer:
+ self.initializer.export(outfile, level, namespace_, name_='initializer')
+ if self.exceptions:
+ self.exceptions.export(outfile, level, namespace_, name_='exceptions')
+ if self.briefdescription:
+ self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ if self.detaileddescription:
+ self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
+ if self.inbodydescription:
+ self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription')
+ if self.location:
+ self.location.export(outfile, level, namespace_, name_='location', )
+ for references_ in self.references:
+ references_.export(outfile, level, namespace_, name_='references')
+ for referencedby_ in self.referencedby:
+ referencedby_.export(outfile, level, namespace_, name_='referencedby')
+ def hasContent_(self):
+ if (
+ self.templateparamlist is not None or
+ self.type_ is not None or
+ self.definition is not None or
+ self.argsstring is not None or
+ self.name is not None or
+ self.read is not None or
+ self.write is not None or
+ self.bitfield is not None or
+ self.reimplements is not None or
+ self.reimplementedby is not None or
+ self.param is not None or
+ self.enumvalue is not None or
+ self.initializer is not None or
+ self.exceptions is not None or
+ self.briefdescription is not None or
+ self.detaileddescription is not None or
+ self.inbodydescription is not None or
+ self.location is not None or
+ self.references is not None or
+ self.referencedby is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='memberdefType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.initonly is not None:
+ showIndent(outfile, level)
+ outfile.write('initonly = "%s",\n' % (self.initonly,))
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ if self.volatile is not None:
+ showIndent(outfile, level)
+ outfile.write('volatile = "%s",\n' % (self.volatile,))
+ if self.const is not None:
+ showIndent(outfile, level)
+ outfile.write('const = "%s",\n' % (self.const,))
+ if self.raisexx is not None:
+ showIndent(outfile, level)
+ outfile.write('raisexx = "%s",\n' % (self.raisexx,))
+ if self.virt is not None:
+ showIndent(outfile, level)
+ outfile.write('virt = "%s",\n' % (self.virt,))
+ if self.readable is not None:
+ showIndent(outfile, level)
+ outfile.write('readable = "%s",\n' % (self.readable,))
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.explicit is not None:
+ showIndent(outfile, level)
+ outfile.write('explicit = "%s",\n' % (self.explicit,))
+ if self.new is not None:
+ showIndent(outfile, level)
+ outfile.write('new = "%s",\n' % (self.new,))
+ if self.final is not None:
+ showIndent(outfile, level)
+ outfile.write('final = "%s",\n' % (self.final,))
+ if self.writable is not None:
+ showIndent(outfile, level)
+ outfile.write('writable = "%s",\n' % (self.writable,))
+ if self.add is not None:
+ showIndent(outfile, level)
+ outfile.write('add = "%s",\n' % (self.add,))
+ if self.static is not None:
+ showIndent(outfile, level)
+ outfile.write('static = "%s",\n' % (self.static,))
+ if self.remove is not None:
+ showIndent(outfile, level)
+ outfile.write('remove = "%s",\n' % (self.remove,))
+ if self.sealed is not None:
+ showIndent(outfile, level)
+ outfile.write('sealed = "%s",\n' % (self.sealed,))
+ if self.mutable is not None:
+ showIndent(outfile, level)
+ outfile.write('mutable = "%s",\n' % (self.mutable,))
+ if self.gettable is not None:
+ showIndent(outfile, level)
+ outfile.write('gettable = "%s",\n' % (self.gettable,))
+ if self.inline is not None:
+ showIndent(outfile, level)
+ outfile.write('inline = "%s",\n' % (self.inline,))
+ if self.settable is not None:
+ showIndent(outfile, level)
+ outfile.write('settable = "%s",\n' % (self.settable,))
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.templateparamlist:
+ showIndent(outfile, level)
+ outfile.write('templateparamlist=model_.templateparamlistType(\n')
+ self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.type_:
+ showIndent(outfile, level)
+ outfile.write('type_=model_.linkedTextType(\n')
+ self.type_.exportLiteral(outfile, level, name_='type')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('reimplements=[\n')
+ level += 1
+ for reimplements in self.reimplements:
+ showIndent(outfile, level)
+ outfile.write('model_.reimplements(\n')
+ reimplements.exportLiteral(outfile, level, name_='reimplements')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('reimplementedby=[\n')
+ level += 1
+ for reimplementedby in self.reimplementedby:
+ showIndent(outfile, level)
+ outfile.write('model_.reimplementedby(\n')
+ reimplementedby.exportLiteral(outfile, level, name_='reimplementedby')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('param=[\n')
+ level += 1
+ for param in self.param:
+ showIndent(outfile, level)
+ outfile.write('model_.param(\n')
+ param.exportLiteral(outfile, level, name_='param')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('enumvalue=[\n')
+ level += 1
+ for enumvalue in self.enumvalue:
+ showIndent(outfile, level)
+ outfile.write('model_.enumvalue(\n')
+ enumvalue.exportLiteral(outfile, level, name_='enumvalue')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.initializer:
+ showIndent(outfile, level)
+ outfile.write('initializer=model_.linkedTextType(\n')
+ self.initializer.exportLiteral(outfile, level, name_='initializer')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.exceptions:
+ showIndent(outfile, level)
+ outfile.write('exceptions=model_.linkedTextType(\n')
+ self.exceptions.exportLiteral(outfile, level, name_='exceptions')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.briefdescription:
+ showIndent(outfile, level)
+ outfile.write('briefdescription=model_.descriptionType(\n')
+ self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.detaileddescription:
+ showIndent(outfile, level)
+ outfile.write('detaileddescription=model_.descriptionType(\n')
+ self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.inbodydescription:
+ showIndent(outfile, level)
+ outfile.write('inbodydescription=model_.descriptionType(\n')
+ self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.location:
+ showIndent(outfile, level)
+ outfile.write('location=model_.locationType(\n')
+ self.location.exportLiteral(outfile, level, name_='location')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('references=[\n')
+ level += 1
+ for references in self.references:
+ showIndent(outfile, level)
+ outfile.write('model_.references(\n')
+ references.exportLiteral(outfile, level, name_='references')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('referencedby=[\n')
+ level += 1
+ for referencedby in self.referencedby:
+ showIndent(outfile, level)
+ outfile.write('model_.referencedby(\n')
+ referencedby.exportLiteral(outfile, level, name_='referencedby')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('initonly'):
+ self.initonly = attrs.get('initonly').value
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ if attrs.get('volatile'):
+ self.volatile = attrs.get('volatile').value
+ if attrs.get('const'):
+ self.const = attrs.get('const').value
+ if attrs.get('raise'):
+ self.raisexx = attrs.get('raise').value
+ if attrs.get('virt'):
+ self.virt = attrs.get('virt').value
+ if attrs.get('readable'):
+ self.readable = attrs.get('readable').value
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('explicit'):
+ self.explicit = attrs.get('explicit').value
+ if attrs.get('new'):
+ self.new = attrs.get('new').value
+ if attrs.get('final'):
+ self.final = attrs.get('final').value
+ if attrs.get('writable'):
+ self.writable = attrs.get('writable').value
+ if attrs.get('add'):
+ self.add = attrs.get('add').value
+ if attrs.get('static'):
+ self.static = attrs.get('static').value
+ if attrs.get('remove'):
+ self.remove = attrs.get('remove').value
+ if attrs.get('sealed'):
+ self.sealed = attrs.get('sealed').value
+ if attrs.get('mutable'):
+ self.mutable = attrs.get('mutable').value
+ if attrs.get('gettable'):
+ self.gettable = attrs.get('gettable').value
+ if attrs.get('inline'):
+ self.inline = attrs.get('inline').value
+ if attrs.get('settable'):
+ self.settable = attrs.get('settable').value
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'templateparamlist':
+ obj_ = templateparamlistType.factory()
+ obj_.build(child_)
+ self.set_templateparamlist(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'type':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_type(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'definition':
+ definition_ = ''
+ for text__content_ in child_.childNodes:
+ definition_ += text__content_.nodeValue
+ self.definition = definition_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'argsstring':
+ argsstring_ = ''
+ for text__content_ in child_.childNodes:
+ argsstring_ += text__content_.nodeValue
+ self.argsstring = argsstring_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ name_ = ''
+ for text__content_ in child_.childNodes:
+ name_ += text__content_.nodeValue
+ self.name = name_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'read':
+ read_ = ''
+ for text__content_ in child_.childNodes:
+ read_ += text__content_.nodeValue
+ self.read = read_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'write':
+ write_ = ''
+ for text__content_ in child_.childNodes:
+ write_ += text__content_.nodeValue
+ self.write = write_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'bitfield':
+ bitfield_ = ''
+ for text__content_ in child_.childNodes:
+ bitfield_ += text__content_.nodeValue
+ self.bitfield = bitfield_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'reimplements':
+ obj_ = reimplementType.factory()
+ obj_.build(child_)
+ self.reimplements.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'reimplementedby':
+ obj_ = reimplementType.factory()
+ obj_.build(child_)
+ self.reimplementedby.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'param':
+ obj_ = paramType.factory()
+ obj_.build(child_)
+ self.param.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'enumvalue':
+ obj_ = enumvalueType.factory()
+ obj_.build(child_)
+ self.enumvalue.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'initializer':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_initializer(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'exceptions':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_exceptions(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'briefdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_briefdescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'detaileddescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_detaileddescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'inbodydescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_inbodydescription(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'location':
+ obj_ = locationType.factory()
+ obj_.build(child_)
+ self.set_location(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'references':
+ obj_ = referenceType.factory()
+ obj_.build(child_)
+ self.references.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'referencedby':
+ obj_ = referenceType.factory()
+ obj_.build(child_)
+ self.referencedby.append(obj_)
+# end class memberdefType
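
Editor's note: because raise is a Python keyword, memberdefType stores Doxygen's raise attribute as raisexx and exposes it through get_raise()/set_raise(); on export it is written back under its XML name. A tiny sketch (assumed import name as above):

    import compoundsuper  # hypothetical import path

    member = compoundsuper.memberdefType.factory()
    member.set_raise('yes')            # stored internally as self.raisexx
    assert member.get_raise() == 'yes'
    # exportAttributes() writes it back as:  raise="yes"
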
+
+
+class definition(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if definition.subclass:
+ return definition.subclass(*args_, **kwargs_)
+ else:
+ return definition(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='definition')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='definition'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='definition'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='definition'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class definition
+
+
+class argsstring(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if argsstring.subclass:
+ return argsstring.subclass(*args_, **kwargs_)
+ else:
+ return argsstring(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='argsstring')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='argsstring'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='argsstring'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class argsstring
+
+
+class read(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if read.subclass:
+ return read.subclass(*args_, **kwargs_)
+ else:
+ return read(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='read')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='read'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='read'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='read'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class read
+
+
+class write(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if write.subclass:
+ return write.subclass(*args_, **kwargs_)
+ else:
+ return write(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='write')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='write'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='write'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='write'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class write
+
+
+class bitfield(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if bitfield.subclass:
+ return bitfield.subclass(*args_, **kwargs_)
+ else:
+ return bitfield(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='bitfield')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='bitfield'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='bitfield'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class bitfield
+
+
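+# descriptionType holds a mixed-content description: title, para, sect1 and
+# internal children plus interleaved text are collected into content_ as
+# MixedContainer entries and exported in document order.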
+class descriptionType(GeneratedsSuper):
+    subclass = None
+    superclass = None
+    def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
+        # Store the constructor arguments so the generated getters and
+        # hasContent_() do not raise AttributeError.
+        self.title = title
+        self.para = para
+        self.sect1 = sect1
+        self.internal = internal
+        if mixedclass_ is None:
+            self.mixedclass_ = MixedContainer
+        else:
+            self.mixedclass_ = mixedclass_
+        if content_ is None:
+            self.content_ = []
+        else:
+            self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if descriptionType.subclass:
+ return descriptionType.subclass(*args_, **kwargs_)
+ else:
+ return descriptionType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect1(self): return self.sect1
+ def set_sect1(self, sect1): self.sect1 = sect1
+ def add_sect1(self, value): self.sect1.append(value)
+ def insert_sect1(self, index, value): self.sect1[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='descriptionType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.sect1 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='descriptionType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+        showIndent(outfile, level)
+        outfile.write('content_ = [\n')
+        for item_ in self.content_:
+            item_.exportLiteral(outfile, level, name_)
+        showIndent(outfile, level)
+        outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect1':
+ childobj_ = docSect1Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect1', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class descriptionType
+
+
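+# enumvalueType represents a single enumerator: prot and id come from
+# attributes; name, initializer, briefdescription and detaileddescription
+# children (plus free text) are stored in content_.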
+class enumvalueType(GeneratedsSuper):
+    subclass = None
+    superclass = None
+    def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
+        self.prot = prot
+        self.id = id
+        # Store the remaining constructor arguments so the generated getters
+        # and hasContent_() do not raise AttributeError.
+        self.name = name
+        self.initializer = initializer
+        self.briefdescription = briefdescription
+        self.detaileddescription = detaileddescription
+        if mixedclass_ is None:
+            self.mixedclass_ = MixedContainer
+        else:
+            self.mixedclass_ = mixedclass_
+        if content_ is None:
+            self.content_ = []
+        else:
+            self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if enumvalueType.subclass:
+ return enumvalueType.subclass(*args_, **kwargs_)
+ else:
+ return enumvalueType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_initializer(self): return self.initializer
+ def set_initializer(self, initializer): self.initializer = initializer
+ def get_briefdescription(self): return self.briefdescription
+ def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def get_detaileddescription(self): return self.detaileddescription
+ def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
+ def get_prot(self): return self.prot
+ def set_prot(self, prot): self.prot = prot
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='enumvalueType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'):
+ if self.prot is not None:
+ outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.name is not None or
+ self.initializer is not None or
+ self.briefdescription is not None or
+ self.detaileddescription is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='enumvalueType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.prot is not None:
+ showIndent(outfile, level)
+ outfile.write('prot = "%s",\n' % (self.prot,))
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+        showIndent(outfile, level)
+        outfile.write('content_ = [\n')
+        for item_ in self.content_:
+            item_.exportLiteral(outfile, level, name_)
+        showIndent(outfile, level)
+        outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('prot'):
+ self.prot = attrs.get('prot').value
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ value_ = []
+ for text_ in child_.childNodes:
+ value_.append(text_.nodeValue)
+ valuestr_ = ''.join(value_)
+ obj_ = self.mixedclass_(MixedContainer.CategorySimple,
+ MixedContainer.TypeString, 'name', valuestr_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'initializer':
+ childobj_ = linkedTextType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'initializer', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'briefdescription':
+ childobj_ = descriptionType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'briefdescription', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'detaileddescription':
+ childobj_ = descriptionType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'detaileddescription', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class enumvalueType
+
+
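+# templateparamlistType is an ordered list of param children, each parsed
+# into a paramType instance.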
+class templateparamlistType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, param=None):
+ if param is None:
+ self.param = []
+ else:
+ self.param = param
+ def factory(*args_, **kwargs_):
+ if templateparamlistType.subclass:
+ return templateparamlistType.subclass(*args_, **kwargs_)
+ else:
+ return templateparamlistType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_param(self): return self.param
+ def set_param(self, param): self.param = param
+ def add_param(self, value): self.param.append(value)
+ def insert_param(self, index, value): self.param[index] = value
+ def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'):
+ for param_ in self.param:
+ param_.export(outfile, level, namespace_, name_='param')
+ def hasContent_(self):
+ if (
+ self.param is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='templateparamlistType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('param=[\n')
+ level += 1
+ for param in self.param:
+ showIndent(outfile, level)
+ outfile.write('model_.param(\n')
+ param.exportLiteral(outfile, level, name_='param')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'param':
+ obj_ = paramType.factory()
+ obj_.build(child_)
+ self.param.append(obj_)
+# end class templateparamlistType
+
+
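+# paramType describes one parameter: declname, defname and array are plain
+# strings, type and defval are linkedTextType, briefdescription is a
+# descriptionType.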
+class paramType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None):
+ self.type_ = type_
+ self.declname = declname
+ self.defname = defname
+ self.array = array
+ self.defval = defval
+ self.briefdescription = briefdescription
+ def factory(*args_, **kwargs_):
+ if paramType.subclass:
+ return paramType.subclass(*args_, **kwargs_)
+ else:
+ return paramType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_type(self): return self.type_
+ def set_type(self, type_): self.type_ = type_
+ def get_declname(self): return self.declname
+ def set_declname(self, declname): self.declname = declname
+ def get_defname(self): return self.defname
+ def set_defname(self, defname): self.defname = defname
+ def get_array(self): return self.array
+ def set_array(self, array): self.array = array
+ def get_defval(self): return self.defval
+ def set_defval(self, defval): self.defval = defval
+ def get_briefdescription(self): return self.briefdescription
+ def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
+ def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='paramType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='paramType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='paramType'):
+ if self.type_:
+ self.type_.export(outfile, level, namespace_, name_='type')
+ if self.declname is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_))
+ if self.defname is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_))
+ if self.array is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_))
+ if self.defval:
+ self.defval.export(outfile, level, namespace_, name_='defval')
+ if self.briefdescription:
+ self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
+ def hasContent_(self):
+ if (
+ self.type_ is not None or
+ self.declname is not None or
+ self.defname is not None or
+ self.array is not None or
+ self.defval is not None or
+ self.briefdescription is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='paramType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.type_:
+ showIndent(outfile, level)
+ outfile.write('type_=model_.linkedTextType(\n')
+ self.type_.exportLiteral(outfile, level, name_='type')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding))
+ if self.defval:
+ showIndent(outfile, level)
+ outfile.write('defval=model_.linkedTextType(\n')
+ self.defval.exportLiteral(outfile, level, name_='defval')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ if self.briefdescription:
+ showIndent(outfile, level)
+ outfile.write('briefdescription=model_.descriptionType(\n')
+ self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'type':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_type(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'declname':
+ declname_ = ''
+ for text__content_ in child_.childNodes:
+ declname_ += text__content_.nodeValue
+ self.declname = declname_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'defname':
+ defname_ = ''
+ for text__content_ in child_.childNodes:
+ defname_ += text__content_.nodeValue
+ self.defname = defname_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'array':
+ array_ = ''
+ for text__content_ in child_.childNodes:
+ array_ += text__content_.nodeValue
+ self.array = array_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'defval':
+ obj_ = linkedTextType.factory()
+ obj_.build(child_)
+ self.set_defval(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'briefdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_briefdescription(obj_)
+# end class paramType
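+# A minimal usage sketch (illustrative only, not part of the generated API):
+# these classes are built from xml.dom.minidom nodes and re-serialized with
+# export(), roughly:
+#   import sys
+#   from xml.dom import minidom
+#   dom = minidom.parseString('<param><declname>fs</declname></param>')
+#   p = paramType.factory()
+#   p.build(dom.documentElement)
+#   p.export(sys.stdout, 0)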
+
+
+class declname(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if declname.subclass:
+ return declname.subclass(*args_, **kwargs_)
+ else:
+ return declname(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='declname')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='declname'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='declname'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='declname'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class declname
+
+
+class defname(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if defname.subclass:
+ return defname.subclass(*args_, **kwargs_)
+ else:
+ return defname(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='defname')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='defname'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='defname'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='defname'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class defname
+
+
+class array(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if array.subclass:
+ return array.subclass(*args_, **kwargs_)
+ else:
+ return array(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='array')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='array'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='array'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='array'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class array
+
+
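+# linkedTextType is text interleaved with <ref> cross-references; both are
+# kept in content_ so the original ordering is preserved on export.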
+class linkedTextType(GeneratedsSuper):
+    subclass = None
+    superclass = None
+    def __init__(self, ref=None, mixedclass_=None, content_=None):
+        # Keep ref so get_ref() and hasContent_() do not raise AttributeError.
+        self.ref = ref
+        if mixedclass_ is None:
+            self.mixedclass_ = MixedContainer
+        else:
+            self.mixedclass_ = mixedclass_
+        if content_ is None:
+            self.content_ = []
+        else:
+            self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if linkedTextType.subclass:
+ return linkedTextType.subclass(*args_, **kwargs_)
+ else:
+ return linkedTextType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_ref(self): return self.ref
+ def set_ref(self, ref): self.ref = ref
+ def add_ref(self, value): self.ref.append(value)
+ def insert_ref(self, index, value): self.ref[index] = value
+ def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='linkedTextType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.ref is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='linkedTextType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'ref':
+ childobj_ = docRefTextType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'ref', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class linkedTextType
+
+
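+# graphType models a dependency or inheritance graph as a flat list of
+# nodeType children.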
+class graphType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, node=None):
+ if node is None:
+ self.node = []
+ else:
+ self.node = node
+ def factory(*args_, **kwargs_):
+ if graphType.subclass:
+ return graphType.subclass(*args_, **kwargs_)
+ else:
+ return graphType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_node(self): return self.node
+ def set_node(self, node): self.node = node
+ def add_node(self, value): self.node.append(value)
+ def insert_node(self, index, value): self.node[index] = value
+ def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='graphType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='graphType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='graphType'):
+ for node_ in self.node:
+ node_.export(outfile, level, namespace_, name_='node')
+ def hasContent_(self):
+ if (
+ self.node is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='graphType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('node=[\n')
+ level += 1
+ for node in self.node:
+ showIndent(outfile, level)
+ outfile.write('model_.node(\n')
+ node.exportLiteral(outfile, level, name_='node')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'node':
+ obj_ = nodeType.factory()
+ obj_.build(child_)
+ self.node.append(obj_)
+# end class graphType
+
+
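+# nodeType is one graph node: an id attribute, a label, an optional link and
+# any number of childnode edges.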
+class nodeType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, label=None, link=None, childnode=None):
+ self.id = id
+ self.label = label
+ self.link = link
+ if childnode is None:
+ self.childnode = []
+ else:
+ self.childnode = childnode
+ def factory(*args_, **kwargs_):
+ if nodeType.subclass:
+ return nodeType.subclass(*args_, **kwargs_)
+ else:
+ return nodeType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_label(self): return self.label
+ def set_label(self, label): self.label = label
+ def get_link(self): return self.link
+ def set_link(self, link): self.link = link
+ def get_childnode(self): return self.childnode
+ def set_childnode(self, childnode): self.childnode = childnode
+ def add_childnode(self, value): self.childnode.append(value)
+ def insert_childnode(self, index, value): self.childnode[index] = value
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='nodeType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='nodeType'):
+ if self.label is not None:
+ showIndent(outfile, level)
+ outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_))
+ if self.link:
+ self.link.export(outfile, level, namespace_, name_='link')
+ for childnode_ in self.childnode:
+ childnode_.export(outfile, level, namespace_, name_='childnode')
+ def hasContent_(self):
+ if (
+ self.label is not None or
+ self.link is not None or
+ self.childnode is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='nodeType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding))
+ if self.link:
+ showIndent(outfile, level)
+ outfile.write('link=model_.linkType(\n')
+ self.link.exportLiteral(outfile, level, name_='link')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('childnode=[\n')
+ level += 1
+ for childnode in self.childnode:
+ showIndent(outfile, level)
+ outfile.write('model_.childnode(\n')
+ childnode.exportLiteral(outfile, level, name_='childnode')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'label':
+ label_ = ''
+ for text__content_ in child_.childNodes:
+ label_ += text__content_.nodeValue
+ self.label = label_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'link':
+ obj_ = linkType.factory()
+ obj_.build(child_)
+ self.set_link(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'childnode':
+ obj_ = childnodeType.factory()
+ obj_.build(child_)
+ self.childnode.append(obj_)
+# end class nodeType
+
+
+class label(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if label.subclass:
+ return label.subclass(*args_, **kwargs_)
+ else:
+ return label(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='label')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='label'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='label'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='label'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class label
+
+
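+# childnodeType is an outgoing edge of a graph node: relation and refid
+# attributes plus optional edgelabel strings.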
+class childnodeType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, relation=None, refid=None, edgelabel=None):
+ self.relation = relation
+ self.refid = refid
+ if edgelabel is None:
+ self.edgelabel = []
+ else:
+ self.edgelabel = edgelabel
+ def factory(*args_, **kwargs_):
+ if childnodeType.subclass:
+ return childnodeType.subclass(*args_, **kwargs_)
+ else:
+ return childnodeType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_edgelabel(self): return self.edgelabel
+ def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel
+ def add_edgelabel(self, value): self.edgelabel.append(value)
+ def insert_edgelabel(self, index, value): self.edgelabel[index] = value
+ def get_relation(self): return self.relation
+ def set_relation(self, relation): self.relation = relation
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='childnodeType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'):
+ if self.relation is not None:
+ outfile.write(' relation=%s' % (quote_attrib(self.relation), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'):
+ for edgelabel_ in self.edgelabel:
+ showIndent(outfile, level)
+ outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_))
+ def hasContent_(self):
+ if (
+ self.edgelabel is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='childnodeType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.relation is not None:
+ showIndent(outfile, level)
+ outfile.write('relation = "%s",\n' % (self.relation,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('edgelabel=[\n')
+ level += 1
+ for edgelabel in self.edgelabel:
+ showIndent(outfile, level)
+ outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding))
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('relation'):
+ self.relation = attrs.get('relation').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'edgelabel':
+ edgelabel_ = ''
+ for text__content_ in child_.childNodes:
+ edgelabel_ += text__content_.nodeValue
+ self.edgelabel.append(edgelabel_)
+# end class childnodeType
+
+
+class edgelabel(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if edgelabel.subclass:
+ return edgelabel.subclass(*args_, **kwargs_)
+ else:
+ return edgelabel(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='edgelabel')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='edgelabel'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class edgelabel
+
+
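+# linkType is a link target given by a refid and/or external attribute; any
+# character data of the element is kept in valueOf_.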
+class linkType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, refid=None, external=None, valueOf_=''):
+ self.refid = refid
+ self.external = external
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if linkType.subclass:
+ return linkType.subclass(*args_, **kwargs_)
+ else:
+ return linkType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_external(self): return self.external
+ def set_external(self, external): self.external = external
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='linkType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='linkType'):
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.external is not None:
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='linkType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='linkType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.external is not None:
+ showIndent(outfile, level)
+ outfile.write('external = %s,\n' % (self.external,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('external'):
+ self.external = attrs.get('external').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class linkType
+
+
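+# listingType is a code listing: an ordered list of codeline children.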
+class listingType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, codeline=None):
+ if codeline is None:
+ self.codeline = []
+ else:
+ self.codeline = codeline
+ def factory(*args_, **kwargs_):
+ if listingType.subclass:
+ return listingType.subclass(*args_, **kwargs_)
+ else:
+ return listingType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_codeline(self): return self.codeline
+ def set_codeline(self, codeline): self.codeline = codeline
+ def add_codeline(self, value): self.codeline.append(value)
+ def insert_codeline(self, index, value): self.codeline[index] = value
+ def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='listingType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='listingType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='listingType'):
+ for codeline_ in self.codeline:
+ codeline_.export(outfile, level, namespace_, name_='codeline')
+ def hasContent_(self):
+ if (
+ self.codeline is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='listingType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('codeline=[\n')
+ level += 1
+ for codeline in self.codeline:
+ showIndent(outfile, level)
+ outfile.write('model_.codeline(\n')
+ codeline.exportLiteral(outfile, level, name_='codeline')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'codeline':
+ obj_ = codelineType.factory()
+ obj_.build(child_)
+ self.codeline.append(obj_)
+# end class listingType
+
+
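+# codelineType is one line of a listing, carrying lineno, refid, refkind and
+# external attributes plus its highlight spans.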
+class codelineType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
+ self.external = external
+ self.lineno = lineno
+ self.refkind = refkind
+ self.refid = refid
+ if highlight is None:
+ self.highlight = []
+ else:
+ self.highlight = highlight
+ def factory(*args_, **kwargs_):
+ if codelineType.subclass:
+ return codelineType.subclass(*args_, **kwargs_)
+ else:
+ return codelineType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_highlight(self): return self.highlight
+ def set_highlight(self, highlight): self.highlight = highlight
+ def add_highlight(self, value): self.highlight.append(value)
+ def insert_highlight(self, index, value): self.highlight[index] = value
+ def get_external(self): return self.external
+ def set_external(self, external): self.external = external
+ def get_lineno(self): return self.lineno
+ def set_lineno(self, lineno): self.lineno = lineno
+ def get_refkind(self): return self.refkind
+ def set_refkind(self, refkind): self.refkind = refkind
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='codelineType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'):
+ if self.external is not None:
+ outfile.write(' external=%s' % (quote_attrib(self.external), ))
+ if self.lineno is not None:
+ outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno'))
+ if self.refkind is not None:
+ outfile.write(' refkind=%s' % (quote_attrib(self.refkind), ))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='codelineType'):
+ for highlight_ in self.highlight:
+ highlight_.export(outfile, level, namespace_, name_='highlight')
+ def hasContent_(self):
+ if (
+ self.highlight is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='codelineType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.external is not None:
+ showIndent(outfile, level)
+ outfile.write('external = "%s",\n' % (self.external,))
+ if self.lineno is not None:
+ showIndent(outfile, level)
+ outfile.write('lineno = %s,\n' % (self.lineno,))
+ if self.refkind is not None:
+ showIndent(outfile, level)
+ outfile.write('refkind = "%s",\n' % (self.refkind,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('highlight=[\n')
+ level += 1
+ for highlight in self.highlight:
+ showIndent(outfile, level)
+ outfile.write('model_.highlight(\n')
+ highlight.exportLiteral(outfile, level, name_='highlight')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('external'):
+ self.external = attrs.get('external').value
+ if attrs.get('lineno'):
+ try:
+ self.lineno = int(attrs.get('lineno').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (lineno): %s' % exp)
+ if attrs.get('refkind'):
+ self.refkind = attrs.get('refkind').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'highlight':
+ obj_ = highlightType.factory()
+ obj_.build(child_)
+ self.highlight.append(obj_)
+# end class codelineType
+
+
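+# highlightType is a syntax-highlight span inside a codeline; the XML 'class'
+# attribute is stored as classxx, and text, sp and ref children go into
+# content_.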
+class highlightType(GeneratedsSuper):
+    subclass = None
+    superclass = None
+    def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None):
+        self.classxx = classxx
+        # Keep sp and ref so their accessors and hasContent_() do not raise
+        # AttributeError.
+        self.sp = sp
+        self.ref = ref
+        if mixedclass_ is None:
+            self.mixedclass_ = MixedContainer
+        else:
+            self.mixedclass_ = mixedclass_
+        if content_ is None:
+            self.content_ = []
+        else:
+            self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if highlightType.subclass:
+ return highlightType.subclass(*args_, **kwargs_)
+ else:
+ return highlightType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_sp(self): return self.sp
+ def set_sp(self, sp): self.sp = sp
+ def add_sp(self, value): self.sp.append(value)
+ def insert_sp(self, index, value): self.sp[index] = value
+ def get_ref(self): return self.ref
+ def set_ref(self, ref): self.ref = ref
+ def add_ref(self, value): self.ref.append(value)
+ def insert_ref(self, index, value): self.ref[index] = value
+ def get_class(self): return self.classxx
+ def set_class(self, classxx): self.classxx = classxx
+ def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='highlightType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'):
+ if self.classxx is not None:
+ outfile.write(' class=%s' % (quote_attrib(self.classxx), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='highlightType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.sp is not None or
+ self.ref is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='highlightType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.classxx is not None:
+ showIndent(outfile, level)
+ outfile.write('classxx = "%s",\n' % (self.classxx,))
+ def exportLiteralChildren(self, outfile, level, name_):
+        showIndent(outfile, level)
+        outfile.write('content_ = [\n')
+        for item_ in self.content_:
+            item_.exportLiteral(outfile, level, name_)
+        showIndent(outfile, level)
+        outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('class'):
+ self.classxx = attrs.get('class').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sp':
+ value_ = []
+ for text_ in child_.childNodes:
+ value_.append(text_.nodeValue)
+ valuestr_ = ''.join(value_)
+ obj_ = self.mixedclass_(MixedContainer.CategorySimple,
+ MixedContainer.TypeString, 'sp', valuestr_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'ref':
+ childobj_ = docRefTextType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'ref', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class highlightType
+
+
+class sp(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if sp.subclass:
+ return sp.subclass(*args_, **kwargs_)
+ else:
+ return sp(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='sp')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='sp'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='sp'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='sp'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class sp
+
+
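+# referenceType backs the <references> and <referencedby> children of a
+# member definition: refid and compoundref identify the referenced entity,
+# while startline/endline locate the referencing code.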
+class referenceType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
+ self.endline = endline
+ self.startline = startline
+ self.refid = refid
+ self.compoundref = compoundref
+ self.valueOf_ = valueOf_
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if referenceType.subclass:
+ return referenceType.subclass(*args_, **kwargs_)
+ else:
+ return referenceType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_endline(self): return self.endline
+ def set_endline(self, endline): self.endline = endline
+ def get_startline(self): return self.startline
+ def set_startline(self, startline): self.startline = startline
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_compoundref(self): return self.compoundref
+ def set_compoundref(self, compoundref): self.compoundref = compoundref
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='referenceType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'):
+ if self.endline is not None:
+ outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline'))
+ if self.startline is not None:
+ outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline'))
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.compoundref is not None:
+ outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='referenceType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='referenceType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.endline is not None:
+ showIndent(outfile, level)
+ outfile.write('endline = %s,\n' % (self.endline,))
+ if self.startline is not None:
+ showIndent(outfile, level)
+ outfile.write('startline = %s,\n' % (self.startline,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.compoundref is not None:
+ showIndent(outfile, level)
+ outfile.write('compoundref = %s,\n' % (self.compoundref,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('endline'):
+ try:
+ self.endline = int(attrs.get('endline').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (endline): %s' % exp)
+ if attrs.get('startline'):
+ try:
+ self.startline = int(attrs.get('startline').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (startline): %s' % exp)
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('compoundref'):
+ self.compoundref = attrs.get('compoundref').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class referenceType
+
+
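+# locationType corresponds to the <location> element of a compound or member
+# definition (file/line of the declaration, bodyfile/bodystart/bodyend for
+# the body). A minimal usage sketch, assuming `node` is an xml.dom.minidom
+# element for a <location> tag (the variable names are illustrative only):
+#
+#     loc = locationType.factory()
+#     loc.build(node)
+#     print loc.get_file(), loc.get_line()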
+class locationType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
+ self.bodystart = bodystart
+ self.line = line
+ self.bodyend = bodyend
+ self.bodyfile = bodyfile
+ self.file = file
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if locationType.subclass:
+ return locationType.subclass(*args_, **kwargs_)
+ else:
+ return locationType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_bodystart(self): return self.bodystart
+ def set_bodystart(self, bodystart): self.bodystart = bodystart
+ def get_line(self): return self.line
+ def set_line(self, line): self.line = line
+ def get_bodyend(self): return self.bodyend
+ def set_bodyend(self, bodyend): self.bodyend = bodyend
+ def get_bodyfile(self): return self.bodyfile
+ def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile
+ def get_file(self): return self.file
+ def set_file(self, file): self.file = file
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='locationType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='locationType'):
+ if self.bodystart is not None:
+ outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart'))
+ if self.line is not None:
+ outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line'))
+ if self.bodyend is not None:
+ outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend'))
+ if self.bodyfile is not None:
+ outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), ))
+ if self.file is not None:
+ outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='locationType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='locationType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.bodystart is not None:
+ showIndent(outfile, level)
+ outfile.write('bodystart = %s,\n' % (self.bodystart,))
+ if self.line is not None:
+ showIndent(outfile, level)
+ outfile.write('line = %s,\n' % (self.line,))
+ if self.bodyend is not None:
+ showIndent(outfile, level)
+ outfile.write('bodyend = %s,\n' % (self.bodyend,))
+ if self.bodyfile is not None:
+ showIndent(outfile, level)
+ outfile.write('bodyfile = %s,\n' % (self.bodyfile,))
+ if self.file is not None:
+ showIndent(outfile, level)
+ outfile.write('file = %s,\n' % (self.file,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('bodystart'):
+ try:
+ self.bodystart = int(attrs.get('bodystart').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (bodystart): %s' % exp)
+ if attrs.get('line'):
+ try:
+ self.line = int(attrs.get('line').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (line): %s' % exp)
+ if attrs.get('bodyend'):
+ try:
+ self.bodyend = int(attrs.get('bodyend').value)
+ except ValueError, exp:
+ raise ValueError('Bad integer attribute (bodyend): %s' % exp)
+ if attrs.get('bodyfile'):
+ self.bodyfile = attrs.get('bodyfile').value
+ if attrs.get('file'):
+ self.file = attrs.get('file').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class locationType
+
+
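+# docSect1Type through docSect4Type model the nested <sect1>..<sect4> blocks
+# of a detailed description. They are mixed-content types: <title>, <para>,
+# deeper sections and <internal> children are gathered, in document order,
+# as MixedContainer entries on self.content_.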
+class docSect1Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
+ self.id = id
+ self.title = title
+ self.para = para
+ self.sect2 = sect2
+ self.internal = internal
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docSect1Type.subclass:
+ return docSect1Type.subclass(*args_, **kwargs_)
+ else:
+ return docSect1Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect2(self): return self.sect2
+ def set_sect2(self, sect2): self.sect2 = sect2
+ def add_sect2(self, value): self.sect2.append(value)
+ def insert_sect2(self, index, value): self.sect2[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSect1Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.sect2 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSect1Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect2':
+ childobj_ = docSect2Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect2', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalS1Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docSect1Type
+
+
+class docSect2Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
+ self.id = id
+ self.title = title
+ self.para = para
+ self.sect3 = sect3
+ self.internal = internal
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docSect2Type.subclass:
+ return docSect2Type.subclass(*args_, **kwargs_)
+ else:
+ return docSect2Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect3(self): return self.sect3
+ def set_sect3(self, sect3): self.sect3 = sect3
+ def add_sect3(self, value): self.sect3.append(value)
+ def insert_sect3(self, index, value): self.sect3[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSect2Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.sect3 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSect2Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect3':
+ childobj_ = docSect3Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect3', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalS2Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docSect2Type
+
+
+class docSect3Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
+ self.id = id
+ self.title = title
+ self.para = para
+ self.sect4 = sect4
+ self.internal = internal
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docSect3Type.subclass:
+ return docSect3Type.subclass(*args_, **kwargs_)
+ else:
+ return docSect3Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect4(self): return self.sect4
+ def set_sect4(self, sect4): self.sect4 = sect4
+ def add_sect4(self, value): self.sect4.append(value)
+ def insert_sect4(self, index, value): self.sect4[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSect3Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.sect4 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSect3Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect4':
+ childobj_ = docSect4Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect4', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalS3Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docSect3Type
+
+
+class docSect4Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None):
+ self.id = id
+ self.title = title
+ self.para = para
+ self.internal = internal
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docSect4Type.subclass:
+ return docSect4Type.subclass(*args_, **kwargs_)
+ else:
+ return docSect4Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSect4Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSect4Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ childobj_ = docTitleType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'title', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ childobj_ = docInternalS4Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'internal', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docSect4Type
+
+
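+# The docInternal*Type classes wrap the <internal> element at each section
+# depth; each holds paragraphs plus the next-deeper section elements.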
+class docInternalType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
+ self.para = para
+ self.sect1 = sect1
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalType.subclass:
+ return docInternalType.subclass(*args_, **kwargs_)
+ else:
+ return docInternalType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect1(self): return self.sect1
+ def set_sect1(self, sect1): self.sect1 = sect1
+ def add_sect1(self, value): self.sect1.append(value)
+ def insert_sect1(self, index, value): self.sect1[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect1 is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect1':
+ childobj_ = docSect1Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect1', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalType
+
+
+class docInternalS1Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
+ self.para = para
+ self.sect2 = sect2
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalS1Type.subclass:
+ return docInternalS1Type.subclass(*args_, **kwargs_)
+ else:
+ return docInternalS1Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect2(self): return self.sect2
+ def set_sect2(self, sect2): self.sect2 = sect2
+ def add_sect2(self, value): self.sect2.append(value)
+ def insert_sect2(self, index, value): self.sect2[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect2 is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalS1Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect2':
+ childobj_ = docSect2Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect2', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalS1Type
+
+
+class docInternalS2Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
+ self.para = para
+ self.sect3 = sect3
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalS2Type.subclass:
+ return docInternalS2Type.subclass(*args_, **kwargs_)
+ else:
+ return docInternalS2Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect3(self): return self.sect3
+ def set_sect3(self, sect3): self.sect3 = sect3
+ def add_sect3(self, value): self.sect3.append(value)
+ def insert_sect3(self, index, value): self.sect3[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect3 is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalS2Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect3':
+ childobj_ = docSect3Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect3', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalS2Type
+
+
+class docInternalS3Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
+ self.para = para
+ self.sect3 = sect3
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalS3Type.subclass:
+ return docInternalS3Type.subclass(*args_, **kwargs_)
+ else:
+ return docInternalS3Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect3(self): return self.sect3
+ def set_sect3(self, sect3): self.sect3 = sect3
+ def add_sect3(self, value): self.sect3.append(value)
+ def insert_sect3(self, index, value): self.sect3[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect3 is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalS3Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect3':
+ childobj_ = docSect4Type.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'sect3', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalS3Type
+
+
+class docInternalS4Type(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None, mixedclass_=None, content_=None):
+ self.para = para
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docInternalS4Type.subclass:
+ return docInternalS4Type.subclass(*args_, **kwargs_)
+ else:
+ return docInternalS4Type(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docInternalS4Type'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ childobj_ = docParaType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'para', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docInternalS4Type
+
+
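+# docTitleType, docParaType, docMarkupType and docURLLink record plain text
+# twice: once as MixedContainer text entries on self.content_ and once in the
+# flat string self.valueOf_, which callers can read back with getValueOf_().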
+class docTitleType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docTitleType.subclass:
+ return docTitleType.subclass(*args_, **kwargs_)
+ else:
+ return docTitleType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docTitleType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docTitleType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docTitleType
+
+
+class docParaType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docParaType.subclass:
+ return docParaType.subclass(*args_, **kwargs_)
+ else:
+ return docParaType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParaType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docParaType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParaType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docParaType
+
+
+class docMarkupType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docMarkupType.subclass:
+ return docMarkupType.subclass(*args_, **kwargs_)
+ else:
+ return docMarkupType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docMarkupType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docMarkupType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docMarkupType
+
+
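+# docURLLink: hyperlink text node; adds a 'url' attribute to the mixed-content
+# pattern described above.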
+class docURLLink(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ self.url = url
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docURLLink.subclass:
+ return docURLLink.subclass(*args_, **kwargs_)
+ else:
+ return docURLLink(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_url(self): return self.url
+ def set_url(self, url): self.url = url
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docURLLink')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'):
+ if self.url is not None:
+ outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docURLLink'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.url is not None:
+ showIndent(outfile, level)
+ outfile.write('url = %s,\n' % (self.url,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('url'):
+ self.url = attrs.get('url').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docURLLink
+
+
+class docAnchorType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docAnchorType.subclass:
+ return docAnchorType.subclass(*args_, **kwargs_)
+ else:
+ return docAnchorType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docAnchorType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docAnchorType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docAnchorType
+
+
+class docFormulaType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docFormulaType.subclass:
+ return docFormulaType.subclass(*args_, **kwargs_)
+ else:
+ return docFormulaType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docFormulaType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docFormulaType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docFormulaType
+
+
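+# docIndexEntryType: index entry holding plain-text <primaryie> and
+# <secondaryie> children, which buildChildren() flattens to strings.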
+class docIndexEntryType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, primaryie=None, secondaryie=None):
+ self.primaryie = primaryie
+ self.secondaryie = secondaryie
+ def factory(*args_, **kwargs_):
+ if docIndexEntryType.subclass:
+ return docIndexEntryType.subclass(*args_, **kwargs_)
+ else:
+ return docIndexEntryType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_primaryie(self): return self.primaryie
+ def set_primaryie(self, primaryie): self.primaryie = primaryie
+ def get_secondaryie(self): return self.secondaryie
+ def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie
+ def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'):
+ if self.primaryie is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_))
+ if self.secondaryie is not None:
+ showIndent(outfile, level)
+ outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_))
+ def hasContent_(self):
+ if (
+ self.primaryie is not None or
+ self.secondaryie is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docIndexEntryType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'primaryie':
+ primaryie_ = ''
+ for text__content_ in child_.childNodes:
+ primaryie_ += text__content_.nodeValue
+ self.primaryie = primaryie_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'secondaryie':
+ secondaryie_ = ''
+ for text__content_ in child_.childNodes:
+ secondaryie_ += text__content_.nodeValue
+ self.secondaryie = secondaryie_
+# end class docIndexEntryType
+
+
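+# docListType: list container; 'listitem' children are built as
+# docListItemType objects, each of which holds docParaType paragraphs.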
+class docListType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, listitem=None):
+ if listitem is None:
+ self.listitem = []
+ else:
+ self.listitem = listitem
+ def factory(*args_, **kwargs_):
+ if docListType.subclass:
+ return docListType.subclass(*args_, **kwargs_)
+ else:
+ return docListType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_listitem(self): return self.listitem
+ def set_listitem(self, listitem): self.listitem = listitem
+ def add_listitem(self, value): self.listitem.append(value)
+ def insert_listitem(self, index, value): self.listitem[index] = value
+ def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docListType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docListType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docListType'):
+ for listitem_ in self.listitem:
+ listitem_.export(outfile, level, namespace_, name_='listitem')
+ def hasContent_(self):
+ if (
+ self.listitem is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docListType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('listitem=[\n')
+ level += 1
+ for listitem in self.listitem:
+ showIndent(outfile, level)
+ outfile.write('model_.listitem(\n')
+ listitem.exportLiteral(outfile, level, name_='listitem')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'listitem':
+ obj_ = docListItemType.factory()
+ obj_.build(child_)
+ self.listitem.append(obj_)
+# end class docListType
+
+
+class docListItemType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, para=None):
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ def factory(*args_, **kwargs_):
+ if docListItemType.subclass:
+ return docListItemType.subclass(*args_, **kwargs_)
+ else:
+ return docListItemType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docListItemType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'):
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ def hasContent_(self):
+ if (
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docListItemType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+# end class docListItemType
+
+
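+# docSimpleSectType: simple section with a 'kind' attribute, an optional
+# title built as docTitleType, and a list of docParaType paragraphs.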
+class docSimpleSectType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, title=None, para=None):
+ self.kind = kind
+ self.title = title
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ def factory(*args_, **kwargs_):
+ if docSimpleSectType.subclass:
+ return docSimpleSectType.subclass(*args_, **kwargs_)
+ else:
+ return docSimpleSectType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_title(self): return self.title
+ def set_title(self, title): self.title = title
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'):
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'):
+ if self.title:
+ self.title.export(outfile, level, namespace_, name_='title')
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ def hasContent_(self):
+ if (
+ self.title is not None or
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docSimpleSectType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.title:
+ showIndent(outfile, level)
+ outfile.write('title=model_.docTitleType(\n')
+ self.title.exportLiteral(outfile, level, name_='title')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'title':
+ obj_ = docTitleType.factory()
+ obj_.build(child_)
+ self.set_title(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+# end class docSimpleSectType
+
+
+class docVarListEntryType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, term=None):
+ self.term = term
+ def factory(*args_, **kwargs_):
+ if docVarListEntryType.subclass:
+ return docVarListEntryType.subclass(*args_, **kwargs_)
+ else:
+ return docVarListEntryType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_term(self): return self.term
+ def set_term(self, term): self.term = term
+ def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'):
+ if self.term:
+ self.term.export(outfile, level, namespace_, name_='term', )
+ def hasContent_(self):
+ if (
+ self.term is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docVarListEntryType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ if self.term:
+ showIndent(outfile, level)
+ outfile.write('term=model_.docTitleType(\n')
+ self.term.exportLiteral(outfile, level, name_='term')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'term':
+ obj_ = docTitleType.factory()
+ obj_.build(child_)
+ self.set_term(obj_)
+# end class docVarListEntryType
+
+
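+# docVariableListType: this generated class does not build its element
+# children into objects; only direct text/CDATA nodes are accumulated into
+# valueOf_.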
+class docVariableListType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if docVariableListType.subclass:
+ return docVariableListType.subclass(*args_, **kwargs_)
+ else:
+ return docVariableListType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docVariableListType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docVariableListType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docVariableListType
+
+
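+# docRefTextType: cross-reference text with 'refid', 'kindref' and 'external'
+# attributes plus mixed text content.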
+class docRefTextType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ self.refid = refid
+ self.kindref = kindref
+ self.external = external
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docRefTextType.subclass:
+ return docRefTextType.subclass(*args_, **kwargs_)
+ else:
+ return docRefTextType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def get_kindref(self): return self.kindref
+ def set_kindref(self, kindref): self.kindref = kindref
+ def get_external(self): return self.external
+ def set_external(self, external): self.external = external
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docRefTextType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'):
+ if self.refid is not None:
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ if self.kindref is not None:
+ outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
+ if self.external is not None:
+ outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docRefTextType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ if self.kindref is not None:
+ showIndent(outfile, level)
+ outfile.write('kindref = "%s",\n' % (self.kindref,))
+ if self.external is not None:
+ showIndent(outfile, level)
+ outfile.write('external = %s,\n' % (self.external,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ if attrs.get('kindref'):
+ self.kindref = attrs.get('kindref').value
+ if attrs.get('external'):
+ self.external = attrs.get('external').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docRefTextType
+
+
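+# docTableType: table with integer 'rows'/'cols' attributes (validated in
+# buildAttributes), 'row' children built as docRowType and an optional
+# 'caption' built as docCaptionType.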
+class docTableType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, rows=None, cols=None, row=None, caption=None):
+ self.rows = rows
+ self.cols = cols
+ if row is None:
+ self.row = []
+ else:
+ self.row = row
+ self.caption = caption
+ def factory(*args_, **kwargs_):
+ if docTableType.subclass:
+ return docTableType.subclass(*args_, **kwargs_)
+ else:
+ return docTableType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_row(self): return self.row
+ def set_row(self, row): self.row = row
+ def add_row(self, value): self.row.append(value)
+ def insert_row(self, index, value): self.row[index] = value
+ def get_caption(self): return self.caption
+ def set_caption(self, caption): self.caption = caption
+ def get_rows(self): return self.rows
+ def set_rows(self, rows): self.rows = rows
+ def get_cols(self): return self.cols
+ def set_cols(self, cols): self.cols = cols
+ def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docTableType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'):
+ if self.rows is not None:
+ outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows'))
+ if self.cols is not None:
+ outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols'))
+ def exportChildren(self, outfile, level, namespace_='', name_='docTableType'):
+ for row_ in self.row:
+ row_.export(outfile, level, namespace_, name_='row')
+ if self.caption:
+ self.caption.export(outfile, level, namespace_, name_='caption')
+ def hasContent_(self):
+ if (
+ self.row is not None or
+ self.caption is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docTableType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.rows is not None:
+ showIndent(outfile, level)
+ outfile.write('rows = %s,\n' % (self.rows,))
+ if self.cols is not None:
+ showIndent(outfile, level)
+ outfile.write('cols = %s,\n' % (self.cols,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('row=[\n')
+ level += 1
+ for row in self.row:
+ showIndent(outfile, level)
+ outfile.write('model_.row(\n')
+ row.exportLiteral(outfile, level, name_='row')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.caption:
+ showIndent(outfile, level)
+ outfile.write('caption=model_.docCaptionType(\n')
+ self.caption.exportLiteral(outfile, level, name_='caption')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('rows'):
+ try:
+ self.rows = int(attrs.get('rows').value)
+ except ValueError as exp:
+ raise ValueError('Bad integer attribute (rows): %s' % exp)
+ if attrs.get('cols'):
+ try:
+ self.cols = int(attrs.get('cols').value)
+ except ValueError as exp:
+ raise ValueError('Bad integer attribute (cols): %s' % exp)
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'row':
+ obj_ = docRowType.factory()
+ obj_.build(child_)
+ self.row.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'caption':
+ obj_ = docCaptionType.factory()
+ obj_.build(child_)
+ self.set_caption(obj_)
+# end class docTableType
+
+
+class docRowType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, entry=None):
+ if entry is None:
+ self.entry = []
+ else:
+ self.entry = entry
+ def factory(*args_, **kwargs_):
+ if docRowType.subclass:
+ return docRowType.subclass(*args_, **kwargs_)
+ else:
+ return docRowType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_entry(self): return self.entry
+ def set_entry(self, entry): self.entry = entry
+ def add_entry(self, value): self.entry.append(value)
+ def insert_entry(self, index, value): self.entry[index] = value
+ def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docRowType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docRowType'):
+ for entry_ in self.entry:
+ entry_.export(outfile, level, namespace_, name_='entry')
+ def hasContent_(self):
+ if (
+ self.entry is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docRowType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('entry=[\n')
+ level += 1
+ for entry in self.entry:
+ showIndent(outfile, level)
+ outfile.write('model_.entry(\n')
+ entry.exportLiteral(outfile, level, name_='entry')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'entry':
+ obj_ = docEntryType.factory()
+ obj_.build(child_)
+ self.entry.append(obj_)
+# end class docRowType
+
+
+class docEntryType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, thead=None, para=None):
+ self.thead = thead
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ def factory(*args_, **kwargs_):
+ if docEntryType.subclass:
+ return docEntryType.subclass(*args_, **kwargs_)
+ else:
+ return docEntryType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_thead(self): return self.thead
+ def set_thead(self, thead): self.thead = thead
+ def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docEntryType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'):
+ if self.thead is not None:
+ outfile.write(' thead=%s' % (quote_attrib(self.thead), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'):
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ def hasContent_(self):
+ if (
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docEntryType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.thead is not None:
+ showIndent(outfile, level)
+ outfile.write('thead = "%s",\n' % (self.thead,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('thead'):
+ self.thead = attrs.get('thead').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+# end class docEntryType
+
+
+class docCaptionType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docCaptionType.subclass:
+ return docCaptionType.subclass(*args_, **kwargs_)
+ else:
+ return docCaptionType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docCaptionType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docCaptionType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docCaptionType
+
+
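+# docHeadingType: heading with an integer 'level' attribute plus mixed text
+# content.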
+class docHeadingType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ self.level = level
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docHeadingType.subclass:
+ return docHeadingType.subclass(*args_, **kwargs_)
+ else:
+ return docHeadingType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_level(self): return self.level
+ def set_level(self, level): self.level = level
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docHeadingType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'):
+ if self.level is not None:
+ outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level'))
+ def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docHeadingType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.level is not None:
+ showIndent(outfile, level)
+ outfile.write('level = %s,\n' % (self.level,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('level'):
+ try:
+ self.level = int(attrs.get('level').value)
+ except ValueError as exp:
+ raise ValueError('Bad integer attribute (level): %s' % exp)
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docHeadingType
+
+
+class docImageType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ self.width = width
+ self.type_ = type_
+ self.name = name
+ self.height = height
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docImageType.subclass:
+ return docImageType.subclass(*args_, **kwargs_)
+ else:
+ return docImageType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_width(self): return self.width
+ def set_width(self, width): self.width = width
+ def get_type(self): return self.type_
+ def set_type(self, type_): self.type_ = type_
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_height(self): return self.height
+ def set_height(self, height): self.height = height
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docImageType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'):
+ if self.width is not None:
+ outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), ))
+ if self.type_ is not None:
+ outfile.write(' type=%s' % (quote_attrib(self.type_), ))
+ if self.name is not None:
+ outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
+ if self.height is not None:
+ outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docImageType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docImageType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.width is not None:
+ showIndent(outfile, level)
+ outfile.write('width = %s,\n' % (self.width,))
+ if self.type_ is not None:
+ showIndent(outfile, level)
+ outfile.write('type_ = "%s",\n' % (self.type_,))
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('name = %s,\n' % (self.name,))
+ if self.height is not None:
+ showIndent(outfile, level)
+ outfile.write('height = %s,\n' % (self.height,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('width'):
+ self.width = attrs.get('width').value
+ if attrs.get('type'):
+ self.type_ = attrs.get('type').value
+ if attrs.get('name'):
+ self.name = attrs.get('name').value
+ if attrs.get('height'):
+ self.height = attrs.get('height').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docImageType
+
+
+class docDotFileType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ self.name = name
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docDotFileType.subclass:
+ return docDotFileType.subclass(*args_, **kwargs_)
+ else:
+ return docDotFileType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docDotFileType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'):
+ if self.name is not None:
+ outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docDotFileType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('name = %s,\n' % (self.name,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('name'):
+ self.name = attrs.get('name').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docDotFileType
+
+
+class docTocItemType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
+ self.valueOf_ = valueOf_
+ self.id = id
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docTocItemType.subclass:
+ return docTocItemType.subclass(*args_, **kwargs_)
+ else:
+ return docTocItemType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docTocItemType')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docTocItemType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docTocItemType
+
+
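+# docTocListType: table-of-contents list; 'tocitem' children are built as
+# docTocItemType objects.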
+class docTocListType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, tocitem=None):
+ if tocitem is None:
+ self.tocitem = []
+ else:
+ self.tocitem = tocitem
+ def factory(*args_, **kwargs_):
+ if docTocListType.subclass:
+ return docTocListType.subclass(*args_, **kwargs_)
+ else:
+ return docTocListType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_tocitem(self): return self.tocitem
+ def set_tocitem(self, tocitem): self.tocitem = tocitem
+ def add_tocitem(self, value): self.tocitem.append(value)
+ def insert_tocitem(self, index, value): self.tocitem[index] = value
+ def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docTocListType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'):
+ for tocitem_ in self.tocitem:
+ tocitem_.export(outfile, level, namespace_, name_='tocitem')
+ def hasContent_(self):
+ if (
+ self.tocitem is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docTocListType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('tocitem=[\n')
+ level += 1
+ for tocitem in self.tocitem:
+ showIndent(outfile, level)
+ outfile.write('model_.tocitem(\n')
+ tocitem.exportLiteral(outfile, level, name_='tocitem')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'tocitem':
+ obj_ = docTocItemType.factory()
+ obj_.build(child_)
+ self.tocitem.append(obj_)
+# end class docTocListType
+
+
+class docLanguageType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, langid=None, para=None):
+ self.langid = langid
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ def factory(*args_, **kwargs_):
+ if docLanguageType.subclass:
+ return docLanguageType.subclass(*args_, **kwargs_)
+ else:
+ return docLanguageType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_langid(self): return self.langid
+ def set_langid(self, langid): self.langid = langid
+ def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docLanguageType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'):
+ if self.langid is not None:
+ outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'):
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ def hasContent_(self):
+ if (
+ self.para is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docLanguageType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.langid is not None:
+ showIndent(outfile, level)
+ outfile.write('langid = %s,\n' % (self.langid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('langid'):
+ self.langid = attrs.get('langid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+# end class docLanguageType
+
+
+class docParamListType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, parameteritem=None):
+ self.kind = kind
+ if parameteritem is None:
+ self.parameteritem = []
+ else:
+ self.parameteritem = parameteritem
+ def factory(*args_, **kwargs_):
+ if docParamListType.subclass:
+ return docParamListType.subclass(*args_, **kwargs_)
+ else:
+ return docParamListType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_parameteritem(self): return self.parameteritem
+ def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem
+ def add_parameteritem(self, value): self.parameteritem.append(value)
+ def insert_parameteritem(self, index, value): self.parameteritem[index] = value
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParamListType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'):
+ if self.kind is not None:
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'):
+ for parameteritem_ in self.parameteritem:
+ parameteritem_.export(outfile, level, namespace_, name_='parameteritem')
+ def hasContent_(self):
+ if (
+ self.parameteritem is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParamListType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('parameteritem=[\n')
+ level += 1
+ for parameteritem in self.parameteritem:
+ showIndent(outfile, level)
+ outfile.write('model_.parameteritem(\n')
+ parameteritem.exportLiteral(outfile, level, name_='parameteritem')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parameteritem':
+ obj_ = docParamListItem.factory()
+ obj_.build(child_)
+ self.parameteritem.append(obj_)
+# end class docParamListType
+
+
+class docParamListItem(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, parameternamelist=None, parameterdescription=None):
+ if parameternamelist is None:
+ self.parameternamelist = []
+ else:
+ self.parameternamelist = parameternamelist
+ self.parameterdescription = parameterdescription
+ def factory(*args_, **kwargs_):
+ if docParamListItem.subclass:
+ return docParamListItem.subclass(*args_, **kwargs_)
+ else:
+ return docParamListItem(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_parameternamelist(self): return self.parameternamelist
+ def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist
+ def add_parameternamelist(self, value): self.parameternamelist.append(value)
+ def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value
+ def get_parameterdescription(self): return self.parameterdescription
+ def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription
+ def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParamListItem')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'):
+ for parameternamelist_ in self.parameternamelist:
+ parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist')
+ if self.parameterdescription:
+ self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', )
+ def hasContent_(self):
+ if (
+ self.parameternamelist is not None or
+ self.parameterdescription is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParamListItem'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('parameternamelist=[\n')
+ level += 1
+ for parameternamelist in self.parameternamelist:
+ showIndent(outfile, level)
+ outfile.write('model_.parameternamelist(\n')
+ parameternamelist.exportLiteral(outfile, level, name_='parameternamelist')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.parameterdescription:
+ showIndent(outfile, level)
+ outfile.write('parameterdescription=model_.descriptionType(\n')
+ self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parameternamelist':
+ obj_ = docParamNameList.factory()
+ obj_.build(child_)
+ self.parameternamelist.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parameterdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_parameterdescription(obj_)
+# end class docParamListItem
+
+
+class docParamNameList(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, parametername=None):
+ if parametername is None:
+ self.parametername = []
+ else:
+ self.parametername = parametername
+ def factory(*args_, **kwargs_):
+ if docParamNameList.subclass:
+ return docParamNameList.subclass(*args_, **kwargs_)
+ else:
+ return docParamNameList(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_parametername(self): return self.parametername
+ def set_parametername(self, parametername): self.parametername = parametername
+ def add_parametername(self, value): self.parametername.append(value)
+ def insert_parametername(self, index, value): self.parametername[index] = value
+ def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParamNameList')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'):
+ for parametername_ in self.parametername:
+ parametername_.export(outfile, level, namespace_, name_='parametername')
+ def hasContent_(self):
+ if (
+ self.parametername is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParamNameList'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('parametername=[\n')
+ level += 1
+ for parametername in self.parametername:
+ showIndent(outfile, level)
+ outfile.write('model_.parametername(\n')
+ parametername.exportLiteral(outfile, level, name_='parametername')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'parametername':
+ obj_ = docParamName.factory()
+ obj_.build(child_)
+ self.parametername.append(obj_)
+# end class docParamNameList
+
+
+class docParamName(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
+ self.direction = direction
+ if mixedclass_ is None:
+ self.mixedclass_ = MixedContainer
+ else:
+ self.mixedclass_ = mixedclass_
+ if content_ is None:
+ self.content_ = []
+ else:
+ self.content_ = content_
+ def factory(*args_, **kwargs_):
+ if docParamName.subclass:
+ return docParamName.subclass(*args_, **kwargs_)
+ else:
+ return docParamName(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_ref(self): return self.ref
+ def set_ref(self, ref): self.ref = ref
+ def get_direction(self): return self.direction
+ def set_direction(self, direction): self.direction = direction
+ def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docParamName')
+ outfile.write('>')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'):
+ if self.direction is not None:
+ outfile.write(' direction=%s' % (quote_attrib(self.direction), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docParamName'):
+ for item_ in self.content_:
+ item_.export(outfile, level, item_.name, namespace_)
+ def hasContent_(self):
+ if (
+ self.ref is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docParamName'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.direction is not None:
+ showIndent(outfile, level)
+ outfile.write('direction = "%s",\n' % (self.direction,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('content_ = [\n')
+ for item_ in self.content_:
+ item_.exportLiteral(outfile, level, name_)
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('direction'):
+ self.direction = attrs.get('direction').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'ref':
+ childobj_ = docRefTextType.factory()
+ childobj_.build(child_)
+ obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
+ MixedContainer.TypeNone, 'ref', childobj_)
+ self.content_.append(obj_)
+ elif child_.nodeType == Node.TEXT_NODE:
+ obj_ = self.mixedclass_(MixedContainer.CategoryText,
+ MixedContainer.TypeNone, '', child_.nodeValue)
+ self.content_.append(obj_)
+# end class docParamName
+
+
+class docXRefSectType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, id=None, xreftitle=None, xrefdescription=None):
+ self.id = id
+ if xreftitle is None:
+ self.xreftitle = []
+ else:
+ self.xreftitle = xreftitle
+ self.xrefdescription = xrefdescription
+ def factory(*args_, **kwargs_):
+ if docXRefSectType.subclass:
+ return docXRefSectType.subclass(*args_, **kwargs_)
+ else:
+ return docXRefSectType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_xreftitle(self): return self.xreftitle
+ def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle
+ def add_xreftitle(self, value): self.xreftitle.append(value)
+ def insert_xreftitle(self, index, value): self.xreftitle[index] = value
+ def get_xrefdescription(self): return self.xrefdescription
+ def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription
+ def get_id(self): return self.id
+ def set_id(self, id): self.id = id
+ def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'):
+ if self.id is not None:
+ outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'):
+ for xreftitle_ in self.xreftitle:
+ showIndent(outfile, level)
+ outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_))
+ if self.xrefdescription:
+ self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', )
+ def hasContent_(self):
+ if (
+ self.xreftitle is not None or
+ self.xrefdescription is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docXRefSectType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.id is not None:
+ showIndent(outfile, level)
+ outfile.write('id = %s,\n' % (self.id,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('xreftitle=[\n')
+ level += 1
+ for xreftitle in self.xreftitle:
+ showIndent(outfile, level)
+ outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding))
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.xrefdescription:
+ showIndent(outfile, level)
+ outfile.write('xrefdescription=model_.descriptionType(\n')
+ self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('id'):
+ self.id = attrs.get('id').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'xreftitle':
+ xreftitle_ = ''
+ for text__content_ in child_.childNodes:
+ xreftitle_ += text__content_.nodeValue
+ self.xreftitle.append(xreftitle_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'xrefdescription':
+ obj_ = descriptionType.factory()
+ obj_.build(child_)
+ self.set_xrefdescription(obj_)
+# end class docXRefSectType
+
+
+class docCopyType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, link=None, para=None, sect1=None, internal=None):
+ self.link = link
+ if para is None:
+ self.para = []
+ else:
+ self.para = para
+ if sect1 is None:
+ self.sect1 = []
+ else:
+ self.sect1 = sect1
+ self.internal = internal
+ def factory(*args_, **kwargs_):
+ if docCopyType.subclass:
+ return docCopyType.subclass(*args_, **kwargs_)
+ else:
+ return docCopyType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_para(self): return self.para
+ def set_para(self, para): self.para = para
+ def add_para(self, value): self.para.append(value)
+ def insert_para(self, index, value): self.para[index] = value
+ def get_sect1(self): return self.sect1
+ def set_sect1(self, sect1): self.sect1 = sect1
+ def add_sect1(self, value): self.sect1.append(value)
+ def insert_sect1(self, index, value): self.sect1[index] = value
+ def get_internal(self): return self.internal
+ def set_internal(self, internal): self.internal = internal
+ def get_link(self): return self.link
+ def set_link(self, link): self.link = link
+ def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docCopyType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'):
+ if self.link is not None:
+ outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'):
+ for para_ in self.para:
+ para_.export(outfile, level, namespace_, name_='para')
+ for sect1_ in self.sect1:
+ sect1_.export(outfile, level, namespace_, name_='sect1')
+ if self.internal:
+ self.internal.export(outfile, level, namespace_, name_='internal')
+ def hasContent_(self):
+ if (
+ self.para is not None or
+ self.sect1 is not None or
+ self.internal is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docCopyType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.link is not None:
+ showIndent(outfile, level)
+ outfile.write('link = %s,\n' % (self.link,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('para=[\n')
+ level += 1
+ for para in self.para:
+ showIndent(outfile, level)
+ outfile.write('model_.para(\n')
+ para.exportLiteral(outfile, level, name_='para')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ showIndent(outfile, level)
+ outfile.write('sect1=[\n')
+ level += 1
+ for sect1 in self.sect1:
+ showIndent(outfile, level)
+ outfile.write('model_.sect1(\n')
+ sect1.exportLiteral(outfile, level, name_='sect1')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ if self.internal:
+ showIndent(outfile, level)
+ outfile.write('internal=model_.docInternalType(\n')
+ self.internal.exportLiteral(outfile, level, name_='internal')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('link'):
+ self.link = attrs.get('link').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'para':
+ obj_ = docParaType.factory()
+ obj_.build(child_)
+ self.para.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'sect1':
+ obj_ = docSect1Type.factory()
+ obj_.build(child_)
+ self.sect1.append(obj_)
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'internal':
+ obj_ = docInternalType.factory()
+ obj_.build(child_)
+ self.set_internal(obj_)
+# end class docCopyType
+
+
+class docCharType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, char=None, valueOf_=''):
+ self.char = char
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if docCharType.subclass:
+ return docCharType.subclass(*args_, **kwargs_)
+ else:
+ return docCharType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_char(self): return self.char
+ def set_char(self, char): self.char = char
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docCharType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'):
+ if self.char is not None:
+ outfile.write(' char=%s' % (quote_attrib(self.char), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='docCharType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docCharType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.char is not None:
+ showIndent(outfile, level)
+ outfile.write('char = "%s",\n' % (self.char,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('char'):
+ self.char = attrs.get('char').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docCharType
+
+
+class docEmptyType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, valueOf_=''):
+ self.valueOf_ = valueOf_
+ def factory(*args_, **kwargs_):
+ if docEmptyType.subclass:
+ return docEmptyType.subclass(*args_, **kwargs_)
+ else:
+ return docEmptyType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def getValueOf_(self): return self.valueOf_
+ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
+ def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='docEmptyType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'):
+ pass
+ def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'):
+ if self.valueOf_.find('![CDATA')>-1:
+ value=quote_xml('%s' % self.valueOf_)
+ value=value.replace('![CDATA','<![CDATA')
+ value=value.replace(']]',']]>')
+ outfile.write(value)
+ else:
+ outfile.write(quote_xml('%s' % self.valueOf_))
+ def hasContent_(self):
+ if (
+ self.valueOf_ is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='docEmptyType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ pass
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ self.valueOf_ = ''
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ pass
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.TEXT_NODE:
+ self.valueOf_ += child_.nodeValue
+ elif child_.nodeType == Node.CDATA_SECTION_NODE:
+ self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
+# end class docEmptyType
+
+
+USAGE_TEXT = """
+Usage: python <Parser>.py [ -s ] <in_xml_file>
+Options:
+ -s Use the SAX parser, not the minidom parser.
+"""
+
+def usage():
+ print USAGE_TEXT
+ sys.exit(1)
+
+
+def parse(inFileName):
+ doc = minidom.parse(inFileName)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('<?xml version="1.0" ?>\n')
+ rootObj.export(sys.stdout, 0, name_="doxygen",
+ namespacedef_='')
+ return rootObj
+
+
+def parseString(inString):
+ doc = minidom.parseString(inString)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('<?xml version="1.0" ?>\n')
+ rootObj.export(sys.stdout, 0, name_="doxygen",
+ namespacedef_='')
+ return rootObj
+
+
+def parseLiteral(inFileName):
+ doc = minidom.parse(inFileName)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('from compound import *\n\n')
+ sys.stdout.write('rootObj = doxygen(\n')
+ rootObj.exportLiteral(sys.stdout, 0, name_="doxygen")
+ sys.stdout.write(')\n')
+ return rootObj
+
+
+def main():
+ args = sys.argv[1:]
+ if len(args) == 1:
+ parse(args[0])
+ else:
+ usage()
+
+
+if __name__ == '__main__':
+ main()
+ #import pdb
+ #pdb.run('main()')
+
diff --git a/tools/gr-usrptest/docs/doxygen/doxyxml/generated/index.py b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/index.py
new file mode 100644
index 000000000..7a70e14a1
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/index.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+"""
+Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
+"""
+
+from xml.dom import minidom
+
+import os
+import sys
+import compound
+
+import indexsuper as supermod
+
+class DoxygenTypeSub(supermod.DoxygenType):
+ def __init__(self, version=None, compound=None):
+ supermod.DoxygenType.__init__(self, version, compound)
+
+ def find_compounds_and_members(self, details):
+ """
+ Returns a list of all compounds and their members which match details
+ """
+
+ results = []
+ for compound in self.compound:
+ members = compound.find_members(details)
+ if members:
+ results.append([compound, members])
+ else:
+ if details.match(compound):
+ results.append([compound, []])
+
+ return results
+
+supermod.DoxygenType.subclass = DoxygenTypeSub
+# end class DoxygenTypeSub
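+
+# A minimal usage sketch (illustrative only, not part of the generated file):
+# find_compounds_and_members() only requires that 'details' provide a
+# match(item) method, as used above. All names below are placeholders.
+#
+#   class NameDetails:
+#       def __init__(self, name): self.name = name
+#       def match(self, item): return item.get_name() == self.name
+#
+#   root = parse('xml/index.xml')   # parse() is defined at the end of this file
+#   hits = root.find_compounds_and_members(NameDetails('make'))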
+
+
+class CompoundTypeSub(supermod.CompoundType):
+ def __init__(self, kind=None, refid=None, name='', member=None):
+ supermod.CompoundType.__init__(self, kind, refid, name, member)
+
+ def find_members(self, details):
+ """
+ Returns a list of all members which match details
+ """
+
+ results = []
+
+ for member in self.member:
+ if details.match(member):
+ results.append(member)
+
+ return results
+
+supermod.CompoundType.subclass = CompoundTypeSub
+# end class CompoundTypeSub
+
+
+class MemberTypeSub(supermod.MemberType):
+
+ def __init__(self, kind=None, refid=None, name=''):
+ supermod.MemberType.__init__(self, kind, refid, name)
+
+supermod.MemberType.subclass = MemberTypeSub
+# end class MemberTypeSub
+
+
+def parse(inFilename):
+
+ doc = minidom.parse(inFilename)
+ rootNode = doc.documentElement
+ rootObj = supermod.DoxygenType.factory()
+ rootObj.build(rootNode)
+
+ return rootObj
+
diff --git a/tools/gr-usrptest/docs/doxygen/doxyxml/generated/indexsuper.py b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/indexsuper.py
new file mode 100644
index 000000000..a99153019
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/doxyxml/generated/indexsuper.py
@@ -0,0 +1,523 @@
+#!/usr/bin/env python
+
+#
+# Generated Thu Jun 11 18:43:54 2009 by generateDS.py.
+#
+
+import sys
+import getopt
+from string import lower as str_lower
+from xml.dom import minidom
+from xml.dom import Node
+
+#
+# User methods
+#
+# Calls to the methods in these classes are generated by generateDS.py.
+# You can replace these methods by re-implementing the following class
+# in a module named generatedssuper.py.
+
+try:
+ from generatedssuper import GeneratedsSuper
+except ImportError, exp:
+
+ class GeneratedsSuper:
+ def format_string(self, input_data, input_name=''):
+ return input_data
+ def format_integer(self, input_data, input_name=''):
+ return '%d' % input_data
+ def format_float(self, input_data, input_name=''):
+ return '%f' % input_data
+ def format_double(self, input_data, input_name=''):
+ return '%e' % input_data
+ def format_boolean(self, input_data, input_name=''):
+ return '%s' % input_data
+
+
+#
+# If you have installed IPython you can uncomment and use the following.
+# IPython is available from http://ipython.scipy.org/.
+#
+
+## from IPython.Shell import IPShellEmbed
+## args = ''
+## ipshell = IPShellEmbed(args,
+## banner = 'Dropping into IPython',
+## exit_msg = 'Leaving Interpreter, back to program.')
+
+# Then use the following line where and when you want to drop into the
+# IPython shell:
+# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
+
+#
+# Globals
+#
+
+ExternalEncoding = 'ascii'
+
+#
+# Support/utility functions.
+#
+
+def showIndent(outfile, level):
+ for idx in range(level):
+ outfile.write(' ')
+
+def quote_xml(inStr):
+ s1 = (isinstance(inStr, basestring) and inStr or
+ '%s' % inStr)
+ s1 = s1.replace('&', '&amp;')
+ s1 = s1.replace('<', '&lt;')
+ s1 = s1.replace('>', '&gt;')
+ return s1
+
+def quote_attrib(inStr):
+ s1 = (isinstance(inStr, basestring) and inStr or
+ '%s' % inStr)
+ s1 = s1.replace('&', '&amp;')
+ s1 = s1.replace('<', '&lt;')
+ s1 = s1.replace('>', '&gt;')
+ if '"' in s1:
+ if "'" in s1:
+ s1 = '"%s"' % s1.replace('"', "&quot;")
+ else:
+ s1 = "'%s'" % s1
+ else:
+ s1 = '"%s"' % s1
+ return s1
+
+def quote_python(inStr):
+ s1 = inStr
+ if s1.find("'") == -1:
+ if s1.find('\n') == -1:
+ return "'%s'" % s1
+ else:
+ return "'''%s'''" % s1
+ else:
+ if s1.find('"') != -1:
+ s1 = s1.replace('"', '\\"')
+ if s1.find('\n') == -1:
+ return '"%s"' % s1
+ else:
+ return '"""%s"""' % s1
+
+
+class MixedContainer:
+ # Constants for category:
+ CategoryNone = 0
+ CategoryText = 1
+ CategorySimple = 2
+ CategoryComplex = 3
+ # Constants for content_type:
+ TypeNone = 0
+ TypeText = 1
+ TypeString = 2
+ TypeInteger = 3
+ TypeFloat = 4
+ TypeDecimal = 5
+ TypeDouble = 6
+ TypeBoolean = 7
+ def __init__(self, category, content_type, name, value):
+ self.category = category
+ self.content_type = content_type
+ self.name = name
+ self.value = value
+ def getCategory(self):
+ return self.category
+ def getContenttype(self, content_type):
+ return self.content_type
+ def getValue(self):
+ return self.value
+ def getName(self):
+ return self.name
+ def export(self, outfile, level, name, namespace):
+ if self.category == MixedContainer.CategoryText:
+ outfile.write(self.value)
+ elif self.category == MixedContainer.CategorySimple:
+ self.exportSimple(outfile, level, name)
+ else: # category == MixedContainer.CategoryComplex
+ self.value.export(outfile, level, namespace,name)
+ def exportSimple(self, outfile, level, name):
+ if self.content_type == MixedContainer.TypeString:
+ outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeInteger or \
+ self.content_type == MixedContainer.TypeBoolean:
+ outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeFloat or \
+ self.content_type == MixedContainer.TypeDecimal:
+ outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
+ elif self.content_type == MixedContainer.TypeDouble:
+ outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+ def exportLiteral(self, outfile, level, name):
+ if self.category == MixedContainer.CategoryText:
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+ (self.category, self.content_type, self.name, self.value))
+ elif self.category == MixedContainer.CategorySimple:
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
+ (self.category, self.content_type, self.name, self.value))
+ else: # category == MixedContainer.CategoryComplex
+ showIndent(outfile, level)
+ outfile.write('MixedContainer(%d, %d, "%s",\n' % \
+ (self.category, self.content_type, self.name,))
+ self.value.exportLiteral(outfile, level + 1)
+ showIndent(outfile, level)
+ outfile.write(')\n')
+
+
+class _MemberSpec(object):
+ def __init__(self, name='', data_type='', container=0):
+ self.name = name
+ self.data_type = data_type
+ self.container = container
+ def set_name(self, name): self.name = name
+ def get_name(self): return self.name
+ def set_data_type(self, data_type): self.data_type = data_type
+ def get_data_type(self): return self.data_type
+ def set_container(self, container): self.container = container
+ def get_container(self): return self.container
+
+
+#
+# Data representation classes.
+#
+
+class DoxygenType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, version=None, compound=None):
+ self.version = version
+ if compound is None:
+ self.compound = []
+ else:
+ self.compound = compound
+ def factory(*args_, **kwargs_):
+ if DoxygenType.subclass:
+ return DoxygenType.subclass(*args_, **kwargs_)
+ else:
+ return DoxygenType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_compound(self): return self.compound
+ def set_compound(self, compound): self.compound = compound
+ def add_compound(self, value): self.compound.append(value)
+ def insert_compound(self, index, value): self.compound[index] = value
+ def get_version(self): return self.version
+ def set_version(self, version): self.version = version
+ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
+ outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
+ for compound_ in self.compound:
+ compound_.export(outfile, level, namespace_, name_='compound')
+ def hasContent_(self):
+ if (
+ self.compound is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='DoxygenType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.version is not None:
+ showIndent(outfile, level)
+ outfile.write('version = %s,\n' % (self.version,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('compound=[\n')
+ level += 1
+ for compound in self.compound:
+ showIndent(outfile, level)
+ outfile.write('model_.compound(\n')
+ compound.exportLiteral(outfile, level, name_='compound')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('version'):
+ self.version = attrs.get('version').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'compound':
+ obj_ = CompoundType.factory()
+ obj_.build(child_)
+ self.compound.append(obj_)
+# end class DoxygenType
+
+
+class CompoundType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, refid=None, name=None, member=None):
+ self.kind = kind
+ self.refid = refid
+ self.name = name
+ if member is None:
+ self.member = []
+ else:
+ self.member = member
+ def factory(*args_, **kwargs_):
+ if CompoundType.subclass:
+ return CompoundType.subclass(*args_, **kwargs_)
+ else:
+ return CompoundType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_member(self): return self.member
+ def set_member(self, member): self.member = member
+ def add_member(self, value): self.member.append(value)
+ def insert_member(self, index, value): self.member[index] = value
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='CompoundType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'):
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'):
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ for member_ in self.member:
+ member_.export(outfile, level, namespace_, name_='member')
+ def hasContent_(self):
+ if (
+ self.name is not None or
+ self.member is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='CompoundType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ showIndent(outfile, level)
+ outfile.write('member=[\n')
+ level += 1
+ for member in self.member:
+ showIndent(outfile, level)
+ outfile.write('model_.member(\n')
+ member.exportLiteral(outfile, level, name_='member')
+ showIndent(outfile, level)
+ outfile.write('),\n')
+ level -= 1
+ showIndent(outfile, level)
+ outfile.write('],\n')
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ name_ = ''
+ for text__content_ in child_.childNodes:
+ name_ += text__content_.nodeValue
+ self.name = name_
+ elif child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'member':
+ obj_ = MemberType.factory()
+ obj_.build(child_)
+ self.member.append(obj_)
+# end class CompoundType
+
+
+class MemberType(GeneratedsSuper):
+ subclass = None
+ superclass = None
+ def __init__(self, kind=None, refid=None, name=None):
+ self.kind = kind
+ self.refid = refid
+ self.name = name
+ def factory(*args_, **kwargs_):
+ if MemberType.subclass:
+ return MemberType.subclass(*args_, **kwargs_)
+ else:
+ return MemberType(*args_, **kwargs_)
+ factory = staticmethod(factory)
+ def get_name(self): return self.name
+ def set_name(self, name): self.name = name
+ def get_kind(self): return self.kind
+ def set_kind(self, kind): self.kind = kind
+ def get_refid(self): return self.refid
+ def set_refid(self, refid): self.refid = refid
+ def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''):
+ showIndent(outfile, level)
+ outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
+ self.exportAttributes(outfile, level, namespace_, name_='MemberType')
+ if self.hasContent_():
+ outfile.write('>\n')
+ self.exportChildren(outfile, level + 1, namespace_, name_)
+ showIndent(outfile, level)
+ outfile.write('</%s%s>\n' % (namespace_, name_))
+ else:
+ outfile.write(' />\n')
+ def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'):
+ outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
+ outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
+ def exportChildren(self, outfile, level, namespace_='', name_='MemberType'):
+ if self.name is not None:
+ showIndent(outfile, level)
+ outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
+ def hasContent_(self):
+ if (
+ self.name is not None
+ ):
+ return True
+ else:
+ return False
+ def exportLiteral(self, outfile, level, name_='MemberType'):
+ level += 1
+ self.exportLiteralAttributes(outfile, level, name_)
+ if self.hasContent_():
+ self.exportLiteralChildren(outfile, level, name_)
+ def exportLiteralAttributes(self, outfile, level, name_):
+ if self.kind is not None:
+ showIndent(outfile, level)
+ outfile.write('kind = "%s",\n' % (self.kind,))
+ if self.refid is not None:
+ showIndent(outfile, level)
+ outfile.write('refid = %s,\n' % (self.refid,))
+ def exportLiteralChildren(self, outfile, level, name_):
+ showIndent(outfile, level)
+ outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
+ def build(self, node_):
+ attrs = node_.attributes
+ self.buildAttributes(attrs)
+ for child_ in node_.childNodes:
+ nodeName_ = child_.nodeName.split(':')[-1]
+ self.buildChildren(child_, nodeName_)
+ def buildAttributes(self, attrs):
+ if attrs.get('kind'):
+ self.kind = attrs.get('kind').value
+ if attrs.get('refid'):
+ self.refid = attrs.get('refid').value
+ def buildChildren(self, child_, nodeName_):
+ if child_.nodeType == Node.ELEMENT_NODE and \
+ nodeName_ == 'name':
+ name_ = ''
+ for text__content_ in child_.childNodes:
+ name_ += text__content_.nodeValue
+ self.name = name_
+# end class MemberType
+
+
+USAGE_TEXT = """
+Usage: python <Parser>.py [ -s ] <in_xml_file>
+Options:
+ -s Use the SAX parser, not the minidom parser.
+"""
+
+def usage():
+ print USAGE_TEXT
+ sys.exit(1)
+
+
+def parse(inFileName):
+ doc = minidom.parse(inFileName)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('<?xml version="1.0" ?>\n')
+ rootObj.export(sys.stdout, 0, name_="doxygenindex",
+ namespacedef_='')
+ return rootObj
+
+
+def parseString(inString):
+ doc = minidom.parseString(inString)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('<?xml version="1.0" ?>\n')
+ rootObj.export(sys.stdout, 0, name_="doxygenindex",
+ namespacedef_='')
+ return rootObj
+
+
+def parseLiteral(inFileName):
+ doc = minidom.parse(inFileName)
+ rootNode = doc.documentElement
+ rootObj = DoxygenType.factory()
+ rootObj.build(rootNode)
+ # Enable Python to collect the space used by the DOM.
+ doc = None
+ sys.stdout.write('from index import *\n\n')
+ sys.stdout.write('rootObj = doxygenindex(\n')
+ rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex")
+ sys.stdout.write(')\n')
+ return rootObj
+
+
+def main():
+ args = sys.argv[1:]
+ if len(args) == 1:
+ parse(args[0])
+ else:
+ usage()
+
+
+
+
+if __name__ == '__main__':
+ main()
+ #import pdb
+ #pdb.run('main()')
+
diff --git a/tools/gr-usrptest/docs/doxygen/doxyxml/text.py b/tools/gr-usrptest/docs/doxygen/doxyxml/text.py
new file mode 100644
index 000000000..629edd180
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/doxyxml/text.py
@@ -0,0 +1,56 @@
+#
+# Copyright 2010 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+Utilities for extracting text from generated classes.
+"""
+
+def is_string(txt):
+ if isinstance(txt, str):
+ return True
+ try:
+ if isinstance(txt, unicode):
+ return True
+ except NameError:
+ pass
+ return False
+
+def description(obj):
+ if obj is None:
+ return None
+ return description_bit(obj).strip()
+
+def description_bit(obj):
+ if hasattr(obj, 'content'):
+ contents = [description_bit(item) for item in obj.content]
+ result = ''.join(contents)
+ elif hasattr(obj, 'content_'):
+ contents = [description_bit(item) for item in obj.content_]
+ result = ''.join(contents)
+ elif hasattr(obj, 'value'):
+ result = description_bit(obj.value)
+ elif is_string(obj):
+ return obj
+ else:
+ raise StandardError('Expecting a string or something with content, content_ or value attribute')
+ # If this bit is a paragraph then add some line breaks.
+ if hasattr(obj, 'name') and obj.name == 'para':
+ result += "\n\n"
+ return result
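+
+# Usage sketch (illustrative only): description() accepts any generated object
+# whose nested content eventually bottoms out in strings (a plain string also
+# works). 'para_obj' below is a placeholder for such an object.
+#
+#   text = description(para_obj)
+#   print text          # plain text, with blank lines between paragraphs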
diff --git a/tools/gr-usrptest/docs/doxygen/other/group_defs.dox b/tools/gr-usrptest/docs/doxygen/other/group_defs.dox
new file mode 100644
index 000000000..745fc6679
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/other/group_defs.dox
@@ -0,0 +1,7 @@
+/*!
+ * \defgroup block GNU Radio USRPTEST C++ Signal Processing Blocks
+ * \brief All C++ blocks that can be used from the USRPTEST GNU Radio
+ * module are listed here or in the subcategories below.
+ *
+ */
+
diff --git a/tools/gr-usrptest/docs/doxygen/other/main_page.dox b/tools/gr-usrptest/docs/doxygen/other/main_page.dox
new file mode 100644
index 000000000..2ae0bef14
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/other/main_page.dox
@@ -0,0 +1,10 @@
+/*! \mainpage
+
+Welcome to the GNU Radio USRPTEST Block
+
+This is the intro page for the Doxygen manual generated for the USRPTEST
+block (docs/doxygen/other/main_page.dox). Edit it to add more detailed
+documentation about the new GNU Radio modules contained in this
+project.
+
+*/
diff --git a/tools/gr-usrptest/docs/doxygen/swig_doc.py b/tools/gr-usrptest/docs/doxygen/swig_doc.py
new file mode 100644
index 000000000..d3536db8d
--- /dev/null
+++ b/tools/gr-usrptest/docs/doxygen/swig_doc.py
@@ -0,0 +1,328 @@
+#
+# Copyright 2010-2012 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+"""
+Creates the swig_doc.i SWIG interface file.
+Execute using: python swig_doc.py xml_path outputfilename
+
+The file instructs SWIG to transfer the doxygen comments into the
+python docstrings.
+
+"""
+
+import sys, time
+
+from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile
+from doxyxml import DoxyOther, base
+
+def py_name(name):
+ bits = name.split('_')
+ return '_'.join(bits[1:])
+
+def make_name(name):
+ bits = name.split('_')
+ return bits[0] + '_make_' + '_'.join(bits[1:])
+
+
+class Block(object):
+ """
+ Checks if doxyxml produced objects correspond to a gnuradio block.
+ """
+
+ @classmethod
+ def includes(cls, item):
+ if not isinstance(item, DoxyClass):
+ return False
+ # Check for a parsing error.
+ if item.error():
+ return False
+ friendname = make_name(item.name())
+ is_a_block = item.has_member(friendname, DoxyFriend)
+ # But sometimes the make function isn't a friend, so check again.
+ if not is_a_block:
+ is_a_block = di.has_member(friendname, DoxyFunction)
+ return is_a_block
+
+class Block2(object):
+ """
+ Checks if doxyxml produced objects correspond to a new style
+ gnuradio block.
+ """
+
+ @classmethod
+ def includes(cls, item):
+ if not isinstance(item, DoxyClass):
+ return False
+ # Check for a parsing error.
+ if item.error():
+ return False
+ is_a_block2 = item.has_member('make', DoxyFunction) and item.has_member('sptr', DoxyOther)
+ return is_a_block2
+
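+# Illustrative distinction (names are hypothetical): an old-style class
+# ``gr_foo`` is picked up by Block when the index contains a factory
+# ``gr_make_foo``, either as a friend of the class or as a free function;
+# a new-style class is picked up by Block2 when it exposes its own ``make``
+# function together with an ``sptr`` member.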
+
+def utoascii(text):
+ """
+ Convert unicode text into ascii and escape quotes.
+ """
+ if text is None:
+ return ''
+ out = text.encode('ascii', 'replace')
+ out = out.replace('"', '\\"')
+ return out
+
+
+def combine_descriptions(obj):
+ """
+ Combines the brief and detailed descriptions of an object together.
+ """
+ description = []
+ bd = obj.brief_description.strip()
+ dd = obj.detailed_description.strip()
+ if bd:
+ description.append(bd)
+ if dd:
+ description.append(dd)
+ return utoascii('\n\n'.join(description)).strip()
+
+def format_params(parameteritems):
+ output = ['Args:']
+ template = ' {0} : {1}'
+ for pi in parameteritems:
+ output.append(template.format(pi.name, pi.description))
+ return '\n'.join(output)
+
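+# For example (illustrative values), format_params() for a single parameter
+# named ``freq`` described as "center frequency in Hz" produces:
+#   Args:
+#       freq : center frequency in Hz
+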
+entry_templ = '%feature("docstring") {name} "{docstring}"'
+def make_entry(obj, name=None, templ="{description}", description=None, params=[]):
+ """
+ Create a docstring entry for a swig interface file.
+
+ obj - a doxyxml object from which documentation will be extracted.
+ name - the name of the C object (defaults to obj.name())
+ templ - an optional template for the docstring containing only one
+ variable named 'description'.
+ description - if this optional variable is set then its value is
+ used as the description instead of extracting it from obj.
+ """
+ if name is None:
+ name=obj.name()
+ if "operator " in name:
+ return ''
+ if description is None:
+ description = combine_descriptions(obj)
+ if params:
+ description += '\n\n'
+ description += utoascii(format_params(params))
+ docstring = templ.format(description=description)
+ if not docstring:
+ return ''
+ return entry_templ.format(
+ name=name,
+ docstring=docstring,
+ )
+
+
+def make_func_entry(func, name=None, description=None, params=None):
+ """
+ Create a function docstring entry for a swig interface file.
+
+ func - a doxyxml object from which documentation will be extracted.
+ name - the name of the C object (defaults to func.name())
+ description - if this optional variable is set then its value is
+ used as the description instead of extracting it from func.
+ params - a parameter list that overrides using func.params.
+ """
+ #if params is None:
+ # params = func.params
+ #params = [prm.declname for prm in params]
+ #if params:
+ # sig = "Params: (%s)" % ", ".join(params)
+ #else:
+ # sig = "Params: (NONE)"
+ #templ = "{description}\n\n" + sig
+ #return make_entry(func, name=name, templ=utoascii(templ),
+ # description=description)
+ return make_entry(func, name=name, description=description, params=params)
+
+
+def make_class_entry(klass, description=None, ignored_methods=[], params=None):
+ """
+ Create a class docstring for a swig interface file.
+ """
+ if params is None:
+ params = klass.params
+ output = []
+ output.append(make_entry(klass, description=description, params=params))
+ for func in klass.in_category(DoxyFunction):
+ if func.name() not in ignored_methods:
+ name = klass.name() + '::' + func.name()
+ output.append(make_func_entry(func, name=name))
+ return "\n\n".join(output)
+
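+# Illustrative result for a hypothetical class ``foo`` with a method ``bar``
+# (descriptions abbreviated):
+#   %feature("docstring") foo "<class description>"
+#
+#   %feature("docstring") foo::bar "<method description>"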
+
+def make_block_entry(di, block):
+ """
+ Create class and function docstrings of a gnuradio block for a
+ swig interface file.
+ """
+ descriptions = []
+ # Get the documentation associated with the class.
+ class_desc = combine_descriptions(block)
+ if class_desc:
+ descriptions.append(class_desc)
+ # Get the documentation associated with the make function
+ make_func = di.get_member(make_name(block.name()), DoxyFunction)
+ make_func_desc = combine_descriptions(make_func)
+ if make_func_desc:
+ descriptions.append(make_func_desc)
+ # Get the documentation associated with the file
+ try:
+ block_file = di.get_member(block.name() + ".h", DoxyFile)
+ file_desc = combine_descriptions(block_file)
+ if file_desc:
+ descriptions.append(file_desc)
+ except base.Base.NoSuchMember:
+ # Don't worry if we can't find a matching file.
+ pass
+ # And join them all together to make a super duper description.
+ super_description = "\n\n".join(descriptions)
+ # Associate the combined description with the class and
+ # the make function.
+ output = []
+ output.append(make_class_entry(block, description=super_description))
+ output.append(make_func_entry(make_func, description=super_description,
+ params=block.params))
+ return "\n\n".join(output)
+
+def make_block2_entry(di, block):
+ """
+ Create class and function docstrings of a new style gnuradio block for a
+ swig interface file.
+ """
+ descriptions = []
+ # For new style blocks all the relevant documentation should be
+ # associated with the 'make' method.
+ class_description = combine_descriptions(block)
+ make_func = block.get_member('make', DoxyFunction)
+ make_description = combine_descriptions(make_func)
+ description = class_description + "\n\nConstructor Specific Documentation:\n\n" + make_description
+ # Associate the combined description with the class and
+ # the make function.
+ output = []
+ output.append(make_class_entry(
+ block, description=description,
+ ignored_methods=['make'], params=make_func.params))
+ makename = block.name() + '::make'
+ output.append(make_func_entry(
+ make_func, name=makename, description=description,
+ params=make_func.params))
+ return "\n\n".join(output)
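+# The combined description therefore reads roughly as (illustrative):
+#   <class brief/detailed text>
+#
+#   Constructor Specific Documentation:
+#
+#   <make() brief/detailed text>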
+
+def make_swig_interface_file(di, swigdocfilename, custom_output=None):
+
+ output = ["""
+/*
+ * This file was automatically generated using swig_doc.py.
+ *
+ * Any changes to it will be lost next time it is regenerated.
+ */
+"""]
+
+ if custom_output is not None:
+ output.append(custom_output)
+
+ # Create docstrings for the blocks.
+ blocks = di.in_category(Block)
+ blocks2 = di.in_category(Block2)
+
+ make_funcs = set([])
+ for block in blocks:
+ try:
+ make_func = di.get_member(make_name(block.name()), DoxyFunction)
+ # Don't want to risk writing to output twice.
+ if make_func.name() not in make_funcs:
+ make_funcs.add(make_func.name())
+ output.append(make_block_entry(di, block))
+ except block.ParsingError:
+ sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
+ raise
+
+ for block in blocks2:
+ try:
+ make_func = block.get_member('make', DoxyFunction)
+ make_func_name = block.name() +'::make'
+ # Don't want to risk writing to output twice.
+ if make_func_name not in make_funcs:
+ make_funcs.add(make_func_name)
+ output.append(make_block2_entry(di, block))
+ except block.ParsingError:
+ sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
+ raise
+
+ # Create docstrings for functions
+ # Don't include the make functions since they have already been dealt with.
+ funcs = [f for f in di.in_category(DoxyFunction)
+ if f.name() not in make_funcs and not f.name().startswith('std::')]
+ for f in funcs:
+ try:
+ output.append(make_func_entry(f))
+ except f.ParsingError:
+ sys.stderr.write('Parsing error for function {0}\n'.format(f.name()))
+
+ # Create docstrings for classes
+ block_names = [block.name() for block in blocks]
+ block_names += [block.name() for block in blocks2]
+ klasses = [k for k in di.in_category(DoxyClass)
+ if k.name() not in block_names and not k.name().startswith('std::')]
+ for k in klasses:
+ try:
+ output.append(make_class_entry(k))
+ except k.ParsingError:
+ sys.stderr.write('Parsing error for class {0}\n'.format(k.name()))
+
+ # Docstrings are not created for anything that is not a function or a class.
+ # If this excludes anything important please add it here.
+
+ output = "\n\n".join(output)
+
+ swig_doc = file(swigdocfilename, 'w')
+ swig_doc.write(output)
+ swig_doc.close()
+
+if __name__ == "__main__":
+ # Parse command line options and set up doxyxml.
+ err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
+ if len(sys.argv) != 3:
+ raise StandardError(err_msg)
+ xml_path = sys.argv[1]
+ swigdocfilename = sys.argv[2]
+ di = DoxyIndex(xml_path)
+
+ # gnuradio.gr.msg_queue.insert_tail and delete_head create errors unless docstrings are defined!
+ # This is presumably a bug in SWIG.
+ #msg_q = di.get_member(u'gr_msg_queue', DoxyClass)
+ #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction)
+ #delete_head = msg_q.get_member(u'delete_head', DoxyFunction)
+ output = []
+ #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail'))
+ #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head'))
+ custom_output = "\n\n".join(output)
+
+ # Generate the docstrings interface file.
+ make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
diff --git a/tools/gr-usrptest/examples/lv_control_example.py b/tools/gr-usrptest/examples/lv_control_example.py
new file mode 100755
index 000000000..dc2a7be2f
--- /dev/null
+++ b/tools/gr-usrptest/examples/lv_control_example.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+
+from usrptest.labview_control import lv_control
+import time
+import numpy as np
+import argparse
+import sys
+
+def test00(freq, frange, steps):
+ freqrange = np.arange(freq,freq+frange,steps)
+ source = 'VST-Out'
+ sink = 'X3x0-4-B-RX2'
+ host = 'pollux'
+ base_path = 'C:\\Users\\sdrtest\\git\\labview-test\\labview\\RTS_Control\\Host\\'
+ print('connecting to switch')
+ switch = lv_control.executive_switch(host,base_path,'RTSwitch')
+ print('connecting to siggen')
+ siggen = lv_control.vst_siggen(host,base_path,'RIO0')
+ # Configure RF-Switching
+ switch.connect_ports(source, sink)
+
+ # Sweep over freqrange
+ for freq in freqrange:
+ siggen.set_freq(float(freq))
+ time.sleep(1)
+ # Shutdown Siggen
+ siggen.disconnect()
+ switch.disconnect_all()
+
+def test01(freq, frange, steps):
+ freqrange = np.arange(freq,freq+frange,steps)
+ source = 'VST-Out'
+ sink0 = 'X3x0-4-B-RX2'
+ sink1 = 'X3x0-2-B-RX2'
+ host = 'pollux'
+ base_path = 'C:\\Users\\sdrtest\\git\\labview-test\\labview\\RTS_Control\\Host\\'
+ print('connecting to switch')
+ switch = lv_control.executive_switch(host,base_path,'RTSwitch')
+ print('connecting to siggen')
+ siggen = lv_control.vst_siggen(host,base_path,'RIO0')
+ for freq in freqrange:
+ siggen.set_freq(float(freq))
+ print('retuning siggen to {freq} MHz'.format(freq=freq/1e6))
+ switch.connect_ports(source,sink0)
+ time.sleep(0.5)
+ switch.connect_ports(source,sink1)
+ time.sleep(0.5)
+ switch.disconnect_all()
+ time.sleep(0.2)
+ siggen.disconnect()
+ switch.disconnect_all()
+
+
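+# Hypothetical invocation (assumes the LabVIEW control host 'pollux' configured
+# above is reachable):
+#   python lv_control_example.py --test test00 --freq 1e9 --range 100e6 --steps 1e6
+# runs test00(), sweeping the VST signal generator from 1.0 GHz towards 1.1 GHz
+# in 1 MHz steps while the RF switch routes 'VST-Out' to 'X3x0-4-B-RX2'.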
+if __name__ == '__main__':
+ thismodule = sys.modules[__name__]
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '-t',
+ '--test',
+ help='which testcase?'
+ )
+ parser.add_argument(
+ '-f',
+ '--freq',
+ type=float,
+ help='which center freq?'
+ )
+ parser.add_argument(
+ '-r',
+ '--range',
+ type=float,
+ help='which freq range?'
+ )
+ parser.add_argument(
+ '--steps',
+ type=float,
+ default=1e6,
+ help='which frequency step size?'
+ )
+ args = parser.parse_args()
+ getattr(thismodule,args.test)(args.freq,args.range,args.steps)
diff --git a/tools/gr-usrptest/examples/phase_diff_x310_ubx_example.grc b/tools/gr-usrptest/examples/phase_diff_x310_ubx_example.grc
new file mode 100644
index 000000000..f6cbe3d83
--- /dev/null
+++ b/tools/gr-usrptest/examples/phase_diff_x310_ubx_example.grc
@@ -0,0 +1,3296 @@
+<?xml version='1.0' encoding='utf-8'?>
+<?grc format='1' created='3.7.11'?>
+<flow_graph>
+ <timestamp>Thu Oct 6 14:49:17 2016</timestamp>
+ <block>
+ <key>options</key>
+ <param>
+ <key>author</key>
+ <value></value>
+ </param>
+ <param>
+ <key>window_size</key>
+ <value></value>
+ </param>
+ <param>
+ <key>category</key>
+ <value>[GRC Hier Blocks]</value>
+ </param>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>description</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(8, 8)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>generate_options</key>
+ <value>qt_gui</value>
+ </param>
+ <param>
+ <key>hier_block_src_path</key>
+ <value>.:</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>phase_difference_x3x0_example</value>
+ </param>
+ <param>
+ <key>max_nouts</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>qt_qss_theme</key>
+ <value></value>
+ </param>
+ <param>
+ <key>realtime_scheduling</key>
+ <value></value>
+ </param>
+ <param>
+ <key>run_command</key>
+ <value>{python} -u {filename}</value>
+ </param>
+ <param>
+ <key>run_options</key>
+ <value>prompt</value>
+ </param>
+ <param>
+ <key>run</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>thread_safe_setters</key>
+ <value></value>
+ </param>
+ <param>
+ <key>title</key>
+ <value></value>
+ </param>
+ </block>
+ <block>
+ <key>variable</key>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(8, 100)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>baseband</value>
+ </param>
+ <param>
+ <key>value</key>
+ <value>3e9</value>
+ </param>
+ </block>
+ <block>
+ <key>variable_qtgui_range</key>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>value</key>
+ <value>10e6</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(200, 112)</value>
+ </param>
+ <param>
+ <key>gui_hint</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>rx_base</value>
+ </param>
+ <param>
+ <key>label</key>
+ <value></value>
+ </param>
+ <param>
+ <key>min_len</key>
+ <value>200</value>
+ </param>
+ <param>
+ <key>orient</key>
+ <value>Qt.Horizontal</value>
+ </param>
+ <param>
+ <key>start</key>
+ <value>10e6</value>
+ </param>
+ <param>
+ <key>step</key>
+ <value>50e6</value>
+ </param>
+ <param>
+ <key>stop</key>
+ <value>6e9</value>
+ </param>
+ <param>
+ <key>rangeType</key>
+ <value>float</value>
+ </param>
+ <param>
+ <key>widget</key>
+ <value>counter_slider</value>
+ </param>
+ </block>
+ <block>
+ <key>variable</key>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(8, 160)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>value</key>
+ <value>10e6</value>
+ </param>
+ </block>
+ <block>
+ <key>variable_qtgui_range</key>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>value</key>
+ <value>10e6</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(168, 240)</value>
+ </param>
+ <param>
+ <key>gui_hint</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>tx_base</value>
+ </param>
+ <param>
+ <key>label</key>
+ <value></value>
+ </param>
+ <param>
+ <key>min_len</key>
+ <value>200</value>
+ </param>
+ <param>
+ <key>orient</key>
+ <value>Qt.Horizontal</value>
+ </param>
+ <param>
+ <key>start</key>
+ <value>10e6</value>
+ </param>
+ <param>
+ <key>step</key>
+ <value>50e6</value>
+ </param>
+ <param>
+ <key>stop</key>
+ <value>6e9</value>
+ </param>
+ <param>
+ <key>rangeType</key>
+ <value>float</value>
+ </param>
+ <param>
+ <key>widget</key>
+ <value>counter_slider</value>
+ </param>
+ </block>
+ <block>
+ <key>analog_sig_source_x</key>
+ <param>
+ <key>amp</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alias</key>
+ <value></value>
+ </param>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>affinity</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>freq</key>
+ <value>100e3</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(440, 112)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>analog_sig_source_x_1</value>
+ </param>
+ <param>
+ <key>maxoutbuf</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>minoutbuf</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>offset</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>samp_rate</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>waveform</key>
+ <value>analog.GR_COS_WAVE</value>
+ </param>
+ </block>
+ <block>
+ <key>qtgui_time_sink_x</key>
+ <param>
+ <key>autoscale</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>axislabels</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>alias</key>
+ <value></value>
+ </param>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>ctrlpanel</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>affinity</key>
+ <value></value>
+ </param>
+ <param>
+ <key>entags</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(768, 404)</value>
+ </param>
+ <param>
+ <key>gui_hint</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>grid</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>qtgui_time_sink_x_0</value>
+ </param>
+ <param>
+ <key>legend</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>alpha1</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color1</key>
+ <value>"blue"</value>
+ </param>
+ <param>
+ <key>label1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker1</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style1</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width1</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha10</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color10</key>
+ <value>"blue"</value>
+ </param>
+ <param>
+ <key>label10</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker10</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style10</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width10</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha2</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color2</key>
+ <value>"red"</value>
+ </param>
+ <param>
+ <key>label2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker2</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style2</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width2</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha3</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color3</key>
+ <value>"green"</value>
+ </param>
+ <param>
+ <key>label3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker3</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style3</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width3</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha4</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color4</key>
+ <value>"black"</value>
+ </param>
+ <param>
+ <key>label4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker4</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style4</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width4</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha5</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color5</key>
+ <value>"cyan"</value>
+ </param>
+ <param>
+ <key>label5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker5</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style5</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width5</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha6</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color6</key>
+ <value>"magenta"</value>
+ </param>
+ <param>
+ <key>label6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker6</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style6</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width6</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha7</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color7</key>
+ <value>"yellow"</value>
+ </param>
+ <param>
+ <key>label7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker7</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style7</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width7</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha8</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color8</key>
+ <value>"dark red"</value>
+ </param>
+ <param>
+ <key>label8</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker8</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style8</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width8</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha9</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color9</key>
+ <value>"dark green"</value>
+ </param>
+ <param>
+ <key>label9</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker9</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style9</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width9</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>name</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>nconnections</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>size</key>
+ <value>1024</value>
+ </param>
+ <param>
+ <key>srate</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>tr_chan</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>tr_delay</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>tr_level</key>
+ <value>0.0</value>
+ </param>
+ <param>
+ <key>tr_mode</key>
+ <value>qtgui.TRIG_MODE_FREE</value>
+ </param>
+ <param>
+ <key>tr_slope</key>
+ <value>qtgui.TRIG_SLOPE_POS</value>
+ </param>
+ <param>
+ <key>tr_tag</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>float</value>
+ </param>
+ <param>
+ <key>update_time</key>
+ <value>0.10</value>
+ </param>
+ <param>
+ <key>ylabel</key>
+ <value>Amplitude</value>
+ </param>
+ <param>
+ <key>yunit</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>ymax</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>ymin</key>
+ <value>-1</value>
+ </param>
+ </block>
+ <block>
+ <key>qtgui_time_sink_x</key>
+ <param>
+ <key>autoscale</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>axislabels</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>alias</key>
+ <value></value>
+ </param>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>ctrlpanel</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>affinity</key>
+ <value></value>
+ </param>
+ <param>
+ <key>entags</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(664, 280)</value>
+ </param>
+ <param>
+ <key>gui_hint</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>grid</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>qtgui_time_sink_x_1</value>
+ </param>
+ <param>
+ <key>legend</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>alpha1</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color1</key>
+ <value>"blue"</value>
+ </param>
+ <param>
+ <key>label1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker1</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style1</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width1</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha10</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color10</key>
+ <value>"blue"</value>
+ </param>
+ <param>
+ <key>label10</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker10</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style10</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width10</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha2</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color2</key>
+ <value>"red"</value>
+ </param>
+ <param>
+ <key>label2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker2</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style2</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width2</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha3</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color3</key>
+ <value>"green"</value>
+ </param>
+ <param>
+ <key>label3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker3</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style3</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width3</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha4</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color4</key>
+ <value>"black"</value>
+ </param>
+ <param>
+ <key>label4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker4</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style4</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width4</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha5</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color5</key>
+ <value>"cyan"</value>
+ </param>
+ <param>
+ <key>label5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker5</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style5</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width5</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha6</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color6</key>
+ <value>"magenta"</value>
+ </param>
+ <param>
+ <key>label6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker6</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style6</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width6</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha7</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color7</key>
+ <value>"yellow"</value>
+ </param>
+ <param>
+ <key>label7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker7</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style7</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width7</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha8</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color8</key>
+ <value>"dark red"</value>
+ </param>
+ <param>
+ <key>label8</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker8</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style8</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width8</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>alpha9</key>
+ <value>1.0</value>
+ </param>
+ <param>
+ <key>color9</key>
+ <value>"dark green"</value>
+ </param>
+ <param>
+ <key>label9</key>
+ <value></value>
+ </param>
+ <param>
+ <key>marker9</key>
+ <value>-1</value>
+ </param>
+ <param>
+ <key>style9</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>width9</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>name</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>nconnections</key>
+ <value>2</value>
+ </param>
+ <param>
+ <key>size</key>
+ <value>1024</value>
+ </param>
+ <param>
+ <key>srate</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>tr_chan</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>tr_delay</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>tr_level</key>
+ <value>0.0</value>
+ </param>
+ <param>
+ <key>tr_mode</key>
+ <value>qtgui.TRIG_MODE_FREE</value>
+ </param>
+ <param>
+ <key>tr_slope</key>
+ <value>qtgui.TRIG_SLOPE_POS</value>
+ </param>
+ <param>
+ <key>tr_tag</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>complex</value>
+ </param>
+ <param>
+ <key>update_time</key>
+ <value>0.10</value>
+ </param>
+ <param>
+ <key>ylabel</key>
+ <value>Amplitude</value>
+ </param>
+ <param>
+ <key>yunit</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>ymax</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>ymin</key>
+ <value>-1</value>
+ </param>
+ </block>
+ <block>
+ <key>uhd_usrp_sink</key>
+ <param>
+ <key>alias</key>
+ <value></value>
+ </param>
+ <param>
+ <key>ant0</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw0</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq0</key>
+ <value>tx_base</value>
+ </param>
+ <param>
+ <key>norm_gain0</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain0</key>
+ <value>15</value>
+ </param>
+ <param>
+ <key>ant10</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw10</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq10</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain10</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain10</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant11</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw11</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq11</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain11</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain11</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant12</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw12</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq12</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain12</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain12</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant13</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw13</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq13</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain13</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain13</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant14</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw14</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq14</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain14</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain14</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant15</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw15</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq15</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain15</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain15</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant16</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw16</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq16</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain16</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain16</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant17</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw17</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq17</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain17</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain17</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant18</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw18</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq18</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain18</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain18</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant19</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw19</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq19</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain19</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain19</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw1</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq1</key>
+ <value>tx_base</value>
+ </param>
+ <param>
+ <key>norm_gain1</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain1</key>
+ <value>15</value>
+ </param>
+ <param>
+ <key>ant20</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw20</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq20</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain20</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain20</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant21</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw21</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq21</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain21</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain21</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant22</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw22</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq22</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain22</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain22</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant23</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw23</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq23</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain23</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain23</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant24</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw24</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq24</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain24</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain24</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant25</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw25</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq25</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain25</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain25</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant26</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw26</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq26</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain26</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain26</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant27</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw27</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq27</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain27</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain27</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant28</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw28</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq28</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain28</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain28</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant29</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw29</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq29</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain29</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain29</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw2</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq2</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain2</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain2</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant30</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw30</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq30</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain30</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain30</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant31</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw31</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq31</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain31</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain31</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw3</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq3</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain3</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain3</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw4</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq4</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain4</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain4</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw5</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq5</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain5</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain5</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw6</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq6</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain6</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain6</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw7</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq7</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain7</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain7</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant8</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw8</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq8</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain8</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain8</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>ant9</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw9</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq9</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>norm_gain9</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain9</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>clock_rate</key>
+ <value>0.0</value>
+ </param>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>affinity</key>
+ <value></value>
+ </param>
+ <param>
+ <key>dev_addr</key>
+ <value>"addr=192.168.10.2"</value>
+ </param>
+ <param>
+ <key>dev_args</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(816, 104)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>uhd_usrp_sink_1</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>fc32</value>
+ </param>
+ <param>
+ <key>clock_source0</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec0</key>
+ <value>"A:0 B:0"</value>
+ </param>
+ <param>
+ <key>time_source0</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>nchan</key>
+ <value>2</value>
+ </param>
+ <param>
+ <key>num_mboards</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>samp_rate</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>hide_cmd_port</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>hide_lo_controls</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>stream_args</key>
+ <value></value>
+ </param>
+ <param>
+ <key>stream_chans</key>
+ <value>[]</value>
+ </param>
+ <param>
+ <key>sync</key>
+ <value></value>
+ </param>
+ <param>
+ <key>len_tag_name</key>
+ <value></value>
+ </param>
+ <param>
+ <key>otw</key>
+ <value></value>
+ </param>
+ </block>
+ <block>
+ <key>uhd_usrp_source</key>
+ <param>
+ <key>alias</key>
+ <value></value>
+ </param>
+ <param>
+ <key>ant0</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw0</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq0</key>
+ <value>rx_base</value>
+ </param>
+ <param>
+ <key>dc_offs_enb0</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb0</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain0</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain0</key>
+ <value>15</value>
+ </param>
+ <param>
+ <key>lo_export0</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source0</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant10</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw10</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq10</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb10</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb10</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain10</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain10</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export10</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source10</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant11</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw11</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq11</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb11</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb11</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain11</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain11</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export11</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source11</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant12</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw12</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq12</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb12</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb12</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain12</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain12</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export12</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source12</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant13</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw13</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq13</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb13</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb13</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain13</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain13</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export13</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source13</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant14</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw14</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq14</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb14</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb14</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain14</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain14</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export14</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source14</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant15</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw15</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq15</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb15</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb15</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain15</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain15</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export15</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source15</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant16</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw16</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq16</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb16</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb16</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain16</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain16</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export16</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source16</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant17</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw17</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq17</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb17</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb17</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain17</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain17</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export17</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source17</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant18</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw18</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq18</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb18</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb18</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain18</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain18</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export18</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source18</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant19</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw19</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq19</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb19</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb19</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain19</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain19</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export19</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source19</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw1</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq1</key>
+ <value>rx_base</value>
+ </param>
+ <param>
+ <key>dc_offs_enb1</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb1</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain1</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain1</key>
+ <value>15</value>
+ </param>
+ <param>
+ <key>lo_export1</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source1</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant20</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw20</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq20</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb20</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb20</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain20</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain20</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export20</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source20</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant21</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw21</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq21</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb21</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb21</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain21</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain21</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export21</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source21</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant22</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw22</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq22</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb22</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb22</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain22</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain22</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export22</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source22</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant23</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw23</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq23</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb23</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb23</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain23</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain23</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export23</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source23</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant24</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw24</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq24</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb24</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb24</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain24</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain24</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export24</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source24</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant25</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw25</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq25</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb25</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb25</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain25</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain25</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export25</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source25</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant26</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw26</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq26</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb26</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb26</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain26</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain26</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export26</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source26</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant27</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw27</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq27</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb27</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb27</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain27</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain27</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export27</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source27</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant28</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw28</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq28</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb28</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb28</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain28</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain28</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export28</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source28</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant29</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw29</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq29</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb29</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb29</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain29</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain29</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export29</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source29</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw2</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq2</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb2</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb2</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain2</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain2</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export2</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source2</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant30</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw30</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq30</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb30</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb30</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain30</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain30</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export30</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source30</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant31</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw31</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq31</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb31</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb31</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain31</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain31</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export31</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source31</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw3</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq3</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb3</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb3</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain3</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain3</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export3</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source3</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw4</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq4</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb4</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb4</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain4</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain4</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export4</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source4</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw5</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq5</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb5</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb5</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain5</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain5</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export5</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source5</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw6</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq6</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb6</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb6</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain6</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain6</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export6</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source6</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw7</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq7</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb7</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb7</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain7</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain7</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export7</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source7</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant8</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw8</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq8</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb8</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb8</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain8</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain8</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export8</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source8</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>ant9</key>
+ <value></value>
+ </param>
+ <param>
+ <key>bw9</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>center_freq9</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>dc_offs_enb9</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>iq_imbal_enb9</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>norm_gain9</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>gain9</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>lo_export9</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>lo_source9</key>
+ <value>internal</value>
+ </param>
+ <param>
+ <key>clock_rate</key>
+ <value>0.0</value>
+ </param>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>affinity</key>
+ <value></value>
+ </param>
+ <param>
+ <key>dev_addr</key>
+ <value>"addr=192.168.10.2"</value>
+ </param>
+ <param>
+ <key>dev_args</key>
+ <value>""</value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(320, 376)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>uhd_usrp_source_0</value>
+ </param>
+ <param>
+ <key>maxoutbuf</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>clock_source0</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec0</key>
+ <value>"A:0 B:0"</value>
+ </param>
+ <param>
+ <key>time_source0</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source1</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source2</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source3</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source4</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source5</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source6</key>
+ <value></value>
+ </param>
+ <param>
+ <key>clock_source7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>sd_spec7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>time_source7</key>
+ <value></value>
+ </param>
+ <param>
+ <key>minoutbuf</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>nchan</key>
+ <value>2</value>
+ </param>
+ <param>
+ <key>num_mboards</key>
+ <value>1</value>
+ </param>
+ <param>
+ <key>type</key>
+ <value>fc32</value>
+ </param>
+ <param>
+ <key>samp_rate</key>
+ <value>samp_rate</value>
+ </param>
+ <param>
+ <key>hide_cmd_port</key>
+ <value>False</value>
+ </param>
+ <param>
+ <key>hide_lo_controls</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>stream_args</key>
+ <value></value>
+ </param>
+ <param>
+ <key>stream_chans</key>
+ <value>[]</value>
+ </param>
+ <param>
+ <key>sync</key>
+ <value></value>
+ </param>
+ <param>
+ <key>otw</key>
+ <value></value>
+ </param>
+ </block>
+ <block>
+ <key>usrptest_phase_calc_ccf</key>
+ <param>
+ <key>alias</key>
+ <value></value>
+ </param>
+ <param>
+ <key>comment</key>
+ <value></value>
+ </param>
+ <param>
+ <key>affinity</key>
+ <value></value>
+ </param>
+ <param>
+ <key>_enabled</key>
+ <value>True</value>
+ </param>
+ <param>
+ <key>_coordinate</key>
+ <value>(568, 408)</value>
+ </param>
+ <param>
+ <key>_rotation</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>id</key>
+ <value>usrptest_phase_calc_ccf_0</value>
+ </param>
+ <param>
+ <key>maxoutbuf</key>
+ <value>0</value>
+ </param>
+ <param>
+ <key>minoutbuf</key>
+ <value>0</value>
+ </param>
+ </block>
+ <connection>
+ <source_block_id>analog_sig_source_x_1</source_block_id>
+ <sink_block_id>uhd_usrp_sink_1</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>analog_sig_source_x_1</source_block_id>
+ <sink_block_id>uhd_usrp_sink_1</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>1</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>uhd_usrp_source_0</source_block_id>
+ <sink_block_id>qtgui_time_sink_x_1</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>uhd_usrp_source_0</source_block_id>
+ <sink_block_id>usrptest_phase_calc_ccf_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>uhd_usrp_source_0</source_block_id>
+ <sink_block_id>qtgui_time_sink_x_1</sink_block_id>
+ <source_key>1</source_key>
+ <sink_key>1</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>uhd_usrp_source_0</source_block_id>
+ <sink_block_id>usrptest_phase_calc_ccf_0</sink_block_id>
+ <source_key>1</source_key>
+ <sink_key>1</sink_key>
+ </connection>
+ <connection>
+ <source_block_id>usrptest_phase_calc_ccf_0</source_block_id>
+ <sink_block_id>qtgui_time_sink_x_0</sink_block_id>
+ <source_key>0</source_key>
+ <sink_key>0</sink_key>
+ </connection>
+</flow_graph>
diff --git a/tools/gr-usrptest/grc/CMakeLists.txt b/tools/gr-usrptest/grc/CMakeLists.txt
new file mode 100644
index 000000000..47f494abf
--- /dev/null
+++ b/tools/gr-usrptest/grc/CMakeLists.txt
@@ -0,0 +1,23 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+install(FILES
+ usrptest_phase_calc_ccf.xml
+ DESTINATION share/gnuradio/grc/blocks
+)
diff --git a/tools/gr-usrptest/grc/usrptest_measurement_sink_f.xml b/tools/gr-usrptest/grc/usrptest_measurement_sink_f.xml
new file mode 100644
index 000000000..c681e457f
--- /dev/null
+++ b/tools/gr-usrptest/grc/usrptest_measurement_sink_f.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<block>
+  <name>measurement_sink_f</name>
+  <key>usrptest_measurement_sink_f</key>
+  <category>[usrptest]</category>
+  <import>import usrptest</import>
+  <make>usrptest.measurement_sink_f($num_samples, $runs)</make>
+  <param>
+    <name>Samples per run</name>
+    <key>num_samples</key>
+    <type>int</type>
+  </param>
+  <param>
+    <name>Runs</name>
+    <key>runs</key>
+    <type>int</type>
+  </param>
+  <sink>
+    <name>in</name>
+    <type>float</type>
+  </sink>
+</block>
diff --git a/tools/gr-usrptest/grc/usrptest_phase_calc_ccf.xml b/tools/gr-usrptest/grc/usrptest_phase_calc_ccf.xml
new file mode 100644
index 000000000..4c98f56eb
--- /dev/null
+++ b/tools/gr-usrptest/grc/usrptest_phase_calc_ccf.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<block>
+ <name>Calculate phase diff</name>
+ <key>usrptest_phase_calc_ccf</key>
+ <category>[usrptest]</category>
+ <import>import usrptest</import>
+ <make>usrptest.phase_calc_ccf()</make>
+ <sink>
+ <name>in1</name>
+ <type>complex</type>
+ </sink>
+ <sink>
+ <name>in2</name>
+ <type>complex</type>
+ </sink>
+ <source>
+ <name>out</name>
+ <type>float</type>
+ </source>
+</block>
diff --git a/tools/gr-usrptest/include/usrptest/CMakeLists.txt b/tools/gr-usrptest/include/usrptest/CMakeLists.txt
new file mode 100644
index 000000000..761a8bee4
--- /dev/null
+++ b/tools/gr-usrptest/include/usrptest/CMakeLists.txt
@@ -0,0 +1,26 @@
+# Copyright 2011,2012 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Install public header files
+########################################################################
+install(FILES
+ api.h
+ measurement_sink_f.h DESTINATION include/usrptest
+)
diff --git a/tools/gr-usrptest/include/usrptest/api.h b/tools/gr-usrptest/include/usrptest/api.h
new file mode 100644
index 000000000..0812606d5
--- /dev/null
+++ b/tools/gr-usrptest/include/usrptest/api.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2011 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef INCLUDED_USRPTEST_API_H
+#define INCLUDED_USRPTEST_API_H
+
+#include <gnuradio/attributes.h>
+
+#ifdef gnuradio_usrptest_EXPORTS
+# define USRPTEST_API __GR_ATTR_EXPORT
+#else
+# define USRPTEST_API __GR_ATTR_IMPORT
+#endif
+
+#endif /* INCLUDED_USRPTEST_API_H */
diff --git a/tools/gr-usrptest/include/usrptest/measurement_sink_f.h b/tools/gr-usrptest/include/usrptest/measurement_sink_f.h
new file mode 100644
index 000000000..aaf1fb9d1
--- /dev/null
+++ b/tools/gr-usrptest/include/usrptest/measurement_sink_f.h
@@ -0,0 +1,61 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2016 Ettus Research LLC.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+
+#ifndef INCLUDED_USRPTEST_MEASUREMENT_SINK_F_H
+#define INCLUDED_USRPTEST_MEASUREMENT_SINK_F_H
+
+#include <usrptest/api.h>
+#include <gnuradio/sync_block.h>
+
+namespace gr {
+ namespace usrptest {
+
+ /*!
+ * \brief <+description of block+>
+ * \ingroup usrptest
+ *
+ */
+ class USRPTEST_API measurement_sink_f : virtual public gr::sync_block
+ {
+ public:
+ typedef boost::shared_ptr<measurement_sink_f> sptr;
+
+ /*!
+ * \brief Return a shared_ptr to a new instance of usrptest::measurement_sink_f.
+ *
+ * To avoid accidental use of raw pointers, usrptest::measurement_sink_f's
+ * constructor is in a private implementation
+ * class. usrptest::measurement_sink_f::make is the public interface for
+ * creating new instances.
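+       *
+       * A minimal usage sketch (assuming the generated Python bindings,
+       * which take the same two arguments as make()):
+       * \code{.py}
+       * sink = usrptest.measurement_sink_f(num_samples, runs)
+       * sink.start_run()           # record one run of num_samples items
+       * averages = sink.get_avg()  # one entry per completed run
+       * \endcode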
+ */
+ static sptr make(int num_samples,int runs);
+ virtual std::vector<float> get_avg() const = 0;
+ virtual std::vector<float> get_stddev() const = 0;
+ virtual int get_run() const = 0;
+ virtual void start_run() = 0;
+
+ };
+
+ } // namespace usrptest
+} // namespace gr
+
+#endif /* INCLUDED_USRPTEST_MEASUREMENT_SINK_F_H */
+
diff --git a/tools/gr-usrptest/lib/CMakeLists.txt b/tools/gr-usrptest/lib/CMakeLists.txt
new file mode 100644
index 000000000..10ba461e1
--- /dev/null
+++ b/tools/gr-usrptest/lib/CMakeLists.txt
@@ -0,0 +1,59 @@
+# Copyright 2011,2012,2016 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Setup library
+########################################################################
+include(GrPlatform) #define LIB_SUFFIX
+
+include_directories(${Boost_INCLUDE_DIR})
+link_directories(${Boost_LIBRARY_DIRS})
+
+list(APPEND usrptest_sources
+ measurement_sink_f_impl.cc
+)
+
+set(usrptest_sources "${usrptest_sources}" PARENT_SCOPE)
+if(NOT usrptest_sources)
+ MESSAGE(STATUS "No C++ sources... skipping lib/")
+ return()
+endif(NOT usrptest_sources)
+
+add_library(gnuradio-usrptest SHARED ${usrptest_sources})
+target_link_libraries(gnuradio-usrptest ${Boost_LIBRARIES} ${GNURADIO_ALL_LIBRARIES})
+set_target_properties(gnuradio-usrptest PROPERTIES DEFINE_SYMBOL "gnuradio_usrptest_EXPORTS")
+
+if(APPLE)
+ set_target_properties(gnuradio-usrptest PROPERTIES
+ INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib"
+ )
+endif(APPLE)
+
+########################################################################
+# Install built library files
+########################################################################
+include(GrMiscUtils)
+GR_LIBRARY_FOO(gnuradio-usrptest RUNTIME_COMPONENT "usrptest_runtime" DEVEL_COMPONENT "usrptest_devel")
+
+########################################################################
+# Print summary
+########################################################################
+message(STATUS "Using install prefix: ${CMAKE_INSTALL_PREFIX}")
+message(STATUS "Building for version: ${VERSION} / ${LIBVER}")
+
diff --git a/tools/gr-usrptest/lib/measurement_sink_f_impl.cc b/tools/gr-usrptest/lib/measurement_sink_f_impl.cc
new file mode 100644
index 000000000..58faccb9a
--- /dev/null
+++ b/tools/gr-usrptest/lib/measurement_sink_f_impl.cc
@@ -0,0 +1,124 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2016 Ettus Research LLC.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <gnuradio/io_signature.h>
+#include "measurement_sink_f_impl.h"
+
+namespace gr {
+ namespace usrptest {
+
+ measurement_sink_f::sptr
+ measurement_sink_f::make(int num_samples, int runs)
+ {
+ return gnuradio::get_initial_sptr
+ (new measurement_sink_f_impl(num_samples, runs));
+
+ }
+
+ /*
+ * The private constructor
+ */
+ measurement_sink_f_impl::measurement_sink_f_impl(int num_samples, int runs)
+ : gr::sync_block("measurement_sink_f",
+ gr::io_signature::make(1, 1, sizeof(float)),
+ gr::io_signature::make(0, 0, 0)),
+ d_runs(runs),
+ d_nsamples(num_samples)
+ {
+ d_curr_run = 0; // number of completed measurement runs
+ d_curr_avg = 0.0f; // accumulated average
+ d_curr_M2 = 0.0f; // accumulated M2
+      d_run = false; // true while a measurement run is currently being recorded
+ d_curr_sample = 0; // current sample count
+ }
+
+ /*
+ * Our virtual destructor.
+ */
+ measurement_sink_f_impl::~measurement_sink_f_impl()
+ {
+ }
+
+ int
+ measurement_sink_f_impl::work(int noutput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items)
+ {
+ const float *in = (const float *) input_items[0];
+ if ((d_curr_run < d_runs)&&d_run){ //check if we need to record data
+ const int max_items = std::min(noutput_items, d_nsamples-d_curr_sample); // calculate number of samples we have to take into account
+ for (int item=0; item < max_items;++item){
+ ++d_curr_sample;
+ inc_both(in[item]);
+ }
+ if (d_curr_sample == d_nsamples) {
+ d_avg.push_back(d_curr_avg);
+ d_stddev.push_back(std::sqrt(d_curr_M2/(float)(d_curr_sample - 1)));
+ ++d_curr_run;
+ d_run = false;
+ d_curr_sample = 0;
+ d_curr_avg = 0.0f;
+ d_curr_M2 = 0.0f;
+ }
+ }
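+      // This block has no output ports; report every input item as consumed.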
+ return noutput_items;
+ }
+
+
+ void
+ measurement_sink_f_impl::inc_both(const float new_val)
+ {
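+      // Welford's online algorithm: d_curr_sample was already incremented for
+      // this sample, so the running mean and M2 (sum of squared deviations
+      // from the mean) are updated in a single pass.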
+ float delta = new_val - d_curr_avg;
+ d_curr_avg = d_curr_avg + delta/(float)(d_curr_sample);
+ d_curr_M2 = d_curr_M2 + delta*(new_val - d_curr_avg);
+ }
+
+
+ void
+ measurement_sink_f_impl::start_run()
+ {
+ d_run = true;
+ }
+
+ std::vector<float>
+ measurement_sink_f_impl::get_avg() const
+ {
+ return d_avg;
+ }
+
+ std::vector<float>
+ measurement_sink_f_impl::get_stddev() const
+ {
+ return d_stddev;
+ }
+
+ int
+ measurement_sink_f_impl::get_run() const
+ {
+ return d_curr_run;
+ }
+
+ } /* namespace usrptest */
+} /* namespace gr */
+
diff --git a/tools/gr-usrptest/lib/measurement_sink_f_impl.h b/tools/gr-usrptest/lib/measurement_sink_f_impl.h
new file mode 100644
index 000000000..4d63adfa2
--- /dev/null
+++ b/tools/gr-usrptest/lib/measurement_sink_f_impl.h
@@ -0,0 +1,62 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2016 Ettus Research LLC.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this software; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef INCLUDED_USRPTEST_MEASUREMENT_SINK_F_IMPL_H
+#define INCLUDED_USRPTEST_MEASUREMENT_SINK_F_IMPL_H
+
+#include <usrptest/measurement_sink_f.h>
+
+namespace gr {
+ namespace usrptest {
+
+ class measurement_sink_f_impl : public measurement_sink_f
+ {
+ private:
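+      // Per-run running statistics (Welford's algorithm); after each completed
+      // run the results are appended to d_avg and d_stddev.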
+ std::vector< float > d_avg;
+ std::vector< float > d_stddev;
+ bool d_run;
+ int d_runs;
+ int d_nsamples;
+ int d_curr_sample;
+ int d_curr_run;
+ float d_curr_avg;
+ float d_curr_M2;
+ void inc_both(const float new_val);
+
+
+ public:
+ measurement_sink_f_impl(int num_samples, int runs);
+ ~measurement_sink_f_impl();
+ std::vector<float> get_avg() const;
+ std::vector<float> get_stddev() const;
+ int get_run() const;
+ void start_run();
+
+ // Where all the action really happens
+ int work(int noutput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items);
+ };
+
+ } // namespace usrptest
+} // namespace gr
+
+#endif /* INCLUDED_USRPTEST_MEASUREMENT_SINK_F_IMPL_H */
+
diff --git a/tools/gr-usrptest/lib/qa_usrptest.cc b/tools/gr-usrptest/lib/qa_usrptest.cc
new file mode 100644
index 000000000..c8e6495ae
--- /dev/null
+++ b/tools/gr-usrptest/lib/qa_usrptest.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * This class gathers together all the test cases for the gr-usrptest
+ * module into a single test suite.  As you create new test cases,
+ * add them here.
+ */
+
+#include "qa_usrptest.h"
+
+CppUnit::TestSuite *
+qa_usrptest::suite()
+{
+ CppUnit::TestSuite *s = new CppUnit::TestSuite("usrptest");
+
+ return s;
+}
diff --git a/tools/gr-usrptest/lib/qa_usrptest.h b/tools/gr-usrptest/lib/qa_usrptest.h
new file mode 100644
index 000000000..94ce3f33b
--- /dev/null
+++ b/tools/gr-usrptest/lib/qa_usrptest.h
@@ -0,0 +1,38 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2012 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _QA_USRPTEST_H_
+#define _QA_USRPTEST_H_
+
+#include <gnuradio/attributes.h>
+#include <cppunit/TestSuite.h>
+
+//! collect all the tests for the gr-usrptest module
+
+class __GR_ATTR_EXPORT qa_usrptest
+{
+ public:
+  //! return suite of tests for all of the gr-usrptest module
+ static CppUnit::TestSuite *suite();
+};
+
+#endif /* _QA_USRPTEST_H_ */
diff --git a/tools/gr-usrptest/lib/test_usrptest.cc b/tools/gr-usrptest/lib/test_usrptest.cc
new file mode 100644
index 000000000..808638cdf
--- /dev/null
+++ b/tools/gr-usrptest/lib/test_usrptest.cc
@@ -0,0 +1,48 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2012 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cppunit/TextTestRunner.h>
+#include <cppunit/XmlOutputter.h>
+
+#include <gnuradio/unittests.h>
+#include "qa_usrptest.h"
+#include <iostream>
+#include <fstream>
+
+int
+main (int argc, char **argv)
+{
+ CppUnit::TextTestRunner runner;
+ std::ofstream xmlfile(get_unittest_path("usrptest.xml").c_str());
+ CppUnit::XmlOutputter *xmlout = new CppUnit::XmlOutputter(&runner.result(), xmlfile);
+
+ runner.addTest(qa_usrptest::suite());
+ runner.setOutputter(xmlout);
+
+ bool was_successful = runner.run("", false);
+
+ return was_successful ? 0 : 1;
+}
diff --git a/tools/gr-usrptest/python/CMakeLists.txt b/tools/gr-usrptest/python/CMakeLists.txt
new file mode 100644
index 000000000..7bb812b0c
--- /dev/null
+++ b/tools/gr-usrptest/python/CMakeLists.txt
@@ -0,0 +1,49 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Include python install macros
+########################################################################
+include(GrPython)
+if(NOT PYTHONINTERP_FOUND)
+ return()
+endif()
+
+########################################################################
+# Install python sources
+########################################################################
+GR_PYTHON_INSTALL(
+ FILES
+ __init__.py
+ functions.py
+ phase_calc_ccf.py DESTINATION ${GR_PYTHON_DIR}/usrptest
+)
+
+add_subdirectory(flowgraphs)
+add_subdirectory(labview_control)
+add_subdirectory(rts_tests)
+
+########################################################################
+# Handle the unit tests
+########################################################################
+include(GrTest)
+
+set(GR_TEST_TARGET_DEPS gnuradio-usrptest)
+set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig)
+GR_ADD_TEST(qa_measurement_sink_f ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_measurement_sink_f.py)
diff --git a/tools/gr-usrptest/python/__init__.py b/tools/gr-usrptest/python/__init__.py
new file mode 100644
index 000000000..f4265974a
--- /dev/null
+++ b/tools/gr-usrptest/python/__init__.py
@@ -0,0 +1,35 @@
+#
+# Copyright 2008,2009 Free Software Foundation, Inc.
+#
+# This application is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# This application is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+# The presence of this file turns this directory into a Python package
+
+'''
+This is the GNU Radio USRPTEST module. It provides the blocks and flow
+graphs used for phase-alignment and self-test measurements with USRP devices.
+'''
+
+# import swig generated symbols into the usrptest namespace
+try:
+ # this might fail if the module is python-only
+ from usrptest_swig import *
+except ImportError:
+ pass
+
+# import any pure python here
+from phase_calc_ccf import phase_calc_ccf
+#
diff --git a/tools/gr-usrptest/python/build_utils.py b/tools/gr-usrptest/python/build_utils.py
new file mode 100644
index 000000000..cf58a9763
--- /dev/null
+++ b/tools/gr-usrptest/python/build_utils.py
@@ -0,0 +1,226 @@
+#
+# Copyright 2004,2009,2012 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+
+"""Misc utilities used at build time
+"""
+
+import re, os, os.path
+from build_utils_codes import *
+
+
+# set srcdir to the directory that contains Makefile.am
+try:
+ srcdir = os.environ['srcdir']
+except KeyError, e:
+ srcdir = "."
+srcdir = srcdir + '/'
+
+# set do_makefile to either true or false depending on the environment
+try:
+ if os.environ['do_makefile'] == '0':
+ do_makefile = False
+ else:
+ do_makefile = True
+except KeyError, e:
+ do_makefile = False
+
+# set do_sources to either true or false depending on the environment
+try:
+ if os.environ['do_sources'] == '0':
+ do_sources = False
+ else:
+ do_sources = True
+except KeyError, e:
+ do_sources = True
+
+name_dict = {}
+
+def log_output_name (name):
+ (base, ext) = os.path.splitext (name)
+ ext = ext[1:] # drop the leading '.'
+
+ entry = name_dict.setdefault (ext, [])
+ entry.append (name)
+
+def open_and_log_name (name, dir):
+ global do_sources
+ if do_sources:
+ f = open (name, dir)
+ else:
+ f = None
+ log_output_name (name)
+ return f
+
+def expand_template (d, template_filename, extra = ""):
+ '''Given a dictionary D and a TEMPLATE_FILENAME, expand template into output file
+ '''
+ global do_sources
+ output_extension = extract_extension (template_filename)
+ template = open_src (template_filename, 'r')
+ output_name = d['NAME'] + extra + '.' + output_extension
+ log_output_name (output_name)
+ if do_sources:
+ output = open (output_name, 'w')
+ do_substitution (d, template, output)
+ output.close ()
+ template.close ()
+
+def output_glue (dirname):
+ output_makefile_fragment ()
+ output_ifile_include (dirname)
+
+def output_makefile_fragment ():
+ global do_makefile
+ if not do_makefile:
+ return
+# overwrite the source, which must be writable; this should have been
+# checked for beforehand in the top-level Makefile.gen.gen .
+ f = open (os.path.join (os.environ.get('gendir', os.environ.get('srcdir', '.')), 'Makefile.gen'), 'w')
+ f.write ('#\n# This file is machine generated. All edits will be overwritten\n#\n')
+ output_subfrag (f, 'h')
+ output_subfrag (f, 'i')
+ output_subfrag (f, 'cc')
+ f.close ()
+
+def output_ifile_include (dirname):
+ global do_sources
+ if do_sources:
+ f = open ('%s_generated.i' % (dirname,), 'w')
+ f.write ('//\n// This file is machine generated. All edits will be overwritten\n//\n')
+ files = name_dict.setdefault ('i', [])
+ files.sort ()
+ f.write ('%{\n')
+ for file in files:
+ f.write ('#include <%s>\n' % (file[0:-1] + 'h',))
+ f.write ('%}\n\n')
+ for file in files:
+ f.write ('%%include <%s>\n' % (file,))
+
+def output_subfrag (f, ext):
+ files = name_dict.setdefault (ext, [])
+ files.sort ()
+ f.write ("GENERATED_%s =" % (ext.upper ()))
+ for file in files:
+ f.write (" \\\n\t%s" % (file,))
+ f.write ("\n\n")
+
+def extract_extension (template_name):
+ # template name is something like: GrFIRfilterXXX.h.t
+ # we return everything between the penultimate . and .t
+ mo = re.search (r'\.([a-z]+)\.t$', template_name)
+ if not mo:
+ raise ValueError, "Incorrectly formed template_name '%s'" % (template_name,)
+ return mo.group (1)
+
+def open_src (name, mode):
+ global srcdir
+ return open (os.path.join (srcdir, name), mode)
+
+def do_substitution (d, in_file, out_file):
+ def repl (match_obj):
+ key = match_obj.group (1)
+ # print key
+ return d[key]
+
+ inp = in_file.read ()
+ out = re.sub (r"@([a-zA-Z0-9_]+)@", repl, inp)
+ out_file.write (out)
+
+
+
+copyright = '''/* -*- c++ -*- */
+/*
+ * Copyright 2003,2004 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street,
+ * Boston, MA 02110-1301, USA.
+ */
+'''
+
+def is_complex (code3):
+ if i_code (code3) == 'c' or o_code (code3) == 'c':
+ return '1'
+ else:
+ return '0'
+
+
+def standard_dict (name, code3, package='gr'):
+ d = {}
+ d['NAME'] = name
+ d['NAME_IMPL'] = name+'_impl'
+ d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper())
+ d['GUARD_NAME_IMPL'] = 'INCLUDED_%s_%s_IMPL_H' % (package.upper(), name.upper())
+ d['BASE_NAME'] = re.sub ('^' + package + '_', '', name)
+ d['SPTR_NAME'] = '%s_sptr' % name
+ d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten'
+ d['COPYRIGHT'] = copyright
+ d['TYPE'] = i_type (code3)
+ d['I_TYPE'] = i_type (code3)
+ d['O_TYPE'] = o_type (code3)
+ d['TAP_TYPE'] = tap_type (code3)
+ d['IS_COMPLEX'] = is_complex (code3)
+ return d
+
+
+def standard_dict2 (name, code3, package):
+ d = {}
+ d['NAME'] = name
+ d['BASE_NAME'] = name
+ d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper())
+ d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten'
+ d['COPYRIGHT'] = copyright
+ d['TYPE'] = i_type (code3)
+ d['I_TYPE'] = i_type (code3)
+ d['O_TYPE'] = o_type (code3)
+ d['TAP_TYPE'] = tap_type (code3)
+ d['IS_COMPLEX'] = is_complex (code3)
+ return d
+
+def standard_impl_dict2 (name, code3, package):
+ d = {}
+ d['NAME'] = name
+ d['IMPL_NAME'] = name
+ d['BASE_NAME'] = name.rstrip("impl").rstrip("_")
+ d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper())
+ d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten'
+ d['COPYRIGHT'] = copyright
+ d['FIR_TYPE'] = "fir_filter_" + code3
+ d['CFIR_TYPE'] = "fir_filter_" + code3[0:2] + 'c'
+ d['TYPE'] = i_type (code3)
+ d['I_TYPE'] = i_type (code3)
+ d['O_TYPE'] = o_type (code3)
+ d['TAP_TYPE'] = tap_type (code3)
+ d['IS_COMPLEX'] = is_complex (code3)
+ return d
diff --git a/tools/gr-usrptest/python/build_utils_codes.py b/tools/gr-usrptest/python/build_utils_codes.py
new file mode 100644
index 000000000..9ea96baae
--- /dev/null
+++ b/tools/gr-usrptest/python/build_utils_codes.py
@@ -0,0 +1,52 @@
+#
+# Copyright 2004 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+
+def i_code (code3):
+ return code3[0]
+
+def o_code (code3):
+ if len (code3) >= 2:
+ return code3[1]
+ else:
+ return code3[0]
+
+def tap_code (code3):
+ if len (code3) >= 3:
+ return code3[2]
+ else:
+ return code3[0]
+
+def i_type (code3):
+ return char_to_type[i_code (code3)]
+
+def o_type (code3):
+ return char_to_type[o_code (code3)]
+
+def tap_type (code3):
+ return char_to_type[tap_code (code3)]
+
+
+char_to_type = {}
+char_to_type['s'] = 'short'
+char_to_type['i'] = 'int'
+char_to_type['f'] = 'float'
+char_to_type['c'] = 'gr_complex'
+char_to_type['b'] = 'unsigned char'
diff --git a/tools/gr-usrptest/python/flowgraphs/CMakeLists.txt b/tools/gr-usrptest/python/flowgraphs/CMakeLists.txt
new file mode 100644
index 000000000..7ea94b505
--- /dev/null
+++ b/tools/gr-usrptest/python/flowgraphs/CMakeLists.txt
@@ -0,0 +1,28 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Install python sources
+########################################################################
+GR_PYTHON_INSTALL(
+ FILES
+ __init__.py
+ selftest_fg.py
+ phasealignment_fg.py DESTINATION ${GR_PYTHON_DIR}/usrptest/flowgraphs
+)
diff --git a/tools/gr-usrptest/python/flowgraphs/__init__.py b/tools/gr-usrptest/python/flowgraphs/__init__.py
new file mode 100644
index 000000000..048e5638a
--- /dev/null
+++ b/tools/gr-usrptest/python/flowgraphs/__init__.py
@@ -0,0 +1,14 @@
+"""
+usrptest.flowgraphs
+======================================
+
+Contents
+--------
+
+Subpackages
+-----------
+::
+
+The existence of this file turns the folder into a Python package.
+
+"""
diff --git a/tools/gr-usrptest/python/flowgraphs/phasealignment_fg.py b/tools/gr-usrptest/python/flowgraphs/phasealignment_fg.py
new file mode 100644
index 000000000..f120e2421
--- /dev/null
+++ b/tools/gr-usrptest/python/flowgraphs/phasealignment_fg.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python2
+
+from gnuradio import gr
+from gnuradio import uhd
+from gnuradio import analog
+from usrptest import phase_calc_ccf, measurement_sink_f
+from usrptest.functions import log_level
+from random import uniform
+from ast import literal_eval
+import copy
+import logging
+import sys
+
+
+class phasealignment_fg(gr.top_block):
+ def __init__(self, uhd_app):
+ gr.top_block.__init__(self, "Calculate dphi for all USRPs (Rx only)")
+ ##############################
+ # Block dicts
+ ##############################
+ self.log = logging.getLogger(__name__)
+        for handler in list(self.log.handlers):  # copy the list; removing while iterating would skip handlers
+            self.log.removeHandler(handler)
+ self.log.addHandler(logging.StreamHandler(sys.stdout))
+ self.log.setLevel(log_level(uhd_app.args.log_level))
+ self.rx_streams = list()
+ self.phase_diff_calc = list()
+ self.measurement_sink = list()
+ self.uhd_app = copy.copy(uhd_app)
+ self.tx_app = copy.copy(uhd_app)
+ self.samp_rate = uhd_app.args.samp_rate
+ # Create all devices specified in --receiver
+ # Create all remaining blocks and connect devices to the first port and
+ # sink
+ self.uhd_app.args.num_chan = len(self.uhd_app.args.channels)
+ self.log.info('setting up usrp....')
+ ##############################
+ # Setup RX
+ ##############################
+ self.log.debug("RX-Setup with args: {}".format(self.uhd_app.args))
+ self.uhd_app.setup_usrp(uhd.usrp_source, self.uhd_app.args)
+ if self.uhd_app.args.measurement_setup is not None:
+ self.measurement_channels = self.uhd_app.args.measurement_setup.strip(
+ ).split(',')
+            # make sure every channel is listed in measurement_channels at least once
+            if len(set(
+                    self.measurement_channels)) != self.uhd_app.args.num_chan:
+                self.uhd_app.vprint(
+                    "[{prefix}] Number of measurement channels has to equal the number of used channels."
+ )
+ self.uhd_app.exit(1)
+ self.measurement_channels = [self.uhd_app.args.channels.index(m) for m in self.measurement_channels]
+ else:
+ self.measurement_channels = range(self.uhd_app.args.num_chan)
+
+ self.measurement_channels_names = list()
+ for chan in self.measurement_channels:
+ usrp_info = self.uhd_app.usrp.get_usrp_info(chan)
+ self.measurement_channels_names.append("_".join(
+ [usrp_info['mboard_serial'], usrp_info['rx_serial']]))
+
+ #Connect channels to first port of d_phi_calc_block and to measurement_sink
+ for num, chan in enumerate(self.measurement_channels[:-1]):
+ self.phase_diff_calc.append(phase_calc_ccf())
+ self.measurement_sink.append(
+ measurement_sink_f(
+ int(self.uhd_app.args.samp_rate *
+ self.uhd_app.args.duration), self.uhd_app.args.runs))
+ self.connect((self.uhd_app.usrp, chan),
+ (self.phase_diff_calc[num], 0))
+ self.connect((self.phase_diff_calc[num], 0),
+ (self.measurement_sink[num], 0))
+ # Connect devices to second port of d_phi_block
+ for num, chan in enumerate(self.measurement_channels[1:]):
+ self.connect((self.uhd_app.usrp, chan),
+ (self.phase_diff_calc[num], 1))
+ ##############################
+ # Setup TX
+ ##############################
+ if self.uhd_app.args.tx_channels is not None:
+ self.tx_app.args.antenna = self.tx_app.args.tx_antenna
+ self.tx_app.args.channels = [
+ int(chan.strip())
+ for chan in self.tx_app.args.tx_channels.split(',')
+ ]
+ self.tx_app.usrp = None
+ self.log.debug("TX-Setup with args: {}".format(self.tx_app.args))
+ self.tx_app.setup_usrp(uhd.usrp_sink, self.tx_app.args)
+ self.siggen = analog.sig_source_c(self.samp_rate,
+ analog.GR_COS_WAVE,
+ self.tx_app.args.tx_offset, 1.0)
+ for chan in range(len(self.tx_app.channels)):
+ self.connect((self.siggen, 0), (self.tx_app.usrp, chan))
+
+
+ def get_samp_rate(self):
+ return self.samp_rate
+
+ def set_samp_rate(self, samp_rate):
+ self.samp_rate = samp_rate
+
+ def retune_frequency(self, band_num=1, bands=1):
+ ref_chan = self.uhd_app.channels[0]
+ freq_range = literal_eval(
+ self.uhd_app.usrp.get_freq_range(ref_chan).__str__(
+ )) # returns tuple with (start_freq, end_freq, step)
+
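+        # optionally split the tunable range into 'bands' equal sub-bands and
+        # retune within the band selected for this run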
+ if bands > 1:
+ bw = (freq_range[1] - freq_range[0])/bands
+ freq_range = list(freq_range)
+ freq_range[0] = freq_range[0] + ((band_num-1) % bands)*bw
+ freq_range[1] = freq_range[0] + bw
+
+ retune_freq = uniform(freq_range[0], freq_range[1])
+ self.log.info('tune all channels to: {:f} MHz'.format(retune_freq /
+ 1e6))
+ self.uhd_app.set_freq(retune_freq)
+ self.log.info('tune all channels to: {:f} MHz'.format(
+ self.uhd_app.args.freq / 1e6))
+ self.uhd_app.set_freq(self.uhd_app.args.freq)
diff --git a/tools/gr-usrptest/python/flowgraphs/selftest_fg.py b/tools/gr-usrptest/python/flowgraphs/selftest_fg.py
new file mode 100644
index 000000000..cdfc35e74
--- /dev/null
+++ b/tools/gr-usrptest/python/flowgraphs/selftest_fg.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+from gnuradio import blocks
+from gnuradio import gr
+from gnuradio import uhd
+from usrptest import phase_calc_ccf
+from gnuradio.uhd.uhd_app import UHDApp
+import numpy as np
+
+class selftest_fg(gr.top_block):
+
+ def __init__(self, freq, samp_rate, dphase, devices=list()):
+ gr.top_block.__init__(self, "Generate Signal extract phase")
+
+ ##################################################
+ # Variables
+ ##################################################
+ self.samp_rate = samp_rate
+ self.freq = 10e3
+ self.devices = devices
+ self.dphase = dphase
+ self.tx_gain = 50
+ self.rx_gain = 50
+ self.center_freq = freq
+ self.omega = 2*np.pi*self.freq
+ self.steps = np.arange(
+ self.samp_rate)*float(self.omega)/float(self.samp_rate)
+ self.reference = self.complex_sine(self.steps)
+ self.test_signal = self.complex_sine(self.steps+0.5*np.pi)
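+        # the test signal leads the reference by pi/2, so the expected phase difference is 90 degrees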
+ self.device_test = False
+
+ ##################################################
+ # Block dicts
+ ##################################################
+ self.rx_devices = dict()
+ self.tx_devices = dict()
+ self.sink_dict = dict()
+ self.phase_dict = dict()
+ self.reference_source = blocks.vector_source_c(self.reference)
+
+ if len(self.devices):
+ ##################################################
+ # Devices
+ ##################################################
+ self.device_test = True
+ #To reuse existing setup_usrp() command
+ for device in self.devices:
+ # Create and configure all devices
+ self.rx_devices[device] = uhd.usrp_source(
+ device, uhd.stream_args(
+ cpu_format="fc32", channel=range(1)))
+ self.tx_devices[device] = uhd.usrp_sink(
+ device, uhd.stream_args(
+ cpu_format="fc32", channel=range(1)))
+ self.rx_devices[device].set_samp_rate(self.samp_rate)
+ self.rx_devices[device].set_center_freq(self.center_freq, 0)
+ self.rx_devices[device].set_gain(self.rx_gain, 0)
+ self.tx_devices[device].set_samp_rate(self.samp_rate)
+ self.tx_devices[device].set_center_freq(self.center_freq, 0)
+ self.tx_devices[device].set_gain(self.tx_gain, 0)
+ self.sink_dict[device] = blocks.vector_sink_f()
+ self.phase_dict[device] = phase_calc_ccf(
+ self.samp_rate, self.freq)
+ for device in self.tx_devices.values():
+ self.connect((self.reference_source, 0), (device, 0))
+
+ for device_key in self.rx_devices.keys():
+ self.connect(
+ (self.rx_devices[device_key], 0), (self.phase_dict[device_key], 0))
+ self.connect((self.reference_source, 0),
+ (self.phase_dict[device_key], 1))
+ self.connect(
+ (self.phase_dict[device_key], 0), (self.sink_dict[device_key], 0))
+ # Debug options
+ # self.sink_list.append(blocks.vector_sink_c())
+ #self.connect((device, 0), (self.sink_list[-1], 0))
+ # self.sink_list.append(blocks.vector_sink_c())
+ #self.connect((self.reference_source, 0), (self.sink_list[-1], 0))
+ else:
+ ##################################################
+ # Blocks
+ ##################################################
+ self.result = blocks.vector_sink_f(1)
+ self.test_source = blocks.vector_source_c(self.test_signal)
+ self.block_phase_calc = phase_calc_ccf(
+ self.samp_rate, self.freq)
+
+ ##################################################
+ # Connections
+ ##################################################
+ self.connect((self.reference_source, 0), (self.block_phase_calc, 1))
+ self.connect((self.test_source, 0), (self.block_phase_calc, 0))
+ self.connect((self.block_phase_calc, 0), (self.result, 0))
+
+    def complex_sine(self, steps):
+ return np.exp(1j*steps)
+
+ def get_samp_rate(self):
+ return self.samp_rate
+
+ def set_samp_rate(self, samp_rate):
+ self.samp_rate = samp_rate
+
+ def run(self):
+ self.start()
+ self.wait()
+ if self.device_test:
+ data = dict()
+ for device_key in self.sink_dict.keys():
+ curr_data = self.sink_dict[device_key].data()
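+                # drop the first and last 0.2*samp_rate samples so settling transients do not skew the average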
+ curr_data = curr_data[int(0.2*self.samp_rate):-int(0.2*self.samp_rate)]
+ phase_avg = np.average(curr_data)
+ if (np.max(curr_data) < phase_avg+self.dphase*0.5) and (np.min(curr_data) > phase_avg-self.dphase*0.5):
+ data[device_key] = phase_avg
+ else:
+ print("Error phase not settled")
+
+ #Debug
+ # plt.ylim(-1, 1)
+            # plt.xlim((self.samp_rate/2.), (self.samp_rate/2.)+1000)
+ #for key in data:
+ # plt.plot(data[key])
+ return data
diff --git a/tools/gr-usrptest/python/functions.py b/tools/gr-usrptest/python/functions.py
new file mode 100644
index 000000000..2ce2ee451
--- /dev/null
+++ b/tools/gr-usrptest/python/functions.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python2
+import numpy as np
+import time
+import copy
+import logging
+
+
+def setup_phase_alignment_parser(parser):
+ test_group = parser.add_argument_group(
+ 'Phase alignment specific arguments')
+ test_group.add_argument(
+ '--runs',
+ default=10,
+ type=int,
+ help='Number of times to retune and measure d_phi')
+ test_group.add_argument(
+ '--duration',
+ default=5.0,
+ type=float,
+ help='Duration of a measurement run')
+ test_group.add_argument(
+ '--measurement-setup',
+ type=str,
+        help='Comma-separated list of channel IDs. Phase difference will be calculated between consecutive channels. default=(0,1,2,..,M-1) M: num_chan'
+ )
+ test_group.add_argument(
+ '--log-level',
+ type=str,
+ choices=["critical", "error", "warning", "info", "debug"],
+ default="info")
+ test_group.add_argument(
+ '--freq-bands',
+ type=int,
+ help="Number of frequency bands in daughterboard range to randomly retune to",
+ default=1)
+ return parser
+
+
+def setup_tx_phase_alignment_parser(parser):
+ tx_group = parser.add_argument_group(
+ 'TX Phase alignment specific arguments.')
+ tx_group.add_argument(
+ '--tx-channels', type=str, help='which channels to use')
+ tx_group.add_argument(
+ '--tx-antenna',
+ type=str,
+ help='comma-separated list of channel antennas for tx')
+ tx_group.add_argument(
+ '--tx-offset',
+ type=float,
+ help='frequency offset in Hz which should be added to center frequency for transmission'
+ )
+ return parser
+
+
+def setup_rts_phase_alignment_parser(parser):
+ rts_group = parser.add_argument_group(
+ 'RTS Phase alignment specific arguments')
+ rts_group.add_argument(
+ '-pd',
+ '--phasedev',
+ type=float,
+ default=1.0,
+        help='maximum standard deviation of dphi within a run for the phase to be considered settled (in deg)'
+ )
+ rts_group.add_argument(
+ '-dp',
+ '--dphi',
+ type=float,
+ default=2.0,
+ help='maximum allowed d_phase deviation between runs (in deg)')
+ rts_group.add_argument(
+ '--freqlist',
+ type=str,
+ help='comma-separated list of frequencies to test')
+ rts_group.add_argument(
+ '--lv-host',
+ type=str,
+ help='specify this argument if running tests with vst/switch')
+ rts_group.add_argument('--lv-vst-name', type=str, help='vst device name')
+ rts_group.add_argument(
+ '--lv-switch-name', type=str, help='executive switch name')
+ rts_group.add_argument(
+ '--lv-basepath',
+ type=str,
+ help='basepath for LabVIEW VIs on Windows')
+ rts_group.add_argument(
+ '--tx-offset',
+ type=float,
+ help='transmitter frequency offset in VST')
+ rts_group.add_argument(
+ '--lv-switch-ports', type=str, help='comma-separated switch-port pair')
+ return parser
+
+def setup_manual_phase_alignment_parser(parser):
+ manual_group = parser.add_argument_group(
+ 'Manual Phase alignment specific arguments')
+ manual_group.add_argument(
+ '--plot',
+ dest='plot',
+ action='store_true',
+ help='Set this argument to enable plotting results with matplotlib'
+ )
+ manual_group.add_argument(
+ '--auto',
+ action='store_true',
+ help='Set this argument to enable automatic selection of test frequencies'
+ )
+ manual_group.add_argument(
+ '--start-freq',
+ type=float,
+ default=0.0,
+ help='Start frequency for automatic selection'
+    )
+ manual_group.add_argument(
+ '--stop-freq',
+ type=float,
+ default=0.0,
+ help='Stop frequency for automatic selection')
+
+ parser.set_defaults(plot=False,auto=False)
+ return parser
+
+
+def process_measurement_sinks(top_block):
+ data = list()
+ curr_data = dict()
+ for num, chan in enumerate(top_block.measurement_channels[:-1]):
+ curr_data['avg'] = list(top_block.measurement_sink[num].get_avg())
+ curr_data['stddev'] = list(top_block.measurement_sink[num].get_stddev(
+ ))
+ curr_data['first'] = top_block.measurement_channels_names[num]
+ curr_data['second'] = top_block.measurement_channels_names[num + 1]
+ data.append(copy.copy(curr_data))
+ return data
+
+
+def run_test(top_block, ntimes):
+ results = dict()
+ num_sinks = len(top_block.measurement_sink)
+ for i in xrange(ntimes):
+ #tune frequency to random position and back to specified frequency
+ top_block.retune_frequency(bands=top_block.uhd_app.args.freq_bands,band_num=i+1)
+ time.sleep(2)
+ #trigger start in all measurement_sinks
+ for sink in top_block.measurement_sink:
+ sink.start_run()
+ #wait until every measurement_sink is ready with the current run
+ while (sum([ms.get_run() for ms in top_block.measurement_sink]) < (
+ (i + 1) * num_sinks)):
+ time.sleep(1)
+ results = process_measurement_sinks(top_block)
+ return results
+
+
+def log_level(string):
+ return getattr(logging, string.upper())
diff --git a/tools/gr-usrptest/python/labview_control/CMakeLists.txt b/tools/gr-usrptest/python/labview_control/CMakeLists.txt
new file mode 100644
index 000000000..924df5479
--- /dev/null
+++ b/tools/gr-usrptest/python/labview_control/CMakeLists.txt
@@ -0,0 +1,28 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Install python sources
+########################################################################
+GR_PYTHON_INSTALL(
+ FILES
+ __init__.py
+    lv_control.py
+    DESTINATION ${GR_PYTHON_DIR}/usrptest/labview_control
+)
+
diff --git a/tools/gr-usrptest/python/labview_control/__init__.py b/tools/gr-usrptest/python/labview_control/__init__.py
new file mode 100644
index 000000000..28d2fe03b
--- /dev/null
+++ b/tools/gr-usrptest/python/labview_control/__init__.py
@@ -0,0 +1,14 @@
+"""
+usrptest.labview_control
+======================================
+
+Contents
+--------
+
+Subpackages
+-----------
+::
+
+The existence of this file turns the folder into a Python package.
+
+"""
diff --git a/tools/gr-usrptest/python/labview_control/lv_control.py b/tools/gr-usrptest/python/labview_control/lv_control.py
new file mode 100644
index 000000000..27407a07c
--- /dev/null
+++ b/tools/gr-usrptest/python/labview_control/lv_control.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+from labview_automation.client import LabVIEWClient
+
+class vst_siggen:
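+    """Remote control of a VST signal generator through LabVIEW VIs."""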
+    def __init__(self, host, vi_base_path, rio_device):
+        self._host = host
+        self._path = vi_base_path
+        self._rio = rio_device
+        self._caller = lv_caller(host, 2552, vi_base_path)
+
+    def __del__(self):
+        self.disconnect()
+
+    def set_freq(self, freq):
+        self._caller.vst_set_freq(self._rio, freq)
+
+    def disconnect(self):
+        try:
+            self._caller.vst_disconnect(self._rio)
+        except Exception:
+            pass
+
+
+class executive_switch:
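+    """Remote control of an executive switch through LabVIEW VIs."""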
+    def __init__(self, host, vi_base_path, device_name):
+        self._host = host
+        self._path = vi_base_path
+        self._device = device_name
+        self._caller = lv_caller(host, 2552, vi_base_path)
+
+    def __del__(self):
+        self.disconnect_all()
+
+    def connect_ports(self, port0, port1):
+        self._caller.switch_connect_ports(self._device, port0, port1)
+
+    def disconnect_all(self):
+        try:
+            self._caller.switch_disconnect_all(self._device)
+        except Exception:
+            pass
+
+
+
+class lv_caller:
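+    """Thin wrapper around labview_automation's LabVIEWClient that runs the
+    helper VIs found under vi_base_path synchronously."""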
+ def __init__(self, host, port, vi_base_path):
+ self._host = host
+ self._port = port
+ self._client = LabVIEWClient(host, port)
+ self._path = vi_base_path
+
+ def vst_disconnect(self, rio_device):
+ with self._client as c:
+ control_values = {
+ "rio_device": rio_device,
+ }
+ result = c.run_vi_synchronous("".join([self._path,"vst_disconnect.vi"]),control_values)
+ return result
+
+ def vst_set_freq(self, rio_device, freq):
+ with self._client as c:
+ control_values = {
+ "rio_device": rio_device,
+ "cw_freq": freq,
+ "power": 0,
+ }
+ result = c.run_vi_synchronous("".join([self._path,"vst_set_freq.vi"]),control_values)
+ return result
+
+ def switch_connect_ports(self, switch_device, port0, port1):
+ with self._client as c:
+ control_values = {
+ "virtual_switch": switch_device,
+ "chan0": port0,
+ "chan1": port1,
+ }
+ result = c.run_vi_synchronous("".join([self._path,"switch_connect_ports.vi"]),control_values)
+ return result
+
+
+ def switch_disconnect_all(self, switch_device):
+ with self._client as c:
+ control_values = {
+ "virtual_switch": switch_device,
+ }
+ result = c.run_vi_synchronous("".join([self._path,"switch_disconnect.vi"]),control_values)
+ return result
diff --git a/tools/gr-usrptest/python/phase_calc_ccf.py b/tools/gr-usrptest/python/phase_calc_ccf.py
new file mode 100644
index 000000000..fe3cf55a8
--- /dev/null
+++ b/tools/gr-usrptest/python/phase_calc_ccf.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 Ettus Research LLC.
+#
+# This is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+
+from gnuradio import gr
+from gnuradio import blocks
+import numpy as np
+
+
+class phase_calc_ccf(gr.hier_block2):
+ """
+ docstring for block phase_calc_ccf
+ """
+
+ def __init__(self):
+ gr.hier_block2.__init__(
+ self,
+ "phase_calc_ccf",
+ gr.io_signature(2, 2, gr.sizeof_gr_complex), # Input signature
+ gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
+ self.block = dict()
+ self.block['mult_conj'] = blocks.multiply_conjugate_cc()
+ self.block['arg'] = blocks.complex_to_arg()
+ self.block['mult_const'] = blocks.multiply_const_ff(180.0 / np.pi)
+
+ self.connect((self, 0), (self.block['mult_conj'], 0))
+ self.connect((self, 1), (self.block['mult_conj'], 1))
+ self.connect((self.block['mult_conj'], 0), (self.block['arg'], 0))
+ self.connect((self.block['arg'], 0), (self.block['mult_const'], 0))
+ self.connect((self.block['mult_const'], 0), (self, 0))
diff --git a/tools/gr-usrptest/python/qa_measurement_sink_f.py b/tools/gr-usrptest/python/qa_measurement_sink_f.py
new file mode 100755
index 000000000..ceb9913ae
--- /dev/null
+++ b/tools/gr-usrptest/python/qa_measurement_sink_f.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 Ettus Research LLC.
+#
+# This is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+
+from gnuradio import gr, gr_unittest
+from gnuradio import blocks
+import usrptest_swig as usrptest
+
+class qa_measurement_sink_f (gr_unittest.TestCase):
+
+ def setUp (self):
+ self.tb = gr.top_block ()
+
+ def tearDown (self):
+ self.tb = None
+
+ def test_001_t (self):
+ # set up fg
+ self.tb.run ()
+ # check data
+
+
+if __name__ == '__main__':
+ gr_unittest.run(qa_measurement_sink_f, "qa_measurement_sink_f.xml")
diff --git a/tools/gr-usrptest/python/rts_tests/CMakeLists.txt b/tools/gr-usrptest/python/rts_tests/CMakeLists.txt
new file mode 100644
index 000000000..03111a9bb
--- /dev/null
+++ b/tools/gr-usrptest/python/rts_tests/CMakeLists.txt
@@ -0,0 +1,33 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Install python sources
+########################################################################
+GR_PYTHON_INSTALL(
+ FILES
+ __init__.py
+    test_phasealignment.py
+    DESTINATION ${GR_PYTHON_DIR}/usrptest/rts_tests
+)
+
+GR_PYTHON_INSTALL(
+ PROGRAMS
+ test_phasealignment.py
+ DESTINATION bin
+)
diff --git a/tools/gr-usrptest/python/rts_tests/__init__.py b/tools/gr-usrptest/python/rts_tests/__init__.py
new file mode 100644
index 000000000..048e5638a
--- /dev/null
+++ b/tools/gr-usrptest/python/rts_tests/__init__.py
@@ -0,0 +1,14 @@
+"""
+usrptest.rts_tests
+======================================
+
+Contents
+--------
+
+Subpackages
+-----------
+::
+
+The existence of this file turns the folder into a Python package.
+
+"""
diff --git a/tools/gr-usrptest/python/rts_tests/test_phasealignment.py b/tools/gr-usrptest/python/rts_tests/test_phasealignment.py
new file mode 100755
index 000000000..12856bbdb
--- /dev/null
+++ b/tools/gr-usrptest/python/rts_tests/test_phasealignment.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 Ettus Research LLC.
+#
+# This is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+#
+import unittest
+from tinydb import TinyDB, Query
+from usrptest.flowgraphs import phasealignment_fg
+from usrptest.functions import (setup_phase_alignment_parser,
+                                setup_tx_phase_alignment_parser,
+                                setup_rts_phase_alignment_parser,
+                                run_test, log_level)
+from usrptest.labview_control import lv_control
+from gnuradio.uhd.uhd_app import UHDApp
+import logging
+import sys
+import numpy as np
+import argparse
+import time
+import copy
+
+
+class gr_usrp_test(unittest.TestCase):
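+    """unittest.TestCase variant that carries the parsed command line
+    arguments into every test instance."""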
+ def __init__(self, methodName='runTest', args=None):
+ super(gr_usrp_test, self).__init__(methodName)
+ self.args = args
+
+
+class qa_phasealignment(gr_usrp_test):
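+    """Runs the phase alignment flowgraph several times and checks the
+    measured phase differences against the configured limits and, if
+    available, against a reference measurement stored in phase_db.json."""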
+ def setUp(self):
+        time.sleep(15)  # Wait for devices to settle, just in case
+ if self.args.lv_host is not None:
+ self.siggen = lv_control.vst_siggen(self.args.lv_host,
+ self.args.lv_basepath,
+ self.args.lv_vst_name)
+ self.switch = lv_control.executive_switch(self.args.lv_host,
+ self.args.lv_basepath,
+ self.args.lv_switch_name)
+ self.siggen.set_freq(self.args.freq + self.args.tx_offset)
+ self.switch.connect_ports(
+ *self.args.lv_switch_ports.strip().split(','))
+
+ self.uhd_app = UHDApp(args=self.args)
+ self.log = logging.getLogger("test_phasealignment")
+ self.tb = phasealignment_fg.phasealignment_fg(self.uhd_app)
+ self.db = TinyDB('phase_db.json')
+
+ def tearDown(self):
+ self.uhd_app = None
+ self.tb = None
+        if self.args.lv_host is not None:
+ self.siggen.disconnect()
+ self.switch.disconnect_all()
+
+ def test_001(self):
+ self.tb.start()
+ time.sleep(2)
+        results = run_test(
+            self.tb,
+            self.args.runs)  # list of dicts with keys 'avg', 'stddev', 'first', 'second'
+ time.sleep(1)
+ #self.tb.stop()
+ #self.tb.wait()
+ self.time_stamp = time.strftime('%Y%m%d%H%M')
+ self.passed = True
+ for result in results:
+ fdev = result['first']
+ sdev = result['second']
+ self.log.info('Comparing values for phase difference between {} and {}'.
+ format(fdev, sdev))
+ dphase_list = result['avg']
+ dev_list = result['stddev']
+ dphase = np.average(dphase_list)
+ dev = np.average(dev_list)
+ ref_meas = get_reference_meas(self.db, fdev, sdev, self.args.freq)
+            passed = True
+            for dphase_i in dphase_list:
+                if abs(dphase_i - dphase) > self.args.dphi:
+                    self.log.info(
+                        '\t dPhase of a measurement run differs from average dphase. dphase_run: {}, dphase_avg: {}'.
+                        format(dphase_i, dphase))
+                    passed = False
+ if dev > self.args.phasedev:
+ self.log.info('\t dPhase deviates during measurement. stddev: {}'.
+ format(dev))
+ passed = False
+ if ref_meas:
+ if abs(ref_meas['dphase'] - dphase) > self.args.dphi:
+ self.log.info(
+ '\t dPhase differs from reference measurement. Now: {}, reference: {}'.
+ format(dphase, ref_meas['dphase']))
+ if not passed:
+ self.passed = False
+ else:
+ self.db.insert({
+ 'dev1': fdev,
+ 'dev2': sdev,
+ 'timestamp': self.time_stamp,
+ 'dphase': dphase,
+ 'dphase_dev': dev,
+ 'freq': self.args.freq
+ })
+ self.tb.stop()
+ self.tb.wait()
+ self.assertTrue(self.passed)
+
+
+def get_previous_meas(db, dev1, dev2, freq):
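+    """Return the most recent stored measurement for this device pair and frequency."""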
+ meas = Query()
+    results = db.search((meas.dev1 == dev1) & (meas.dev2 == dev2) &
+                        (meas.freq == freq))
+ prev_result = dict()
+ if results:
+ prev_result = results[0]
+ for result in results:
+ if result['timestamp'] > prev_result['timestamp']:
+ prev_result = result
+ return prev_result
+
+
+def get_reference_meas(db, dev1, dev2, freq):
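+    """Return the oldest stored measurement (the reference) for this device pair and frequency."""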
+ meas = Query()
+    results = db.search((meas.dev1 == dev1) & (meas.dev2 == dev2) &
+                        (meas.freq == freq))
+ ref_result = dict()
+ if results:
+ ref_result = results[0]
+ for result in results:
+ if result['timestamp'] < ref_result['timestamp']:
+ ref_result = result
+ return ref_result
+
+
+if __name__ == '__main__':
+ # parse all arguments
+ parser = argparse.ArgumentParser(conflict_handler='resolve')
+ parser = setup_phase_alignment_parser(parser)
+ parser = setup_tx_phase_alignment_parser(parser)
+ parser = setup_rts_phase_alignment_parser(parser)
+ logging.basicConfig(stream=sys.stdout)
+ UHDApp.setup_argparser(parser=parser)
+ args = parser.parse_args()
+ logging.getLogger("test_phasealignment").setLevel(log_level(args.log_level))
+
+ freqlist = args.freqlist.strip().split(',')
+
+ def make_suite(testcase_class, freqlist):
+ testloader = unittest.TestLoader()
+ testnames = testloader.getTestCaseNames(testcase_class)
+ suite = unittest.TestSuite()
+ for name in testnames:
+ for freq in freqlist:
+ test_args = copy.deepcopy(args)
+ test_args.freq = float(freq)
+ suite.addTest(testcase_class(name, args=test_args))
+ return suite
+
+ # Add tests.
+ alltests = unittest.TestSuite()
+ alltests.addTest(make_suite(qa_phasealignment, freqlist))
+ result = unittest.TextTestRunner(verbosity=2).run(alltests) # Run tests.
+ sys.exit(not result.wasSuccessful())
diff --git a/tools/gr-usrptest/python/setup.py b/tools/gr-usrptest/python/setup.py
new file mode 100644
index 000000000..6969f413d
--- /dev/null
+++ b/tools/gr-usrptest/python/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+
+setup(name='usrptest_automation',
+      version='0.0.1',
+      description='usrptest integration into RTS and LabVIEW',
+      packages=['usrptest_automation'],
+      install_requires=['labview-automation>=15.0.0.dev1', 'hoplite>=15.0.0.dev1']
+      )
diff --git a/tools/gr-usrptest/swig/CMakeLists.txt b/tools/gr-usrptest/swig/CMakeLists.txt
new file mode 100644
index 000000000..ae42be628
--- /dev/null
+++ b/tools/gr-usrptest/swig/CMakeLists.txt
@@ -0,0 +1,65 @@
+# Copyright 2011 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Radio; see the file COPYING. If not, write to
+# the Free Software Foundation, Inc., 51 Franklin Street,
+# Boston, MA 02110-1301, USA.
+
+########################################################################
+# Check if there is C++ code at all
+########################################################################
+if(NOT usrptest_sources)
+ MESSAGE(STATUS "No C++ sources... skipping swig/")
+ return()
+endif(NOT usrptest_sources)
+
+########################################################################
+# Include swig generation macros
+########################################################################
+find_package(SWIG)
+find_package(PythonLibs 2)
+if(NOT SWIG_FOUND OR NOT PYTHONLIBS_FOUND)
+ return()
+endif()
+include(GrSwig)
+include(GrPython)
+
+########################################################################
+# Setup swig generation
+########################################################################
+foreach(incdir ${GNURADIO_RUNTIME_INCLUDE_DIRS})
+ list(APPEND GR_SWIG_INCLUDE_DIRS ${incdir}/gnuradio/swig)
+endforeach(incdir)
+
+set(GR_SWIG_LIBRARIES gnuradio-usrptest)
+set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/usrptest_swig_doc.i)
+set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include)
+
+GR_SWIG_MAKE(usrptest_swig usrptest_swig.i)
+
+########################################################################
+# Install the build swig module
+########################################################################
+GR_SWIG_INSTALL(TARGETS usrptest_swig DESTINATION ${GR_PYTHON_DIR}/usrptest)
+
+########################################################################
+# Install swig .i files for development
+########################################################################
+install(
+ FILES
+ usrptest_swig.i
+ ${CMAKE_CURRENT_BINARY_DIR}/usrptest_swig_doc.i
+ DESTINATION ${GR_INCLUDE_DIR}/usrptest/swig
+)
diff --git a/tools/gr-usrptest/swig/usrptest_swig.i b/tools/gr-usrptest/swig/usrptest_swig.i
new file mode 100644
index 000000000..273b7c866
--- /dev/null
+++ b/tools/gr-usrptest/swig/usrptest_swig.i
@@ -0,0 +1,16 @@
+/* -*- c++ -*- */
+
+#define USRPTEST_API
+
+%include "gnuradio.i" // the common stuff
+
+//load generated python docstrings
+%include "usrptest_swig_doc.i"
+
+%{
+#include "usrptest/measurement_sink_f.h"
+%}
+
+
+%include "usrptest/measurement_sink_f.h"
+GR_SWIG_BLOCK_MAGIC2(usrptest, measurement_sink_f);
diff --git a/tools/kitchen_sink/kitchen_sink.cpp b/tools/kitchen_sink/kitchen_sink.cpp
index e17ee6437..818146927 100644
--- a/tools/kitchen_sink/kitchen_sink.cpp
+++ b/tools/kitchen_sink/kitchen_sink.cpp
@@ -35,7 +35,6 @@
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <csignal>
-#include <uhd/utils/msg.hpp>
namespace po = boost::program_options;