aboutsummaryrefslogtreecommitdiffstats
path: root/fpga/usrp3/tools
diff options
context:
space:
mode:
Diffstat (limited to 'fpga/usrp3/tools')
-rw-r--r--fpga/usrp3/tools/make/viv_design_builder.mak59
-rw-r--r--fpga/usrp3/tools/make/viv_hls_ip_builder.mak33
-rw-r--r--fpga/usrp3/tools/make/viv_ip_builder.mak104
-rw-r--r--fpga/usrp3/tools/make/viv_preamble.mak77
-rw-r--r--fpga/usrp3/tools/make/viv_sim_preamble.mak60
-rw-r--r--fpga/usrp3/tools/make/viv_simulator.mak85
-rw-r--r--fpga/usrp3/tools/scripts/auto_inst_e310.yml22
-rw-r--r--fpga/usrp3/tools/scripts/auto_inst_x310.yml29
-rw-r--r--fpga/usrp3/tools/scripts/check_config.json16
-rwxr-xr-xfpga/usrp3/tools/scripts/git-hash.sh75
-rwxr-xr-xfpga/usrp3/tools/scripts/ise_jtag_program.sh17
-rwxr-xr-xfpga/usrp3/tools/scripts/launch_vivado.py475
-rwxr-xr-xfpga/usrp3/tools/scripts/launch_vivado.sh94
-rw-r--r--fpga/usrp3/tools/scripts/setupenv_base.sh472
-rwxr-xr-xfpga/usrp3/tools/scripts/shared-ip-loc-manage.sh119
-rwxr-xr-xfpga/usrp3/tools/scripts/uhd_image_builder.py537
-rwxr-xr-xfpga/usrp3/tools/scripts/uhd_image_builder_gui.py656
-rw-r--r--fpga/usrp3/tools/scripts/viv_check_syntax.tcl14
-rw-r--r--fpga/usrp3/tools/scripts/viv_gen_ip_makefile.py54
-rw-r--r--fpga/usrp3/tools/scripts/viv_gen_part_id.py37
-rw-r--r--fpga/usrp3/tools/scripts/viv_generate_bd.tcl78
-rw-r--r--fpga/usrp3/tools/scripts/viv_generate_hls_ip.tcl36
-rw-r--r--fpga/usrp3/tools/scripts/viv_generate_ip.tcl43
-rw-r--r--fpga/usrp3/tools/scripts/viv_hardware_utils.tcl97
-rw-r--r--fpga/usrp3/tools/scripts/viv_ip_utils.tcl142
-rw-r--r--fpga/usrp3/tools/scripts/viv_ip_xci_editor.py95
-rw-r--r--fpga/usrp3/tools/scripts/viv_sim_project.tcl149
-rw-r--r--fpga/usrp3/tools/scripts/viv_strategies.tcl170
-rw-r--r--fpga/usrp3/tools/scripts/viv_synth.tcl16
-rw-r--r--fpga/usrp3/tools/scripts/viv_utils.tcl290
-rwxr-xr-xfpga/usrp3/tools/scripts/xil_bitfile_parser.py84
-rw-r--r--fpga/usrp3/tools/utils/README.md4
-rwxr-xr-xfpga/usrp3/tools/utils/gen_xdc_from_rinf.py334
-rw-r--r--fpga/usrp3/tools/utils/image_package_mapping.py279
-rwxr-xr-xfpga/usrp3/tools/utils/package_images.py369
-rw-r--r--fpga/usrp3/tools/utils/rfnoc-system-sim/.gitignore1
-rw-r--r--fpga/usrp3/tools/utils/rfnoc-system-sim/README6
-rwxr-xr-xfpga/usrp3/tools/utils/rfnoc-system-sim/colosseum_models.py593
-rwxr-xr-xfpga/usrp3/tools/utils/rfnoc-system-sim/ni_hw_models.py261
-rw-r--r--fpga/usrp3/tools/utils/rfnoc-system-sim/rfnocsim.py757
-rwxr-xr-xfpga/usrp3/tools/utils/rfnoc-system-sim/sim_colosseum.py142
-rwxr-xr-xfpga/usrp3/tools/utils/run_testbenches.py386
-rw-r--r--fpga/usrp3/tools/utils/testbenches.excludes15
43 files changed, 7382 insertions, 0 deletions
diff --git a/fpga/usrp3/tools/make/viv_design_builder.mak b/fpga/usrp3/tools/make/viv_design_builder.mak
new file mode 100644
index 000000000..5a54da012
--- /dev/null
+++ b/fpga/usrp3/tools/make/viv_design_builder.mak
@@ -0,0 +1,59 @@
+#
+# Copyright 2014-2015 Ettus Research
+#
+
+include $(BASE_DIR)/../tools/make/viv_preamble.mak
+SIMULATION = 0
+
+# -------------------------------------------------------------------
+# Usage: BUILD_VIVADO_DESIGN
+# Args: $1 = TCL_SCRIPT_NAME
+# $2 = TOP_MODULE
+# $3 = ARCH (zynq, kintex7, etc)
+# $4 = PART_ID (<device>/<package>/<speedgrade>[/<temperaturegrade>[/<silicon_revision>]])
+# Prereqs:
+# - TOOLS_DIR must be defined globally
+# - BUILD_DIR must be defined globally
+# - DESIGN_SRCS must be defined and should contain all source files
+# - VERILOG_DEFS must be defined and should contain all PP defines
+# -------------------------------------------------------------------
+BUILD_VIVADO_DESIGN = \
+ @ \
+ export VIV_TOOLS_DIR=$(call RESOLVE_PATH,$(TOOLS_DIR)); \
+ export VIV_OUTPUT_DIR=$(call RESOLVE_PATH,$(BUILD_DIR)); \
+ export VIV_TOP_MODULE=$(2); \
+ export VIV_PART_NAME=`python $(TOOLS_DIR)/scripts/viv_gen_part_id.py $(3)/$(4)`; \
+ export VIV_MODE=$(VIVADO_MODE); \
+ export VIV_DESIGN_SRCS=$(call RESOLVE_PATHS,$(DESIGN_SRCS)); \
+ export VIV_VERILOG_DEFS="$(VERILOG_DEFS)"; \
+ cd $(BUILD_DIR); \
+ $(TOOLS_DIR)/scripts/launch_vivado.py --parse-config $(BUILD_DIR)/../dev_config.json -mode $(VIVADO_MODE) -source $(call RESOLVE_PATH,$(1)) -log build.log -journal $(2).jou
+
+
+# -------------------------------------------------------------------
+# Usage: CHECK_VIVADO_DESIGN
+# Args: $1 = TCL_SCRIPT_NAME
+# $2 = TOP_MODULE
+# $3 = ARCH (zynq, kintex7, etc)
+# $4 = PART_ID (<device>/<package>/<speedgrade>[/<temperaturegrade>[/<silicon_revision>]])
+# Prereqs:
+# - TOOLS_DIR must be defined globally
+# - BUILD_DIR must be defined globally
+# - DESIGN_SRCS must be defined and should contain all source files
+# - VERILOG_DEFS must be defined and should contain all PP defines
+# -------------------------------------------------------------------
+CHECK_VIVADO_DESIGN = \
+ @ \
+ export VIV_TOOLS_DIR=$(call RESOLVE_PATH,$(TOOLS_DIR)); \
+ export VIV_OUTPUT_DIR=$(call RESOLVE_PATH,$(BUILD_DIR)); \
+ export VIV_TOP_MODULE=$(2); \
+ export VIV_PART_NAME=`python $(TOOLS_DIR)/scripts/viv_gen_part_id.py $(3)/$(4)`; \
+ export VIV_MODE=$(VIVADO_MODE); \
+ export VIV_DESIGN_SRCS=$(call RESOLVE_PATHS,$(DESIGN_SRCS)); \
+ export VIV_VERILOG_DEFS="$(VERILOG_DEFS)"; \
+ cd $(BUILD_DIR); \
+ $(TOOLS_DIR)/scripts/launch_vivado.py --parse-config $(TOOLS_DIR)/scripts/check_config.json -mode $(VIVADO_MODE) -source $(call RESOLVE_PATH,$(1)) -log build.log -journal $(2).jou
+
+
+# Predeclare RFNOC_OOT_SRCS to make sure it's not recursively expanded
+RFNOC_OOT_SRCS :=
diff --git a/fpga/usrp3/tools/make/viv_hls_ip_builder.mak b/fpga/usrp3/tools/make/viv_hls_ip_builder.mak
new file mode 100644
index 000000000..67b52ed2a
--- /dev/null
+++ b/fpga/usrp3/tools/make/viv_hls_ip_builder.mak
@@ -0,0 +1,33 @@
+#
+# Copyright 2015-2017 Ettus Research
+#
+
+# -------------------------------------------------------------------
+# Usage: BUILD_VIVADO_HLS_IP
+# Args: $1 = HLS_IP_NAME (High level synthesis IP name)
+# $2 = PART_ID (<device>/<package>/<speedgrade>)
+# $3 = HLS_IP_SRCS (Absolute paths to the HLS IP source files)
+# $4 = HLS_IP_SRC_DIR (Absolute path to the top level HLS IP src dir)
+# $5 = HLS_IP_BUILD_DIR (Absolute path to the top level HLS IP build dir)
+# Prereqs:
+# - TOOLS_DIR must be defined globally
+# -------------------------------------------------------------------
+BUILD_VIVADO_HLS_IP = \
+ @ \
+ echo "========================================================"; \
+ echo "BUILDER: Building HLS IP $(1)"; \
+ echo "========================================================"; \
+ export HLS_IP_NAME=$(1); \
+ export PART_NAME=$(subst /,,$(2)); \
+ export HLS_IP_SRCS='$(3)'; \
+ export HLS_IP_INCLUDES='$(6)'; \
+ echo "BUILDER: Staging HLS IP in build directory..."; \
+ $(TOOLS_DIR)/scripts/shared-ip-loc-manage.sh --path=$(5)/$(1) reserve; \
+ cp -rf $(4)/$(1)/* $(5)/$(1); \
+ cd $(5); \
+ echo "BUILDER: Building HLS IP..."; \
+ export VIV_ERR=0; \
+ vivado_hls -f $(TOOLS_DIR)/scripts/viv_generate_hls_ip.tcl -l $(1).log || export VIV_ERR=$$?; \
+ $(TOOLS_DIR)/scripts/shared-ip-loc-manage.sh --path=$(5)/$(1) release; \
+ exit $$(($$VIV_ERR))
+
diff --git a/fpga/usrp3/tools/make/viv_ip_builder.mak b/fpga/usrp3/tools/make/viv_ip_builder.mak
new file mode 100644
index 000000000..2663b5862
--- /dev/null
+++ b/fpga/usrp3/tools/make/viv_ip_builder.mak
@@ -0,0 +1,104 @@
+#
+# Copyright 2014 Ettus Research
+#
+
+ifeq ($(SIMULATION),1)
+SYNTH_IP=0
+else
+SYNTH_IP=1
+endif
+
+# -------------------------------------------------------------------
+# Usage: BUILD_VIVADO_IP
+# Args: $1 = IP_NAME (IP name)
+# $2 = ARCH (zynq, kintex7, etc)
+# $3 = PART_ID (<device>/<package>/<speedgrade>[/<tempgrade>[/<silicon revision>]])
+# $4 = IP_SRC_DIR (Absolute path to the top level ip src dir)
+# $5 = IP_BUILD_DIR (Absolute path to the top level ip build dir)
+# $6 = GENERATE_EXAMPLE (0 or 1)
+# Prereqs:
+# - TOOLS_DIR must be defined globally
+# -------------------------------------------------------------------
+BUILD_VIVADO_IP = \
+ @ \
+ echo "========================================================"; \
+ echo "BUILDER: Building IP $(1)"; \
+ echo "========================================================"; \
+ export XCI_FILE=$(call RESOLVE_PATH,$(5)/$(1)/$(1).xci); \
+ export PART_NAME=`python $(TOOLS_DIR)/scripts/viv_gen_part_id.py $(2)/$(3)`; \
+ export GEN_EXAMPLE=$(6); \
+ export SYNTH_IP=$(SYNTH_IP); \
+ echo "BUILDER: Staging IP in build directory..."; \
+ rm -rf $(5)/$(1)/*; \
+ $(TOOLS_DIR)/scripts/shared-ip-loc-manage.sh --path=$(5)/$(1) reserve; \
+ cp -rf $(4)/$(1)/* $(5)/$(1); \
+ echo "BUILDER: Retargeting IP to part $(2)/$(3)..."; \
+ python $(TOOLS_DIR)/scripts/viv_ip_xci_editor.py --output_dir=$(5)/$(1) --target=$(2)/$(3) retarget $(4)/$(1)/$(1).xci; \
+ cd $(5); \
+ echo "BUILDER: Building IP..."; \
+ export VIV_ERR=0; \
+ $(TOOLS_DIR)/scripts/launch_vivado.py -mode batch -source $(call RESOLVE_PATH,$(TOOLS_DIR)/scripts/viv_generate_ip.tcl) -log $(1).log -nojournal || export VIV_ERR=$$?; \
+ $(TOOLS_DIR)/scripts/shared-ip-loc-manage.sh --path=$(5)/$(1) release; \
+ exit $$VIV_ERR
+
+# -------------------------------------------------------------------
+# Usage: BUILD_VIVADO_BD
+# Args: $1 = BD_NAME (IP name)
+# $2 = ARCH (zynq, kintex7, etc)
+# $3 = PART_ID (<device>/<package>/<speedgrade>[/<tempgrade>[/<silicon revision>]])
+# $4 = BD_SRC_DIR (Absolute path to the top level ip src dir)
+# $5 = BD_BUILD_DIR (Absolute path to the top level ip build dir)
+# Prereqs:
+# - TOOLS_DIR must be defined globally
+# -------------------------------------------------------------------
+BUILD_VIVADO_BD = \
+ @ \
+ echo "========================================================"; \
+ echo "BUILDER: Building BD $(1)"; \
+ echo "========================================================"; \
+ export BD_FILE=$(call RESOLVE_PATH,$(5)/$(1)/$(1).bd); \
+ export PART_NAME=`python $(TOOLS_DIR)/scripts/viv_gen_part_id.py $(2)/$(3)`; \
+ echo "BUILDER: Staging BD in build directory..."; \
+ rm $(5)/$(1)/* -rf; \
+ $(TOOLS_DIR)/scripts/shared-ip-loc-manage.sh --path=$(5)/$(1) reserve; \
+ cp -rf $(4)/$(1)/* $(5)/$(1); \
+ echo "BUILDER: Retargeting BD to part $(2)/$(3)..."; \
+ cd $(5)/$(1); \
+ echo "BUILDER: Building BD..."; \
+ export VIV_ERR=0; \
+ $(TOOLS_DIR)/scripts/launch_vivado.py -mode batch -source $(call RESOLVE_PATH,$(TOOLS_DIR)/scripts/viv_generate_bd.tcl) -log $(1).log -nojournal || export VIV_ERR=$$?; \
+ $(TOOLS_DIR)/scripts/shared-ip-loc-manage.sh --path=$(5)/$(1) release; \
+ exit $$VIV_ERR
+
+# -------------------------------------------------------------------
+# Usage: BUILD_VIVADO_BDTCL
+# Args: $1 = BD_NAME (IP name)
+# $2 = ARCH (zynq, kintex7, etc)
+# $3 = PART_ID (<device>/<package>/<speedgrade>[/<tempgrade>[/<silicon revision>]])
+# $4 = BDTCL_SRC_DIR (Absolute path to the top level ip src dir)
+# $5 = BDTCL_BUILD_DIR (Absolute path to the top level ip build dir)
+# $6 = BD_IP_REPOS (space-separated list of absolute paths to IP repos)
+# $7 = BD_HDL_SRCS (space-separated list of absolute paths to HDL sources)
+# Prereqs:
+# - TOOLS_DIR must be defined globally
+# -------------------------------------------------------------------
+BUILD_VIVADO_BDTCL = \
+ @ \
+ echo "========================================================"; \
+ echo "BUILDER: Generating BD from Tcl $(1)"; \
+ echo "========================================================"; \
+ export BD_FILE=$(call RESOLVE_PATH,$(5)/$(1)/$(1).tcl); \
+ export PART_NAME=`python $(TOOLS_DIR)/scripts/viv_gen_part_id.py $(2)/$(3)`; \
+ export BD_IP_REPOS=$(call RESOLVE_PATH,$(6)); \
+ export BD_HDL_SRCS=$(call RESOLVE_PATHS,$(7)); \
+ echo "BUILDER: Staging BD Tcl in build directory..."; \
+ rm $(5)/$(1)/* -rf; \
+ $(TOOLS_DIR)/scripts/shared-ip-loc-manage.sh --path=$(5)/$(1) reserve; \
+ cp -rf $(4)/$(1)/* $(5)/$(1); \
+ echo "BUILDER: Retargeting BD to part $(2)/$(3)..."; \
+ cd $(5)/$(1); \
+ echo "BUILDER: Generating BD..."; \
+ export VIV_ERR=0; \
+ $(TOOLS_DIR)/scripts/launch_vivado.py -mode batch -source $(call RESOLVE_PATH,$(TOOLS_DIR)/scripts/viv_generate_bd.tcl) -log $(1).log -nojournal || export VIV_ERR=$$?; \
+ $(TOOLS_DIR)/scripts/shared-ip-loc-manage.sh --path=$(5)/$(1) release; \
+ exit $$VIV_ERR
diff --git a/fpga/usrp3/tools/make/viv_preamble.mak b/fpga/usrp3/tools/make/viv_preamble.mak
new file mode 100644
index 000000000..208858757
--- /dev/null
+++ b/fpga/usrp3/tools/make/viv_preamble.mak
@@ -0,0 +1,77 @@
+#
+# Copyright 2014-2015 Ettus Research
+#
+
+# -------------------------------------------------------------------
+# Environment Setup
+# -------------------------------------------------------------------
+ifeq ($(VIV_PLATFORM),Cygwin)
+RESOLVE_PATH = $(subst \,/,$(shell cygpath -aw $(1)))
+RESOLVE_PATHS = "$(foreach path,$(1),$(subst \,/,$(shell cygpath -aw $(abspath $(path)))))"
+else
+RESOLVE_PATH = $(1)
+RESOLVE_PATHS = "$(1)"
+endif
+
+# -------------------------------------------------------------------
+# Project Setup
+# -------------------------------------------------------------------
+# Requirement: BASE_DIR must be defined
+
+TOOLS_DIR = $(abspath $(BASE_DIR)/../tools)
+LIB_DIR = $(abspath $(BASE_DIR)/../lib)
+SIMLIB_DIR = $(abspath $(BASE_DIR)/../sim)
+LIB_IP_DIR = $(abspath $(LIB_DIR)/ip)
+HLS_IP_DIR = $(abspath $(LIB_DIR)/hls)
+
+O ?= .
+
+ifdef NAME
+BUILD_DIR = $(abspath $(O)/build-$(NAME))
+else
+BUILD_DIR = $(abspath $(O)/build)
+endif
+
+IP_BUILD_DIR = $(abspath ./build-ip/$(subst /,,$(PART_ID)))
+
+# -------------------------------------------------------------------
+# Git Hash Retrieval
+# -------------------------------------------------------------------
+GIT_HASH=$(shell $(TOOLS_DIR)/scripts/git-hash.sh --hashfile=$(TOOLS_DIR)/../../project.githash)
+GIT_HASH_VERILOG_DEF=$(addprefix GIT_HASH=32'h,$(GIT_HASH))
+
+# -------------------------------------------------------------------
+# GUI Mode switch. Calling with GUI:=1 will launch Vivado GUI for build
+# -------------------------------------------------------------------
+ifeq ($(GUI),1)
+VIVADO_MODE=gui
+else
+VIVADO_MODE=batch
+endif
+
+# -------------------------------------------------------------------
+# Toolchain dependency target
+# -------------------------------------------------------------------
+.check_tool:
+ @echo "BUILDER: Checking tools..."
+ @echo -n "* "; bash --version | grep bash || (echo "ERROR: Bash not found in environment. Please install it"; exit 1;)
+ @echo -n "* "; python --version || (echo "ERROR: Python not found in environment. Please install it"; exit 1;)
+ @echo -n "* "; vivado -version 2>&1 | grep Vivado || (echo "ERROR: Vivado not found in environment. Please run setupenv.sh"; exit 1;)
+
+# -------------------------------------------------------------------
+# Intermediate build dirs
+# -------------------------------------------------------------------
+.build_dirs:
+ @mkdir -p $(BUILD_DIR)
+ @mkdir -p $(IP_BUILD_DIR)
+
+.prereqs: .check_tool .build_dirs
+
+.PHONY: .check_tool .build_dirs .prereqs
+
+# -------------------------------------------------------------------
+# Validate prerequisites
+# -------------------------------------------------------------------
+ifndef PART_ID
+ $(error PART_ID was empty or not set)
+endif
diff --git a/fpga/usrp3/tools/make/viv_sim_preamble.mak b/fpga/usrp3/tools/make/viv_sim_preamble.mak
new file mode 100644
index 000000000..47ad153f4
--- /dev/null
+++ b/fpga/usrp3/tools/make/viv_sim_preamble.mak
@@ -0,0 +1,60 @@
+#
+# Copyright 2016 Ettus Research
+#
+
+include $(BASE_DIR)/../tools/make/viv_preamble.mak
+SIMULATION = 1
+SIM_RUNTIME_US = 1000000000
+
+# -------------------------------------------------------------------
+# Setup simulation
+# -------------------------------------------------------------------
+# Define part using PART_ID (<device>/<package>/<speedgrade>)
+# and architecture (zynq, kintex7, or artix7)
+# User can override these if needed
+ARCH = kintex7
+PART_ID = xc7k410t/ffg900/-2
+
+# Include makefiles and sources for the DUT and its dependencies
+include $(BASE_DIR)/../lib/sim/Makefile.srcs
+
+DESIGN_SRCS = $(abspath $(SIM_DESIGN_SRCS))
+
+# Include interfaces and classes
+include $(BASE_DIR)/../sim/general/Makefile.srcs
+include $(BASE_DIR)/../sim/axi/Makefile.srcs
+include $(BASE_DIR)/../sim/control/Makefile.srcs
+include $(BASE_DIR)/../sim/rfnoc/Makefile.srcs
+
+INC_SRCS = $(abspath \
+$(SIM_GENERAL_SRCS) \
+$(SIM_AXI_SRCS) \
+$(SIM_CONTROL_SRCS) \
+$(SIM_RFNOC_SRCS) \
+)
+
+# Predeclare RFNOC_OOT_SRCS to make sure it's not recursively expanded
+RFNOC_OOT_SRCS :=
+
+all:
+ $(error "all" or "<empty>" is not a valid target. Run make help for a list of supported targets.)
+
+ipclean:
+ @rm -rf $(abspath ./build-ip)
+
+cleanall: ipclean clean
+
+help::
+ @echo "-----------------"
+ @echo "Supported Targets"
+ @echo "-----------------"
+ @echo "ipclean: Cleanup all IP intermediate files"
+ @echo "clean: Cleanup all simulator intermediate files"
+ @echo "cleanall: Cleanup everything!"
+ @echo "vsim: Simulate with Modelsim"
+ @echo "vlint: Lint simulation files with Modelsim's Verilog compiler"
+ @echo "vclean: Cleanup Modelsim's intermediates files"
+ @echo "xsim: Simulate with Vivado's XSIM simulator"
+ @echo "xclean: Cleanup Vivado's XSIM intermediate files"
+
+.PHONY: ipclean cleanall help
diff --git a/fpga/usrp3/tools/make/viv_simulator.mak b/fpga/usrp3/tools/make/viv_simulator.mak
new file mode 100644
index 000000000..add3e651d
--- /dev/null
+++ b/fpga/usrp3/tools/make/viv_simulator.mak
@@ -0,0 +1,85 @@
+#
+# Copyright 2014-2015 Ettus Research
+#
+
+# -------------------------------------------------------------------
+# Mode switches
+# -------------------------------------------------------------------
+
+# Calling with FAST:=1 will switch to using unifast libs
+ifeq ($(FAST),1)
+SIM_FAST=true
+else
+SIM_FAST=false
+endif
+
+# -------------------------------------------------------------------
+# Path variables
+# -------------------------------------------------------------------
+
+ifdef SIM_COMPLIBDIR
+COMPLIBDIR = $(call RESOLVE_PATH,$(SIM_COMPLIBDIR))
+endif
+
+# Parse part name from ID
+PART_NAME=$(subst /,,$(PART_ID))
+
+# -------------------------------------------------------------------
+# Usage: SETUP_AND_LAUNCH_SIMULATION
+# Args: $1 = Simulator Name
+# -------------------------------------------------------------------
+SETUP_AND_LAUNCH_SIMULATION = \
+ @ \
+ export VIV_SIMULATOR=$1; \
+ export VIV_DESIGN_SRCS=$(call RESOLVE_PATHS,$(DESIGN_SRCS)); \
+ export VIV_SIM_SRCS=$(call RESOLVE_PATHS,$(SIM_SRCS)); \
+ export VIV_INC_SRCS=$(call RESOLVE_PATHS,$(INC_SRCS)); \
+ export VIV_SIM_TOP=$(SIM_TOP); \
+ export VIV_SYNTH_TOP="$(SYNTH_DUT)"; \
+ export VIV_PART_NAME=$(PART_NAME); \
+ export VIV_SIM_RUNTIME=$(SIM_RUNTIME_US); \
+ export VIV_SIM_FAST="$(SIM_FAST)"; \
+ export VIV_SIM_COMPLIBDIR=$(COMPLIBDIR); \
+ export VIV_SIM_USER_DO=$(MODELSIM_USER_DO); \
+ export VIV_MODE=$(VIVADO_MODE); \
+ export VIV_SIM_64BIT=$(MODELSIM_64BIT); \
+ $(TOOLS_DIR)/scripts/launch_vivado.sh -mode $(VIVADO_MODE) -source $(call RESOLVE_PATH,$(TOOLS_DIR)/scripts/viv_sim_project.tcl) -log xsim.log -nojournal
+
+.SECONDEXPANSION:
+
+##xsim: Run the simulation using the Xilinx Vivado Simulator
+xsim: .check_tool $(DESIGN_SRCS) $(SIM_SRCS) $(INC_SRCS)
+ $(call SETUP_AND_LAUNCH_SIMULATION,XSim)
+
+##xclean: Cleanup Xilinx Vivado Simulator intermediate files
+xclean:
+ @rm -f xsim*.log
+ @rm -rf xsim_proj
+ @rm -f xvhdl.log
+ @rm -f xvhdl.pba
+ @rm -f xvlog.log
+ @rm -f xvlog.pb
+ @rm -f vivado_pid*.str
+
+##vsim: Run the simulation using Modelsim
+vsim: .check_tool $(COMPLIBDIR) $(DESIGN_SRCS) $(SIM_SRCS) $(INC_SRCS)
+ $(call SETUP_AND_LAUNCH_SIMULATION,Modelsim)
+
+##vlint: Run verilog compiler to lint files.
+vlint: .check_tool
+ @vlog $(SIM_SRCS) +incdir+$(BASE_DIR)/../sim/axi +incdir+$(BASE_DIR)/../sim/general +incdir+$(BASE_DIR)/../sim/control +incdir+$(BASE_DIR)/../sim/rfnoc +incdir+$(BASE_DIR)/../lib/rfnoc
+
+##vclean: Cleanup Modelsim intermediate files
+vclean:
+ @rm -f modelsim*.log
+ @rm -rf modelsim_proj
+ @rm -f vivado_pid*.str
+ @rm -rf work
+
+# Use clean with :: to allow "make clean" to work with multiple makefiles
+clean:: xclean vclean
+
+help::
+ @grep -h "##" $(abspath $(lastword $(MAKEFILE_LIST))) | grep -v "\"##\"" | sed -e 's/\\$$//' | sed -e 's/##//'
+
+.PHONY: xsim xsim_hls xclean vsim vlint vclean clean help
diff --git a/fpga/usrp3/tools/scripts/auto_inst_e310.yml b/fpga/usrp3/tools/scripts/auto_inst_e310.yml
new file mode 100644
index 000000000..5842e17c2
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/auto_inst_e310.yml
@@ -0,0 +1,22 @@
+# auto-inst file for E310
+# To regenerate rfnoc_ce_auto_inst_e310.v:
+# ./uhd_image_builder.py -d e310 -t E310_RFNOC_sg3 -y auto_inst_e310.yml -o rfnoc_ce_auto_inst_e310.v
+
+- block: axi_fifo_loopback
+ parameters:
+
+- block: window
+ parameters:
+
+- block: fft
+ parameters:
+
+- block: fosphor
+ parameters:
+ MTU: 12
+
+- block: axi_fifo_loopback
+ parameters:
+
+- block: fir_filter
+ parameters:
diff --git a/fpga/usrp3/tools/scripts/auto_inst_x310.yml b/fpga/usrp3/tools/scripts/auto_inst_x310.yml
new file mode 100644
index 000000000..26548031b
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/auto_inst_x310.yml
@@ -0,0 +1,29 @@
+# auto-inst file for X310
+# To regenerate rfnoc_ce_auto_inst_x310.v:
+# ./uhd_image_builder.py -d x310 -t X310_RFNOC_HG -m 10 --fill-with-fifos -y auto_inst_x310.yml -o rfnoc_ce_auto_inst_x310.v
+
+- block: ddc
+ parameters:
+ NOC_ID: 64'hDDC0_0000_0000_0001
+ NUM_CHAINS: 1
+
+- block: duc
+ parameters:
+
+- block: fft
+ parameters:
+
+- block: window
+ parameters:
+
+- block: fir_filter
+ parameters:
+
+- block: siggen
+ parameters:
+
+- block: keep_one_in_n
+ parameters:
+
+- block: fosphor
+ parameters:
diff --git a/fpga/usrp3/tools/scripts/check_config.json b/fpga/usrp3/tools/scripts/check_config.json
new file mode 100644
index 000000000..f15904043
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/check_config.json
@@ -0,0 +1,16 @@
+{
+ "warning": {
+ "ignore": [
+ "BD 41-1661",
+ "Synth 8-2306",
+ "build-ip",
+ "Synth 8-350",
+ "Synth 8-3331"
+ ]
+ },
+ "critical warning": {
+ "ignore": [
+ "Synth 8-2490"
+ ]
+ }
+}
diff --git a/fpga/usrp3/tools/scripts/git-hash.sh b/fpga/usrp3/tools/scripts/git-hash.sh
new file mode 100755
index 000000000..ed8d7963f
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/git-hash.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+function help {
+ cat <<EOHELP
+Utilities to read/write the git hash for the project.
+
+The script will attempt to get info in the following order:
+- Using a git command
+- Using a hash file (if specified by user)
+- Otherwise hash = 0xFFFFFFFF
+
+Usage: $0 [--help|-h] [--write] [--hashfile=HASH_FILE]
+
+--hashfile : Location of git hash file [project.githash]
+--write : Write the git hash to HASH_FILE in the Ettus 32-bit register format
+--help : Shows this message
+
+EOHELP
+}
+
+hashfile="project.githash"
+write=0
+for arg in "$@"; do
+ if [[ $arg == "--help" ]]; then
+ help
+ exit 0
+ elif [[ $arg =~ "--hashfile="(.*) ]]; then
+ hashfile=${BASH_REMATCH[1]}
+ elif [[ $arg =~ "--write" ]]; then
+ write=1
+ fi
+done
+
+# Default hash value (failsafe)
+ettus_githash32="ffffffff"
+
+if [[ $write -eq 0 ]]; then
+ git_success=0
+ # First attempt: Use git
+ if [[ $(command -v git) != "" ]]; then
+ # Attempt to get hash from git.
+ # This command will fail if we are not in a git tree
+ short_hash="$(git rev-parse --verify HEAD --short=7 2>/dev/null)" && git_success=1
+ if [[ $git_success -eq 1 ]]; then
+ # Check if tree is clean. If yes, the top 4 bits are 0
+ if (git diff --quiet 2>/dev/null); then
+ ettus_githash32="0$short_hash"
+ else
+ ettus_githash32="f$short_hash"
+ fi
+ fi
+ fi
+ # Second attempt: Read from file if it exists
+ if [[ $git_success -eq 0 ]]; then
+ if [[ -f $hashfile ]]; then
+ ettus_githash32=$(cat $hashfile)
+ fi
+ fi
+ echo ${ettus_githash32}
+ exit 0
+else
+ # Require git
+ command -v git >/dev/null || { echo "ERROR: git not found"; exit 1; }
+ # Get hash from git
+ short_hash="$(git rev-parse --verify HEAD --short=7 2>/dev/null)" || { echo "ERROR: Not a git tree"; exit 2; }
+ # Check if tree is clean. If yes, the top 4 bits are 0
+ if (git diff --quiet 2>/dev/null); then
+ ettus_githash32="0$short_hash"
+ else
+ ettus_githash32="f$short_hash"
+ fi
+ echo $ettus_githash32 > $hashfile
+ echo "INFO: Wrote $ettus_githash32 to $hashfile"
+ exit 0
+fi
diff --git a/fpga/usrp3/tools/scripts/ise_jtag_program.sh b/fpga/usrp3/tools/scripts/ise_jtag_program.sh
new file mode 100755
index 000000000..c6699424d
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/ise_jtag_program.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+echo "loading $1 into FPGA..."
+
+CMD_PATH=/tmp/impact.cmd
+
+echo "generating ${CMD_PATH}..."
+
+echo "setmode -bscan" > ${CMD_PATH}
+echo "setcable -p auto" >> ${CMD_PATH}
+echo "addDevice -p 1 -file $1" >> ${CMD_PATH}
+echo "program -p 1" >> ${CMD_PATH}
+echo "quit" >> ${CMD_PATH}
+
+impact -batch ${CMD_PATH}
+
+echo "done!"
diff --git a/fpga/usrp3/tools/scripts/launch_vivado.py b/fpga/usrp3/tools/scripts/launch_vivado.py
new file mode 100755
index 000000000..01774bef3
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/launch_vivado.py
@@ -0,0 +1,475 @@
+#!/usr/bin/env python
+#
+# Notice: Some parts of this file were copied from PyBOMBS, which has a
+# different copyright, and is highlighted appropriately. The following
+# copyright notice pertains to all the parts written specifically for this
+# script.
+#
+# Copyright 2016 Ettus Research
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""
+Run Vivado builds
+"""
+
+from __future__ import print_function
+import os
+import sys
+import re
+import json
+from datetime import datetime
+import time
+import argparse
+import subprocess
+import threading
+try:
+ from Queue import Queue, Empty
+except ImportError:
+ from queue import Queue, Empty # Py3k
+
+READ_TIMEOUT = 0.1 # s
+
+#############################################################################
+# The following functions were copied with minor modifications from PyBOMBS:
+def get_console_width():
+ '''
+ Returns width of console.
+
+ http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
+ '''
+ env = os.environ
+ def ioctl_GWINSZ(fd):
+ try:
+ import fcntl, termios, struct
+ cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
+ except:
+ return
+ return cr
+ cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
+ if not cr:
+ try:
+ fd = os.open(os.ctermid(), os.O_RDONLY)
+ cr = ioctl_GWINSZ(fd)
+ os.close(fd)
+ except:
+ pass
+ if not cr:
+ cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
+ return cr[1]
+
+def which(program):
+ """
+ Equivalent to Unix' `which` command.
+ Returns None if the executable `program` can't be found.
+
+ If a full path is given (e.g. /usr/bin/foo), it will return
+ the path if the executable can be found, or None otherwise.
+
+ If no path is given, it will search PATH.
+ """
+ def is_exe(fpath):
+ " Check fpath is an executable "
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+ if os.path.split(program)[0] and is_exe(program):
+ return program
+ else:
+ for path in os.environ.get("PATH", "").split(os.pathsep):
+ exe_file = os.path.join(path, program)
+ if is_exe(exe_file):
+ return exe_file
+ return None
+#
+# End of functions copied from PyBOMBS.
+#############################################################################
+
+def print_timer(time_delta):
+ """docstring for print_timer"""
+ hours, secs = divmod(time_delta.seconds, 3600)
+ mins, secs = divmod(secs, 60)
+ return "[{h:02}:{m:02}:{s:02}]".format(
+ h=hours, m=mins, s=secs,
+ )
+
+def list_search(patterns, string):
+ " Returns True if string matches any element of pattern "
+ for pattern in patterns:
+ if re.search(pattern, string) is not None:
+ return True
+ return False
+
+def parse_args():
+ " Parses args for this script, and for Vivado. "
+ parser = argparse.ArgumentParser(
+ description="Run Vivado and parse output.",
+ )
+ parser.add_argument(
+ '--no-color', action="store_true",
+ help="Don't colorize output.",
+ )
+ parser.add_argument(
+ '--vivado-command', default=None,
+ help="Vivado command.",
+ )
+ parser.add_argument(
+ '--parse-config', default=None,
+ help="Additional parser configurations",
+ )
+ parser.add_argument(
+ '-v', '--verbose', default=False,
+ action='store_true',
+ help="Print Vivado output")
+ parser.add_argument(
+ '--warnings', default=False,
+ action='store_true',
+ help="Print Vivado warnings")
+ our_args, viv_args = parser.parse_known_args()
+ return our_args, " ".join(viv_args)
+
+class VivadoRunner(object):
+ " Vivado Runner "
+ colors = {
+ 'warning': '\033[0;35m',
+ 'critical warning': '\033[33m',
+ 'error': '\033[1;31m',
+ 'fatal': '\033[1;31m',
+ 'task': '\033[32m',
+ 'cmd': '\033[1;34m',
+ 'normal': '\033[0m',
+ }
+ # Black 0;30 Dark Gray 1;30
+ # Blue 0;34 Light Blue 1;34
+ # Green 0;32 Light Green 1;32
+ # Cyan 0;36 Light Cyan 1;36
+ # Red 0;31 Light Red 1;31
+ # Purple 0;35 Light Purple 1;35
+ # Brown 0;33 Yellow 1;33
+ # Light Gray 0;37 White 1;37
+
+ viv_tcl_cmds = {
+ 'synth_design' : 'Synthesis',
+ 'opt_design': 'Logic Optimization',
+ 'place_design': 'Placer',
+ 'route_design': 'Routing',
+ 'phys_opt_design': 'Physical Synthesis',
+ 'report_timing' : 'Timing Reporting',
+ 'report_power': 'Power Reporting',
+ 'report_drc': 'DRC',
+ 'write_bitstream': 'Write Bitstream',
+ }
+
+ def __init__(self, args, viv_args):
+ self.status = ''
+ self.args = args
+ self.current_task = "Initialization"
+ self.current_phase = "Starting"
+ self.command = args.vivado_command + " " + viv_args
+ self.notif_queue = Queue()
+ self.msg_counters = {}
+ self.fatal_error_found = False
+ self.line_types = {
+ 'cmd': {
+ 'regexes': [
+ '^Command: .+',
+ ],
+ 'action': self.show_cmd,
+ 'id': "Command",
+ },
+ 'task': {
+ 'regexes': [
+ '^Starting .* Task',
+ '^.*Translating synthesized netlist.*',
+ '^\[TEST CASE .*',
+ ],
+ 'action': self.update_task,
+ 'id': "Task",
+ },
+ 'phase': {
+ 'regexes': [
+ '^Phase (?P<id>[a-zA-Z0-9/. ]*)$',
+ '^Start (?P<id>[a-zA-Z0-9/. ]*)$',
+ '^(?P<id>TESTBENCH STARTED: [\w_]*)$',
+ ],
+ 'action': self.update_phase,
+ 'id': "Phase",
+ },
+ 'warning': {
+ 'regexes': [
+ '^WARNING'
+ ],
+ 'action': lambda x: self.act_on_build_msg('warning', x),
+ 'id': "Warning",
+ 'fatal': [
+ ]
+ },
+ 'critical warning': {
+ 'regexes': [
+ '^CRITICAL WARNING'
+ ],
+ 'action': lambda x: self.act_on_build_msg('critical warning', x),
+ 'id': "Critical Warning",
+ 'fatal': [
+ ]
+ },
+ 'error': {
+ 'regexes': [
+ '^ERROR',
+ 'no such file or directory',
+ '^Result: FAILED'
+ ],
+ 'action': lambda x: self.act_on_build_msg('error', x),
+ 'id': "Error",
+ 'fatal': [
+ '.', # All errors are fatal by default
+ ]
+ },
+ 'test': {
+ 'regexes': [
+ '^ - T'
+ '^Result: '
+ ],
+ 'action': self.update_testbench,
+ 'id': "Test"
+ }
+ }
+ self.parse_config = None
+ if args.parse_config is not None:
+ try:
+ args.parse_config = os.path.normpath(args.parse_config)
+ parse_config = json.load(open(args.parse_config))
+ self.add_notification(
+ "Using parser configuration from: {pc}".format(pc=args.parse_config),
+ color=self.colors.get('normal')
+ )
+ loadables = ('regexes', 'ignore', 'fatal')
+ for line_type in self.line_types:
+ for loadable in loadables:
+ self.line_types[line_type][loadable] = \
+ self.line_types[line_type].get(loadable, []) + \
+ parse_config.get(line_type, {}).get(loadable, [])
+ except (IOError, ValueError):
+ self.add_notification(
+ "Could not read parser configuration from: {pc}".format(pc=args.parse_config),
+ color=self.colors.get('warning')
+ )
+ self.tty = sys.stdout.isatty()
+ self.timer = datetime.now() # Make sure this is the last line in ctor
+
+ def run(self):
+ """
+ Kick off Vivado build.
+
+ Returns True if it all passed.
+ """
+ def enqueue_output(stdout_data, stdout_queue):
+ " Puts the output from the process into the queue "
+ for line in iter(stdout_data.readline, b''):
+ stdout_queue.put(line)
+ stdout_data.close()
+ def poll_queue(q):
+ " Safe polling from queue "
+ try:
+ return q.get(timeout=READ_TIMEOUT).decode('utf-8')
+ except UnicodeDecodeError:
+ pass
+ except Empty:
+ pass
+ return ""
+ # Start process
+ self.add_notification(
+ "Executing command: {cmd}".format(cmd=self.command), add_time=True, color=self.colors.get('cmd')
+ )
+ proc = subprocess.Popen(
+ self.command,
+ shell=True, # Yes we run this in a shell. Unsafe but helps with Vivado.
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT # Pipe it all out via stdout
+ )
+ # Init thread and queue
+ q_stdout = Queue()
+ t = threading.Thread(target=enqueue_output, args=(proc.stdout, q_stdout))
+ # End the thread when the program terminates
+ t.daemon = True
+ t.start()
+ status_line_t = threading.Thread(target=VivadoRunner.run_loop, args=(self.print_status_line, 0.5 if self.tty else 60*10))
+ status_line_t.daemon = True
+ status_line_t.start()
+ # Run loop
+ while proc.poll() is None or not q_stdout.empty(): # Run while process is alive
+ line_stdout = poll_queue(q_stdout)
+ self.update_output(line_stdout)
+ success = (proc.returncode == 0) and not self.fatal_error_found
+ self.cleanup_output(success)
+ return success
+
+ def update_output(self, lines):
+ " Receives a line from Vivado output and acts upon it. "
+ self.process_line(lines)
+
+ @staticmethod
+ def run_loop(func, delay, *args, **kwargs):
+ while True:
+ func(*args, **kwargs)
+ time.sleep(delay)
+
+ def print_status_line(self):
+ " Prints status on stdout"
+ old_status_line_len = len(self.status)
+ self.update_status_line()
+ sys.stdout.write("\x1b[2K\r") # Scroll cursor back to beginning and clear last line
+ self.flush_notification_queue(old_status_line_len)
+ sys.stdout.write(self.status)
+ sys.stdout.flush()
+ # Make sure we print enough spaces to clear out all of the previous message
+ # if not msgs_printed:
+ # sys.stdout.write(" " * max(0, old_status_line_len - len(self.status)))
+
+ def cleanup_output(self, success):
+ " Run final printery after all is said and done. "
+ # Check message counts are within limits
+ self.update_phase("Finished")
+ self.add_notification(
+ "Process terminated. Status: {status}".format(status='Success' if success else 'Failure'),
+ add_time=True,
+ color=self.colors.get("task" if success else "error")
+ )
+ sys.stdout.write("\n")
+ self.flush_notification_queue(len(self.status))
+ print("")
+ print("========================================================")
+ print("Warnings: ", self.msg_counters.get('warning', 0))
+ print("Critical Warnings: ", self.msg_counters.get('critical warning', 0))
+ print("Errors: ", self.msg_counters.get('error', 0))
+ print("")
+ sys.stdout.flush()
+
+ def process_line(self, lines):
+ " process line "
+ for line in [l.rstrip() for l in lines.split("\n") if len(l.strip())]:
+ line_info, line_data = self.classify_line(line)
+ if line_info is not None:
+ self.line_types[line_info]['action'](line_data)
+ elif self.args.verbose:
+ print(line)
+
+ def classify_line(self, line):
+ """
+ Identify the current line. Return None if the line can't be identified.
+ """
+ for line_type in self.line_types:
+ for regex in self.line_types[line_type]['regexes']:
+ re_obj = re.search(regex, line)
+ if re_obj is not None:
+ return line_type, re_obj.groupdict().get('id', line)
+ return None, None
+
+ def update_status_line(self):
+ " Update self.status. Does not print anything! "
+ status_line = "{timer} Current task: {task} +++ Current Phase: {phase}"
+ self.status = status_line.format(
+ timer=print_timer(datetime.now() - self.timer),
+ task=self.current_task.strip(),
+ phase=self.current_phase.strip(),
+ )
+
+ def add_notification(self, msg, add_time=False, color=None):
+ """
+ Format msg and add it as a notification to the queue.
+ """
+ if add_time:
+ msg = print_timer(datetime.now() - self.timer) + " " + msg
+ if color is not None and not self.args.no_color:
+ msg = color + msg + self.colors.get('normal')
+ self.notif_queue.put(msg)
+
+ def flush_notification_queue(self, min_len):
+ " Print all strings in the notification queue. "
+ msg_printed = False
+ while not self.notif_queue.empty():
+ msg = self.notif_queue.get().strip()
+ print(msg)
+ msg_printed = True
+ return msg_printed
+
+ def act_on_build_msg(self, msg_type, msg):
+ """
+ Act on a warning, error, critical warning, etc.
+ """
+ if list_search(self.line_types[msg_type].get('fatal', []), msg):
+ self.add_notification(msg, color=self.colors.get('fatal'))
+ self.fatal_error_found = True
+ elif not list_search(self.line_types[msg_type].get('ignore', []), msg):
+ self.add_notification(msg, color=self.colors.get(msg_type))
+ self.msg_counters[msg_type] = self.msg_counters.get(msg_type, 0) + 1
+
+ def show_cmd(self, tcl_cmd):
+ " Show the current command "
+ self.update_phase("Finished")
+ tcl_cmd = tcl_cmd.replace("Command:", "").strip()
+ #sys.stdout.write("\n")
+ self.add_notification("Executing Tcl: " + tcl_cmd,
+ add_time=True, color=self.colors.get("cmd"))
+        cmd = tcl_cmd.strip().split()[0]
+ if cmd in self.viv_tcl_cmds:
+ cmd = self.viv_tcl_cmds[cmd]
+ self.update_task("Starting " + cmd + " Command", is_new=False)
+ #self.flush_notification_queue(len(self.status))
+
+ def update_task(self, task, is_new=True):
+ " Update current task "
+ # Special case: Treat "translation" as a phase as well
+ if "Translating synthesized netlist" in task:
+ task = "Translating Synthesized Netlist"
+ filtered_task = task.replace("Starting", "").replace("Task", "").replace("Command", "")
+ if is_new and (filtered_task != self.current_task):
+ self.update_phase("Finished")
+ self.current_task = filtered_task
+ self.current_phase = "Starting"
+ self.add_notification(task, add_time=True, color=self.colors.get("task"))
+ sys.stdout.write("\n")
+ self.print_status_line()
+
+ def update_phase(self, phase):
+ " Update current phase "
+ self.current_phase = phase.strip()
+ self.current_task = self.current_task.replace("Phase", "")
+ sys.stdout.write("\n")
+ self.print_status_line()
+
+ def update_testbench(self, testbench):
+ pass # Do nothing
+
+
+def main():
+ " Go, go, go! "
+ args, viv_args = parse_args()
+ if args.vivado_command is None:
+ if which("vivado"):
+ args.vivado_command = "vivado"
+ elif which("vivado_lab"):
+ args.vivado_command = "vivado_lab"
+ else:
+ print("Cannot find Vivado executable!")
+ return False
+ try:
+ return VivadoRunner(args, viv_args).run()
+ except KeyboardInterrupt:
+ print("")
+ print("")
+ print("Caught Ctrl-C. Exiting.")
+ print("")
+ return False
+
+if __name__ == "__main__":
+ exit(not main())
+
diff --git a/fpga/usrp3/tools/scripts/launch_vivado.sh b/fpga/usrp3/tools/scripts/launch_vivado.sh
new file mode 100755
index 000000000..a0cce6e99
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/launch_vivado.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+#------------------------------------------
+# Parse command line args
+#------------------------------------------
+
+function help {
+ cat <<EOHELP
+
+Usage: $0 [--help|-h] [--no-color] [<vivado args>]
+
+--no-color : Don't colorize command output
+--help, -h : Shows this message.
+
+EOHELP
+}
+
+viv_args=""
+colorize=1
+for i in "$@"; do
+ case $i in
+ -h|--help)
+ help
+ exit 0
+ ;;
+ --no-color)
+ colorize=0
+ ;;
+ *)
+ viv_args="$viv_args $i"
+ ;;
+ esac
+done
+
+#------------------------------------------
+# Colorize
+#------------------------------------------
+
+# VIV_COLOR_SCHEME must be defined in the environment setup script
+if [ $colorize -eq 0 ]; then
+ VIV_COLOR_SCHEME=none
+fi
+
+case "$VIV_COLOR_SCHEME" in
+ default)
+ CLR_OFF='tput sgr0'
+ ERR_CLR='tput setaf 1'
+ CRIWARN_CLR='tput setaf 1'
+ WARN_CLR='tput setaf 3'
+ ;;
+ *)
+ CLR_OFF=''
+ ERR_CLR=$CLR_OFF
+ CRIWARN_CLR=$CLR_OFF
+ WARN_CLR=$CLR_OFF
+esac
+
+trim() {
+ local var="$*"
+ var="${var#"${var%%[![:space:]]*}"}" # remove leading whitespace characters
+ var="${var%"${var##*[![:space:]]}"}" # remove trailing whitespace characters
+ echo -n "$var"
+}
+
+VIVADO_COMMAND="vivado"
+if command -v vivado_lab >/dev/null 2>&1; then
+ VIVADO_COMMAND=vivado_lab
+fi
+
+$VIVADO_COMMAND $viv_args 2>&1 | while IFS= read -r line
+
+do
+ if [[ $line != \#* ]]; then # Ignore script output
+ case $(trim $line) in
+ *FATAL:*|*Fatal:*)
+ eval $ERR_CLR; echo "$line"; eval $CLR_OFF
+ ;;
+ *ERROR:*|*Error:*)
+ eval $ERR_CLR; echo "$line"; eval $CLR_OFF
+ ;;
+            *CRITICAL[[:space:]]WARNING:*|*Critical[[:space:]]Warning:*)
+ eval $CRIWARN_CLR; echo "$line"; eval $CLR_OFF
+ ;;
+ *WARNING:*|*Warning:*)
+ eval $WARN_CLR; echo "$line"; eval $CLR_OFF
+ ;;
+ *)
+ echo "$line"
+ esac
+ else
+ echo "$line"
+ fi
+done
+exit ${PIPESTATUS[0]}
diff --git a/fpga/usrp3/tools/scripts/setupenv_base.sh b/fpga/usrp3/tools/scripts/setupenv_base.sh
new file mode 100644
index 000000000..0cdf71370
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/setupenv_base.sh
@@ -0,0 +1,472 @@
+#!/bin/bash
+#
+# Copyright 2015 Ettus Research
+#
+
+#----------------------------------------------------------------------------
+# Global defaults
+#----------------------------------------------------------------------------
+export VIV_PLATFORM=$(uname -o)
+
+# Vivado specific
+if [[ $VIV_PLATFORM = "Cygwin" ]]; then
+ if [[ -d "/cygdrive/c/Xilinx/Vivado_Lab" ]]; then
+ VIVADO_BASE_PATH="/cygdrive/c/Xilinx/Vivado_Lab"
+ else
+ VIVADO_BASE_PATH="/cygdrive/c/Xilinx/Vivado"
+ fi
+ MODELSIM_BASE_PATH="/cygdrive/c/mentor/modelsim"
+else
+ if [[ -d "/opt/Xilinx/Vivado_Lab" ]]; then
+ VIVADO_BASE_PATH="/opt/Xilinx/Vivado_Lab"
+ else
+ VIVADO_BASE_PATH="/opt/Xilinx/Vivado"
+ fi
+ MODELSIM_BASE_PATH="/opt/mentor/modelsim"
+fi
+
+function resolve_viv_path {
+ if [[ $VIV_PLATFORM = "Cygwin" ]]; then
+ echo $(cygpath -aw $1)
+ else
+ echo $1
+ fi
+}
+
+#----------------------------------------------------------------------------
+# Validate prerequisites
+#----------------------------------------------------------------------------
+# Ensure required variables
+if [ -z "$REPO_BASE_PATH" ]; then
+ echo "ERROR: Please define the variable REPO_BASE_PATH before calling this script"
+ return
+fi
+if [ -z "$VIVADO_VER" ]; then
+ echo "ERROR: Please define the variable VIVADO_VER before calling this script"
+ return
+fi
+if [ -z "$DISPLAY_NAME" ]; then
+ echo "ERROR: Please define the variable DISPLAY_NAME before calling this script"
+ return
+fi
+if [ ${#PRODUCT_ID_MAP[@]} -eq 0 ]; then
+ echo "ERROR: Please define the variable PRODUCT_ID_MAP before calling this script"
+ return
+fi
+
+# Ensure that the script is sourced
+if [[ $BASH_SOURCE = $0 ]]; then
+ echo "ERROR: This script must be sourced."
+ help
+ exit 1
+fi
+
+#----------------------------------------------------------------------------
+# Help message display function
+#----------------------------------------------------------------------------
+function help {
+ cat <<EOHELP
+
+Usage: source setupenv.sh [--help|-h] [--vivado-path=<PATH>] [--modelsim-path=<PATH>]
+
+--vivado-path : Path to the base install directory for Xilinx Vivado
+ (Default: /opt/Xilinx/Vivado or /opt/Xilinx/Vivado_Lab)
+--modelsim-path : Path to the base install directory for Modelsim (optional simulation tool)
+ (Default: /opt/mentor/modelsim)
+--help -h : Shows this message.
+
+This script sets up the environment required to build FPGA images for the Ettus Research
+${DISPLAY_NAME}. It will also optionally set up the environment to run the
+Modelsim simulator (although this tool is not required).
+
+Required tools: Xilinx Vivado $VIVADO_VER (Synthesis and Simulation)
+Optional tools: Mentor Graphics Modelsim (Simulation)
+
+EOHELP
+}
+
+#----------------------------------------------------------------------------
+# Setup and parse command line
+#----------------------------------------------------------------------------
+# Detect platform bitness
+if [ "$(uname -m)" = "x86_64" ]; then
+ BITNESS="64"
+else
+ BITNESS="32"
+fi
+
+# Go through cmd line options
+MODELSIM_REQUESTED=0
+MODELSIM_FOUND=0
+PARSE_STATE=""
+for i in "$@"; do
+ case $i in
+ -h|--help)
+ help
+ return 0
+ ;;
+ --vivado-path=*)
+ VIVADO_BASE_PATH="${i#*=}"
+ PARSE_STATE=""
+ ;;
+ --vivado-path)
+ PARSE_STATE="vivado-path"
+ ;;
+ --vivado-version=*)
+ VIVADO_USER_VER="${i#*=}"
+ PARSE_STATE=""
+ ;;
+ --vivado-version)
+ PARSE_STATE="vivado-version"
+ ;;
+ --modelsim-path=*)
+ MODELSIM_BASE_PATH="${i#*=}"
+ MODELSIM_REQUESTED=1
+ PARSE_STATE=""
+ ;;
+ --modelsim-path)
+ PARSE_STATE="modelsim-path"
+ ;;
+ *)
+ case $PARSE_STATE in
+ vivado-path)
+ VIVADO_BASE_PATH="$i"
+ PARSE_STATE=""
+ ;;
+ vivado-version)
+ VIVADO_USER_VER="$i"
+ PARSE_STATE=""
+ ;;
+ modelsim-path)
+ MODELSIM_BASE_PATH="$i"
+ MODELSIM_REQUESTED=1
+ PARSE_STATE=""
+ ;;
+ *)
+ echo "ERROR: Unrecognized option: $i"
+ help
+ return 1
+ ;;
+ esac
+ ;;
+ esac
+done
+
+# Vivado environment setup
+if [[ ${VIVADO_VER^^} = "CMDLINE_ARG" ]]; then
+ if [[ -z $VIVADO_USER_VER ]]; then
+ echo "ERROR: The --vivado-version argument must be specified when the env version is \"CMDLINE_ARG\""
+ return 1
+ else
+ VIVADO_VER=$VIVADO_USER_VER
+ fi
+fi
+export VIVADO_PATH=$VIVADO_BASE_PATH/$VIVADO_VER
+
+echo "Setting up a ${BITNESS}-bit FPGA build environment for the ${DISPLAY_NAME}..."
+#----------------------------------------------------------------------------
+# Prepare Vivado environment
+#----------------------------------------------------------------------------
+if [ -d "$VIVADO_PATH/bin" ]; then
+ echo "- Vivado: Found ($VIVADO_PATH/bin)"
+else
+ echo "- Vivado: Version $VIVADO_VER not found in $VIVADO_BASE_PATH (ERROR.. Builds and simulations will not work)"
+ if [[ -z $VIVADO_USER_VER ]]; then
+ echo " Use the --vivado-path option to override the search path"
+ else
+ echo " Use the --vivado-path option to override the search path or specify the correct --vivado-version"
+ fi
+ unset VIVADO_USER_VER
+ return 1
+fi
+
+$VIVADO_PATH/settings${BITNESS}.sh
+if [[ -e $VIVADO_PATH/.settings${BITNESS}-Vivado_Lab.sh ]]; then
+ $VIVADO_PATH/.settings${BITNESS}-Vivado_Lab.sh
+else
+ $VIVADO_PATH/.settings${BITNESS}-Vivado.sh
+fi
+if [[ -e $(readlink -f $VIVADO_BASE_PATH/..)/DocNav/.settings${BITNESS}-DocNav.sh ]]; then
+ $(readlink -f $VIVADO_BASE_PATH/..)/DocNav/.settings${BITNESS}-DocNav.sh
+fi
+
+if [[ -x `which tput 2>/dev/null` ]] ; then
+ export VIV_COLOR_SCHEME=default
+fi
+VIVADO_EXEC="$REPO_BASE_PATH/tools/scripts/launch_vivado.sh"
+
+#----------------------------------------------------------------------------
+# Prepare Modelsim environment
+#----------------------------------------------------------------------------
+if [[ -d $MODELSIM_BASE_PATH ]]; then
+ if [[ $VIV_PLATFORM = "Cygwin" ]]; then
+ VSIM_PATH=$(find -L $MODELSIM_BASE_PATH -maxdepth 3 -wholename '*win*/vsim.exe' | head -n 1)
+ else
+ VSIM_PATH=$(find -L $MODELSIM_BASE_PATH -maxdepth 3 -wholename '*linux*/vsim' | head -n 1)
+ fi
+fi
+if [[ $VSIM_PATH ]]; then
+ if [[ $($VSIM_PATH -version) =~ .*ModelSim[[:space:]](.+)[[:space:]]vsim.* ]]; then
+ MODELSIM_VER=${BASH_REMATCH[1]}
+ MODELSIM_PATH=$(dirname $VSIM_PATH)
+ fi
+ case $MODELSIM_VER in
+ DE-64|SE-64)
+ export MODELSIM_64BIT=1
+ export SIM_COMPLIBDIR=$VIVADO_PATH/modelsim64
+ ;;
+ DE|SE|PE)
+ export MODELSIM_64BIT=0
+ export SIM_COMPLIBDIR=$VIVADO_PATH/modelsim32
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+function build_simlibs {
+ mkdir -p $SIM_COMPLIBDIR
+ pushd $SIM_COMPLIBDIR
+ CMD_PATH=`mktemp XXXXXXXX.vivado_simgen.tcl`
+ if [[ $MODELSIM_64BIT -eq 1 ]]; then
+ echo "compile_simlib -force -simulator modelsim -family all -language all -library all -directory $SIM_COMPLIBDIR" > $CMD_PATH
+ else
+ echo "compile_simlib -force -simulator modelsim -family all -language all -library all -32 -directory $SIM_COMPLIBDIR" > $CMD_PATH
+ fi
+ $VIVADO_EXEC -mode batch -source $(resolve_viv_path $CMD_PATH) -nolog -nojournal
+ rm -f $CMD_PATH
+ popd
+}
+
+if [[ $MODELSIM_VER ]]; then
+ echo "- Modelsim: Found ($MODELSIM_VER, $MODELSIM_PATH)"
+ if [[ -e "$SIM_COMPLIBDIR/modelsim.ini" ]]; then
+ echo "- Modelsim Compiled Libs: Found ($SIM_COMPLIBDIR)"
+ else
+ echo "- Modelsim Compiled Libs: Not found! (Run build_simlibs to generate them.)"
+ fi
+else
+ if [[ $MODELSIM_REQUESTED -eq 1 ]]; then
+ echo "- Modelsim: Not found in $MODELSIM_BASE_PATH (WARNING.. Simulations with vsim will not work)"
+ fi
+fi
+
+#----------------------------------------------------------------------------
+# Misc export variables
+#----------------------------------------------------------------------------
+export PATH=$(echo ${PATH} | tr ':' '\n' | awk '$0 !~ "/Vivado/"' | paste -sd:)
+export PATH=${PATH}:$VIVADO_PATH:$VIVADO_PATH/bin:$VIVADO_HLS_PATH:$VIVADO_HLS_PATH/bin:$MODELSIM_PATH
+
+for prod in "${!PRODUCT_ID_MAP[@]}"; do
+ IFS='/' read -r -a prod_tokens <<< "${PRODUCT_ID_MAP[$prod]}"
+ if [ ${#prod_tokens[@]} -eq 6 ]; then
+ export XIL_ARCH_${prod}=${prod_tokens[0]}
+ export XIL_PART_ID_${prod}=${prod_tokens[1]}/${prod_tokens[2]}/${prod_tokens[3]}/${prod_tokens[4]}/${prod_tokens[5]}
+ elif [ ${#prod_tokens[@]} -eq 5 ]; then
+ export XIL_ARCH_${prod}=${prod_tokens[0]}
+ export XIL_PART_ID_${prod}=${prod_tokens[1]}/${prod_tokens[2]}/${prod_tokens[3]}/${prod_tokens[4]}
+ elif [ ${#prod_tokens[@]} -eq 4 ]; then
+ export XIL_ARCH_${prod}=${prod_tokens[0]}
+ export XIL_PART_ID_${prod}=${prod_tokens[1]}/${prod_tokens[2]}/${prod_tokens[3]}
+ else
+ echo "ERROR: Invalid PRODUCT_ID_MAP entry: \"${PRODUCT_ID_MAP[$prod]}\". Must be <arch>/<part>/<pkg>/<sg>[/<tg>[/<rev>]]."
+ return 1
+ fi
+done
+
+#----------------------------------------------------------------------------
+# Define IP management aliases
+#----------------------------------------------------------------------------
+# Vivado specific
+VIV_IP_UTILS=$REPO_BASE_PATH/tools/scripts/viv_ip_utils.tcl
+
+function viv_create_ip {
+ if [[ -z $1 || -z $2 || -z $3 || -z $4 ]]; then
+ echo "Create a new Vivado IP instance and a Makefile for it"
+ echo ""
+        echo "Usage: viv_create_ip <IP Name> <IP Location> <IP VLNV> <Product>"
+ echo "- <IP Name>: Name of the IP instance"
+ echo "- <IP Location>: Base location for IP"
+ echo "- <IP VLNV>: The vendor, library, name, and version (VLNV) string for the IP as defined by Xilinx"
+ echo "- <Product>: Product to generate IP for. Choose from: ${!PRODUCT_ID_MAP[@]}"
+ return 1
+ fi
+
+ ip_name=$1
+ ip_dir=$(readlink -f $2)
+ ip_vlnv=$3
+ IFS='/' read -r -a prod_tokens <<< "${PRODUCT_ID_MAP[$4]}"
+ part_name=${prod_tokens[1]}${prod_tokens[2]}${prod_tokens[3]}
+ if [[ -z $part_name ]]; then
+ echo "ERROR: Invalid product name $4. Supported: ${!PRODUCT_ID_MAP[@]}"
+ return 1
+ fi
+ if [[ -d $ip_dir/$ip_name ]]; then
+ echo "ERROR: IP $ip_dir/$ip_name already exists. Please choose a different name."
+ return 1
+ fi
+
+ $VIVADO_EXEC -mode gui -source $(resolve_viv_path $VIV_IP_UTILS) -nolog -nojournal -tclargs create $part_name $ip_name $(resolve_viv_path $ip_dir) $ip_vlnv
+ echo "Generating Makefile..."
+ python $REPO_BASE_PATH/tools/scripts/viv_gen_ip_makefile.py --ip_name=$ip_name --dest=$ip_dir/$ip_name
+ echo "Done generating IP in $ip_dir/$ip_name"
+}
+
+function viv_modify_ip {
+ if [[ -z $1 ]]; then
+ echo "Modify an existing Vivado IP instance"
+ echo ""
+ echo "Usage: viv_modify_ip <IP XCI Path>"
+ echo "- <IP XCI Path>: Path to the IP XCI file."
+ return 1
+ fi
+
+ xci_path=$(readlink -f $1)
+ part_name=$(python $REPO_BASE_PATH/tools/scripts/viv_ip_xci_editor.py read_part $xci_path)
+ if [[ -z $part_name ]]; then
+ echo "ERROR: Invalid part name $part_name. XCI parse error."
+ return 1
+ fi
+ if [[ -f $xci_path ]]; then
+ $VIVADO_EXEC -mode gui -source $(resolve_viv_path $VIV_IP_UTILS) -nolog -nojournal -tclargs modify $part_name $(resolve_viv_path $xci_path)
+ else
+ echo "ERROR: IP $xci_path not found."
+ return 1
+ fi
+}
+
+function viv_modify_bd {
+ if [[ -z $1 || -z $2 ]]; then
+ echo "Modify an existing Vivado Block Design instance"
+ echo ""
+ echo "Usage: viv_modify_bd <BD Path> <Product>"
+ echo "- <BD Path>: Path to the BD file."
+ echo "- <Product>: Product to generate IP for. Choose from: ${!PRODUCT_ID_MAP[@]}"
+ return 1
+ fi
+
+ bd_path=$(readlink -f $1)
+ IFS='/' read -r -a prod_tokens <<< "${PRODUCT_ID_MAP[$2]}"
+ part_name=${prod_tokens[1]}${prod_tokens[2]}${prod_tokens[3]}
+ if [[ -f $bd_path ]]; then
+ $VIVADO_EXEC -mode gui -source $(resolve_viv_path $VIV_IP_UTILS) -nolog -nojournal -tclargs modify $part_name $(resolve_viv_path $bd_path)
+ else
+ echo "ERROR: IP $bd_path not found."
+ return 1
+ fi
+}
+
+function viv_modify_tcl_bd {
+ if [[ -z $1 || -z $2 ]]; then
+ echo "Modify an existing Vivado TCL-based Block Design instance."
+ echo ""
+        echo "Usage: viv_modify_tcl_bd <TCL Path> <Product>"
+ echo "- <TCL Path>: Path to the TCL source file."
+ echo "- <Product> : Product to generate IP for. Choose from: ${!PRODUCT_ID_MAP[@]}"
+ return 1
+ fi
+
+ src_path=$(readlink -f $1)
+ IFS='/' read -r -a prod_tokens <<< "${PRODUCT_ID_MAP[$2]}"
+ part_name=${prod_tokens[1]}${prod_tokens[2]}${prod_tokens[3]}
+ bd_ip_repo="${src_path%/top*}/lib/vivado_ipi"
+ if [[ -f $src_path ]]; then
+ $VIVADO_EXEC -mode gui -source $(resolve_viv_path $VIV_IP_UTILS) -nolog -nojournal -tclargs modify_bdtcl $part_name $(resolve_viv_path $src_path) $(resolve_viv_path $bd_ip_repo)
+ echo "INFO: Vivado BD was closed, writing source TCL..."
+ $VIVADO_EXEC -mode batch -source $(resolve_viv_path $VIV_IP_UTILS) -nolog -nojournal -tclargs write_bdtcl $part_name $(resolve_viv_path $src_path)
+ else
+ echo "ERROR: IP $src_path not found."
+ return 1
+ fi
+}
+
+function viv_ls_ip {
+ if [[ -z $1 ]]; then
+ echo "List the items in the Vivado IP catalog"
+ echo ""
+ echo "Usage: viv_ls_ip <Product>"
+ echo "- <Product>: Product to generate IP for. Choose from: ${!PRODUCT_ID_MAP[@]}"
+ return 1
+ fi
+
+ IFS='/' read -r -a prod_tokens <<< "${PRODUCT_ID_MAP[$1]}"
+ part_name=${prod_tokens[1]}${prod_tokens[2]}${prod_tokens[3]}
+ if [[ -z $part_name ]]; then
+ echo "ERROR: Invalid product name $1. Supported: ${!PRODUCT_ID_MAP[@]}"
+ return 1
+ fi
+ $VIVADO_EXEC -mode batch -source $(resolve_viv_path $VIV_IP_UTILS) -nolog -nojournal -tclargs list $part_name | grep -v -E '(^$|^#|\*\*)'
+ test ${PIPESTATUS[0]} -eq 0
+}
+
+function viv_upgrade_ip {
+ if [[ -z $1 ]]; then
+ echo "Upgrade one or more Xilinx IP targets"
+ echo ""
+ echo "Usage: viv_upgrade_ip <IP Directory> [--recursive]"
+ echo "- <IP Directory>: Path to the IP XCI file."
+ return 1
+ fi
+ max_depth="-maxdepth 1"
+ if [[ $2 == "--recursive" ]]; then
+ max_depth=""
+ fi
+ search_path=$(readlink -f $1)
+ IFS='' read -r -a xci_files <<< $(find $search_path $max_depth | grep .xci | xargs)
+ for xci_path in $xci_files; do
+ if [[ -f $xci_path ]]; then
+ echo "Upgrading $xci_path..."
+ part_name=$(python $REPO_BASE_PATH/tools/scripts/viv_ip_xci_editor.py read_part $xci_path)
+ $VIVADO_EXEC -mode batch -source $(resolve_viv_path $VIV_IP_UTILS) -nolog -nojournal -tclargs upgrade $part_name $(resolve_viv_path $xci_path) | grep -v -E '(^$|^#|\*\*)'
+ test ${PIPESTATUS[0]} -eq 0
+ else
+ echo "ERROR: IP $xci_path not found."
+ return 1
+ fi
+ done
+}
+
+#----------------------------------------------------------------------------
+# Define hardware programming aliases
+#----------------------------------------------------------------------------
+VIV_HW_UTILS=$REPO_BASE_PATH/tools/scripts/viv_hardware_utils.tcl
+
+function viv_hw_console {
+ vivado -mode tcl -source $(resolve_viv_path $VIV_HW_UTILS) -nolog -nojournal
+}
+
+function viv_jtag_list {
+ $VIVADO_EXEC -mode batch -source $(resolve_viv_path $VIV_HW_UTILS) -nolog -nojournal -tclargs list | grep -v -E '(^$|^#|\*\*)'
+ test ${PIPESTATUS[0]} -eq 0
+}
+
+function viv_jtag_program {
+ if [[ -z $1 ]]; then
+ echo "Downloads a bitfile to an FPGA device using Vivado"
+ echo ""
+ echo "Usage: viv_jtag_program <Bitfile Path> [<FTDI Serial> = .] [<Device Address> = 0:0]"
+ echo "- <Bitfile Path>: Path to a .bit FPGA configuration file"
+ echo "- <FTDI Serial>: Regular expression for filtering out devices by"
+ echo " their FTDI serial"
+ echo "- <Device Address>: Address to the device in the form <Target>:<Device>"
+ echo " Run viv_jtag_list to get a list of connected devices"
+ return 1
+ fi
+ $VIVADO_EXEC -mode batch -source $(resolve_viv_path $VIV_HW_UTILS) -nolog -nojournal -tclargs program $* | grep -v -E '(^$|^#|\*\*)'
+ test ${PIPESTATUS[0]} -eq 0
+}
+
+function probe_bitfile {
+ if [[ -z $1 ]]; then
+ echo "Probe a Xilinx bit file and report header information"
+ echo ""
+ echo "Usage: probe_bitfile <Bitfile Path>"
+ echo "- <Bitfile Path>: Path to a .bit FPGA configuration file"
+ return 1
+ fi
+ python $REPO_BASE_PATH/tools/scripts/xil_bitfile_parser.py --info $1
+}
+
+echo
+echo "Environment successfully initialized."
+return 0
diff --git a/fpga/usrp3/tools/scripts/shared-ip-loc-manage.sh b/fpga/usrp3/tools/scripts/shared-ip-loc-manage.sh
new file mode 100755
index 000000000..447f087e0
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/shared-ip-loc-manage.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+function help {
+ cat <<EOHELP
+
+Usage: shared-ip-loc-manage.sh [--help|-h] [--timeout=<TIMEOUT>] --path=<PATH> <ACTION>
+
+--path : Path to IP location
+--timeout : Timeout in seconds for the operation to complete [Optional]
+             (Default: 1800)
+--force : Force operation
+--help -h : Shows this message.
+
+ACTION : Choose from reserve, release
+
+EOHELP
+}
+
+function wait_for_lock {
+ if [ -d "$ip_dir" ]; then
+ remaining=$(($timeout))
+ trap 'echo"";echo "BUILDER: Waiting for concurrent IP build to finish... (skipped)";break;' SIGINT; \
+ while [ -f "$ip_dir/.build_lock" ]; do
+ if [ $remaining -gt 0 ]; then
+ echo -ne "Waiting for concurrent IP build to finish... (${remaining}s [Ctrl-C to proceed])\033[0K\r"
+ sleep 1
+ : $((remaining--))
+ else
+ break
+ fi
+ done
+ trap - SIGINT; \
+ if [ $remaining -eq 0 ]; then
+ echo "BUILDER: Waiting for concurrent IP build to finish... (timeout)"
+ fi
+ fi
+}
+
+function lock {
+ if [ -d "$ip_dir" ]; then
+ touch $ip_dir/.build_lock
+ fi
+}
+
+function unlock {
+ rm -f $ip_dir/.build_lock
+}
+
+function reserve {
+ if [ -d "$ip_dir" ]; then
+ wait_for_lock
+ if [ $remaining -eq 0 ]; then
+ echo "Force creating new IP location: $ip_dir"
+ unlock
+ rm -rf $ip_dir
+ mkdir -p $ip_dir
+ fi
+ fi
+ if [ ! -d "$ip_dir" ]; then
+ mkdir -p $ip_dir
+ fi
+ echo "BUILDER: Reserving IP location: $ip_dir"
+ lock
+}
+
+function release {
+ echo "BUILDER: Releasing IP location: $ip_dir"
+ unlock
+}
+
+# Parse options
+ip_dir=""
+action=""
+timeout=1800
+remaining=0
+force=0
+
+for arg in "$@"; do
+ if [[ $arg == "--help" ]]; then
+ help
+ exit 0
+ elif [[ $arg == "--force" ]]; then
+ force=1
+ elif [[ $arg =~ "--path="(.+) ]]; then
+ ip_dir=`readlink -m ${BASH_REMATCH[1]}`
+ elif [[ $arg =~ "--timeout="(.+) ]]; then
+ timeout=${BASH_REMATCH[1]}
+ else
+ action=$arg
+ break
+ fi
+done
+
+# Validate inputs
+if [ -z $ip_dir ]; then
+ echo "ERROR: Please specify a valid path using the --path option."
+ exit 1
+fi
+
+case $action in
+ reserve)
+ if [ $force -eq 1 ]; then
+ echo "Force creating new IP location: $ip_dir"
+ rm -rf $ip_dir
+ mkdir -p $ip_dir
+ lock
+ else
+ reserve
+ fi
+ ;;
+ release)
+ release
+ ;;
+ *)
+ echo "ERROR: Please specify a valid action (reserve, release)"
+ exit 1
+ ;;
+esac
+
diff --git a/fpga/usrp3/tools/scripts/uhd_image_builder.py b/fpga/usrp3/tools/scripts/uhd_image_builder.py
new file mode 100755
index 000000000..7398b6e13
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/uhd_image_builder.py
@@ -0,0 +1,537 @@
+#!/usr/bin/env python
+"""
+Copyright 2016-2017 Ettus Research
+Copyright 2019 Ettus Research, A National Instrument Brand
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from __future__ import print_function
+import argparse
+import os
+import re
+import glob
+
# Verilog preamble for the generated rfnoc_ce_auto_inst file: declares the
# flattened CE (computation engine) AXI-stream buses and unpacks them into
# per-CE wire arrays. {num_ce} is filled in by create_auto_inst().
# NOTE(review): the "Flattern" typo below is inside the emitted Verilog
# output, so it is left untouched here.
HEADER_TMPL = """/////////////////////////////////////////////////////////
// Auto-generated by uhd_image_builder.py! Any changes
// in this file will be overwritten the next time
// this script is run.
/////////////////////////////////////////////////////////
localparam NUM_CE = {num_ce};
wire [NUM_CE*64-1:0] ce_flat_o_tdata, ce_flat_i_tdata;
wire [63:0] ce_o_tdata[0:NUM_CE-1], ce_i_tdata[0:NUM_CE-1];
wire [NUM_CE-1:0] ce_o_tlast, ce_o_tvalid, ce_o_tready, ce_i_tlast, ce_i_tvalid, ce_i_tready;
wire [63:0] ce_debug[0:NUM_CE-1];
// Flattern CE tdata arrays
genvar k;
generate
  for (k = 0; k < NUM_CE; k = k + 1) begin
    assign ce_o_tdata[k] = ce_flat_o_tdata[k*64+63:k*64];
    assign ce_flat_i_tdata[k*64+63:k*64] = ce_i_tdata[k];
  end
endgenerate
wire ce_clk = radio_clk;
wire ce_rst = radio_rst;
"""

# Instantiation of one RFNoC block wired to crossbar port {n}; parameter
# override string and extra ports are pre-formatted by format_param_str()
# and format_port_str().
BLOCK_TMPL = """
noc_block_{blockname} {blockparameters} {instname} (
  .bus_clk(bus_clk), .bus_rst(bus_rst),
  .ce_clk({clock}_clk), .ce_rst({clock}_rst),
  .i_tdata(ce_o_tdata[{n}]), .i_tlast(ce_o_tlast[{n}]), .i_tvalid(ce_o_tvalid[{n}]), .i_tready(ce_o_tready[{n}]),
  .o_tdata(ce_i_tdata[{n}]), .o_tlast(ce_i_tlast[{n}]), .o_tvalid(ce_i_tvalid[{n}]), .o_tready(ce_i_tready[{n}]),
  .debug(ce_debug[{n}]){extraports}
);
"""

# Generate-loop that populates every crossbar port from {fifo_start}
# upward with a loopback FIFO so unused ports are still terminated.
FILL_FIFO_TMPL = """
// Fill remaining crossbar ports with loopback FIFOs
genvar n;
generate
  for (n = {fifo_start}; n < NUM_CE; n = n + 1) begin
    noc_block_axi_fifo_loopback inst_noc_block_axi_fifo_loopback (
      .bus_clk(bus_clk), .bus_rst(bus_rst),
      .ce_clk(ce_clk), .ce_rst(ce_rst),
      .i_tdata(ce_o_tdata[n]), .i_tlast(ce_o_tlast[n]), .i_tvalid(ce_o_tvalid[n]), .i_tready(ce_o_tready[n]),
      .o_tdata(ce_i_tdata[n]), .o_tlast(ce_i_tlast[n]), .o_tvalid(ce_i_tvalid[n]), .o_tready(ce_i_tready[n]),
      .debug(ce_debug[n])
    );
  end
endgenerate
"""

# List of blocks that are part of our library but that do not take part
# in the process this tool provides
BLACKLIST = {'radio_core', 'axi_dma_fifo'}

# Makefile fragments used by create_oot_include() when emitting
# Makefile.OOT.inc for out-of-tree (OOT) RFNoC modules.
OOT_DIR_TMPL = """\nOOT_DIR = {oot_dir}\n"""
OOT_INC_TMPL = """include $(OOT_DIR)/Makefile.inc\n"""
OOT_SRCS_TMPL = """RFNOC_OOT_SRCS += {sources}\n"""
OOT_SRCS_FILE_HDR = """##################################################
# Include OOT makefiles
##################################################\n"""
+
+
def setup_parser():
    """
    Create argument parser

    Defines all CLI options: OOT include dirs (-I), a YML block definition
    file (-y) that overrides the positional block list, block-count limits,
    output file, device/target selection, GUI/clean flags, and the
    positional list of RFNoC block names.
    """
    parser = argparse.ArgumentParser(
        description="Generate the NoC block instantiation file",
    )
    parser.add_argument(
        "-I", "--include-dir",
        help="Path directory of the RFNoC Out-of-Tree module",
        nargs='+',
        default=None)
    parser.add_argument(
        "-y", "--yml",
        help="YML file definition of onboard blocks\
        (overrides the 'block' positional arguments)",
        default=None)
    parser.add_argument(
        "-m", "--max-num-blocks", type=int,
        help="Maximum number of blocks (Max. Allowed for x310|x300: 10,\
                for e300: 14, for e320: 12, for n300: 11, \
                for n310/n320: 10)",
        default=10)
    parser.add_argument(
        "--fill-with-fifos",
        help="If the number of blocks provided was smaller than the max\
                number, fill the rest with FIFOs",
        action="store_true")
    parser.add_argument(
        "-o", "--outfile",
        help="Output /path/filename - By running this directive,\
                you won't build your IP",
        default=None)
    parser.add_argument(
        "--auto-inst-src",
        help="Advanced Usage: The Verilog source for the auto_inst file that "
             "will be used instead of generating one automatically",
        default=None)
    parser.add_argument(
        "-d", "--device",
        help="Device to be programmed [x300, x310, e310, e320, n300, n310, n320]",
        default="x310")
    parser.add_argument(
        "-t", "--target",
        help="Build target - image type [X3X0_RFNOC_HG, X3X0_RFNOC_XG,\
                E310_RFNOC_sg3, E320_RFNOC_1G, N310_RFNOC_HG, ...]",
        default=None)
    parser.add_argument(
        "-g", "--GUI",
        help="Open Vivado GUI during the FPGA building process",
        action="store_true")
    parser.add_argument(
        "-c", "--clean-all",
        help="Cleans the IP before a new build",
        action="store_true")
    parser.add_argument(
        "blocks",
        help="List block names to instantiate.",
        default="",
        nargs='*',
    )
    return parser
+
def get_default_parameters():
    """Return the default per-block settings: ce clock, no parameter
    overrides, no extra ports."""
    return {
        "clock": "ce",
        "parameters": None,
        "extraports": None,
    }
+
+
def parse_yml(ymlfile):
    """
    Parse an input yaml file with a list of blocks and parameters!

    Returns (blocks, params): the block names in file order and a parallel
    list of per-block settings dicts (clock/parameters/extraports), each
    starting from get_default_parameters() and overridden by the YML entry.
    Exits if PyYAML is not installed.
    """
    try:
        # Imported lazily so the tool works without PyYAML when -y is unused.
        import yaml
    except ImportError:
        print('[ERROR] Could not import yaml module')
        exit(1)

    with open(ymlfile, 'r') as input_file:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and can construct arbitrary Python objects from tagged input --
        # consider yaml.safe_load for untrusted files.
        data = yaml.load(input_file)
    blocks = []
    params = []
    for val in data:
        print(val['block'])
        blocks.append(val['block'])
        blockparams = get_default_parameters()
        if "clock" in val:
            blockparams["clock"] = val["clock"]
        if "parameters" in val:
            blockparams["parameters"] = val["parameters"]
        if "extraports" in val:
            blockparams["extraports"] = val["extraports"]
        print(blockparams)
        params.append(blockparams)
    print(data)
    return blocks, params
+
def format_param_str(parameters):
    """
    Take a single block parameter dictionary and format as a verilog string
    """
    if not parameters:
        return ""
    rendered = [
        ".%s(%s)" % (str.upper(name),
                     "" if parameters[name] is None else parameters[name])
        for name in parameters
    ]
    return "#(%s)" % (", ".join(rendered))
+
def format_port_str(extraports):
    """
    Take a single dictionary and format as a verilog string representing extra block ports
    """
    if not extraports:
        return ""
    rendered = [
        ".%s(%s)" % (name, "" if extraports[name] is None else extraports[name])
        for name in extraports
    ]
    return ",\n %s" % (",\n ".join(rendered))
+
def create_auto_inst(blocks, blockparams, max_num_blocks, fill_with_fifos=False):
    """
    Returns the Verilog source for the auto_inst file.

    blocks/blockparams are parallel lists (block name, settings dict).
    max_num_blocks caps the design; with fill_with_fifos the remaining
    crossbar ports are terminated with loopback FIFOs. Exits non-zero on an
    empty or oversized block list, or when a blacklisted block is requested.
    """
    if len(blocks) == 0:
        print("[GEN_RFNOC_INST ERROR] No blocks specified!")
        exit(1)
    if len(blocks) > max_num_blocks:
        print("[GEN_RFNOC_INST ERROR] Trying to connect {} blocks, max is {}"
              .format(len(blocks), max_num_blocks))
        exit(1)
    num_ce = max_num_blocks
    if not fill_with_fifos:
        num_ce = len(blocks)
    vfile = HEADER_TMPL.format(num_ce=num_ce)
    blocks_in_blacklist = [block for block in blocks if block in BLACKLIST]
    if blocks_in_blacklist:
        print("[RFNoC ERROR]: The following blocks require special treatment and"\
            " can't be instantiated with this tool: ")
        for element in blocks_in_blacklist:
            print(" * ", element)
        print("Remove them from the command and run the uhd_image_builder.py again.")
        # Fix: this is an error, so report a failing exit status (was exit(0),
        # which told calling scripts the generation succeeded).
        exit(1)
    print("--Using the following blocks to generate image:")
    block_count = {k: 0 for k in set(blocks)}
    for i, (block, params) in enumerate(zip(blocks, blockparams)):
        block_count[block] += 1
        # First instance keeps the bare name; repeats get a numeric suffix.
        instname = "inst_{}{}".format(block, "" \
            if block_count[block] == 1 else block_count[block])
        print(" * {}".format(block))
        vfile += BLOCK_TMPL.format(blockname=block,
                                   blockparameters=format_param_str(params["parameters"]),
                                   instname=instname,
                                   n=i,
                                   clock=params["clock"],
                                   extraports=format_port_str(params["extraports"]))
    if fill_with_fifos:
        vfile += FILL_FIFO_TMPL.format(fifo_start=len(blocks))
    return vfile
+
def file_generator(args, vfile):
    """
    Takes the target device as an argument and, if no '-o' directive is given,
    replaces the auto_ce file in the corresponding top folder. With the
    presence of -o, it just generates a version of the verilog file which
    is not intended to be build
    """
    fpga_utils_path = get_scriptpath()
    print("Adding CE instantiation file for '%s'" % args.target)
    path_to_file = fpga_utils_path +'/../../top/' + device_dict(args.device.lower()) +\
        '/rfnoc_ce_auto_inst_' + args.device.lower() + '.v'
    # Fix: write through a context manager so the handle is closed and the
    # data flushed deterministically (the old open(...).write(...) relied on
    # garbage collection to close the file).
    dest = path_to_file if args.outfile is None else args.outfile
    with open(dest, 'w') as out_file:
        out_file.write(vfile)
+
def append_re_line_sequence(filename, linepattern, newline):
    """ Detects the re 'linepattern' in the file. After its last occurrence,
    paste 'newline'. If the pattern does not exist, append the new line
    to the file. Then, write. If the newline already exists, leaves the file
    unchanged"""
    with open(filename, 'r') as fhandle:
        oldfile = fhandle.read()
    # Already present -> nothing to do.
    if re.findall(newline, oldfile, flags=re.MULTILINE):
        return
    pattern_lines = re.findall(linepattern, oldfile, flags=re.MULTILINE)
    if not pattern_lines:
        # Anchor pattern absent: just append at the end of the file.
        with open(filename, 'a') as fhandle:
            fhandle.write(newline)
        return
    anchor = pattern_lines[-1]
    with open(filename, 'w') as fhandle:
        fhandle.write(oldfile.replace(anchor, anchor + newline + '\n'))
+
def create_oot_include(device, include_dirs):
    """
    Create the include file for OOT RFNoC sources

    Writes <top>/<device dir>/Makefile.OOT.inc, adding one OOT_DIR entry per
    unique out-of-tree module found under the given include_dirs. Three
    layouts are recognized (see the Case comments below); anything else is
    reported and skipped.
    """
    oot_dir_list = []
    target_dir = device_dict(device.lower())
    dest_srcs_file = os.path.join(get_scriptpath(), '..', '..', 'top',\
        target_dir, 'Makefile.OOT.inc')
    # NOTE(review): plain open()/close() rather than a with-block; an
    # exception mid-loop would leave the handle open.
    incfile = open(dest_srcs_file, 'w')
    incfile.write(OOT_SRCS_FILE_HDR)
    if include_dirs is not None:
        for dirs in include_dirs:
            currpath = os.path.abspath(str(dirs))
            # NOTE(review): `&` is bitwise, works here only because both
            # operands are bool -- `and` would be the idiomatic choice.
            if os.path.isdir(currpath) & (os.path.basename(currpath) == "rfnoc"):
                # Case 1: Pointed directly to rfnoc directory
                oot_path = currpath
            elif os.path.isdir(os.path.join(currpath, 'rfnoc')):
                # Case 2: Pointed to top level rfnoc module directory
                oot_path = os.path.join(currpath, 'rfnoc')
            elif os.path.isfile(os.path.join(currpath, 'Makefile.inc')):
                # Case 3: Pointed to a random directory with a Makefile.inc
                oot_path = currpath
            else:
                print('No RFNoC module found at ' + os.path.abspath(currpath))
                continue
            # Each module is emitted once, expressed relative to $(BASE_DIR).
            if oot_path not in oot_dir_list:
                oot_dir_list.append(oot_path)
                named_path = os.path.join('$(BASE_DIR)', get_relative_path(get_basedir(), oot_path))
                incfile.write(OOT_DIR_TMPL.format(oot_dir=named_path))
                if os.path.isfile(os.path.join(oot_path, 'Makefile.inc')):
                    # Check for Makefile.inc
                    incfile.write(OOT_INC_TMPL)
                elif os.path.isfile(os.path.join(oot_path, 'rfnoc', 'Makefile.inc')):
                    # Check for Makefile.inc
                    incfile.write(OOT_INC_TMPL)
                elif os.path.isfile(os.path.join(oot_path, 'rfnoc', 'fpga-src', 'Makefile.srcs')):
                    # Legacy: Check for fpga-src/Makefile.srcs
                    # Read, then append to file
                    curr_srcs = open(os.path.join(oot_path, 'rfnoc', 'fpga-src', 'Makefile.srcs'), 'r').read()
                    curr_srcs = curr_srcs.replace('SOURCES_PATH', os.path.join(oot_path, 'rfnoc', 'fpga-src', ''))
                    incfile.write(OOT_SRCS_TMPL.format(sources=curr_srcs))
                else:
                    print('No valid makefile found at ' + os.path.abspath(currpath))
                    continue
    incfile.close()
+
def append_item_into_file(device, include_dir):
    """
    Basically the same as append_re_line_sequence function, but it does not
    append anything when the input is not found
    ---
    Detects the re 'linepattern' in the file. After its last occurrence,
    pastes the input string. If pattern doesn't exist
    notifies and leaves the file unchanged
    """
    def get_oot_srcs_list(include_dir):
        """
        Pull the OOT sources out of the Makefile.srcs
        """
        oot_srcs_file = os.path.join(include_dir, 'Makefile.srcs')
        oot_srcs_list = readfile(oot_srcs_file)
        # Expand the SOURCES_PATH placeholder to the real include dir.
        return [w.replace('SOURCES_PATH', include_dir) for w in oot_srcs_list]
    # Here we go
    target_dir = device_dict(device.lower())
    if include_dir is not None:
        for directory in include_dir:
            dirs = os.path.join(directory, '')
            # Aborts the whole process if no Verilog sources are present.
            checkdir_v(dirs)
            dest_srcs_file = os.path.join(get_scriptpath(), '..', '..', 'top',\
                target_dir, 'Makefile.srcs')
            oot_srcs_list = get_oot_srcs_list(dirs)
            dest_srcs_list = readfile(dest_srcs_file)
            # Anchor 1: an existing $(addprefix <dir>, \ block for this dir.
            prefixpattern = re.escape('$(addprefix ' + dirs + ', \\\n')
            # Anchor 2 (fallback): the RFNOC_OOT_SRCS assignment itself.
            linepattern = re.escape('RFNOC_OOT_SRCS = \\\n')
            oldfile = open(dest_srcs_file, 'r').read()
            prefixlines = re.findall(prefixpattern, oldfile, flags=re.MULTILINE)
            if len(prefixlines) == 0:
                lines = re.findall(linepattern, oldfile, flags=re.MULTILINE)
                if len(lines) == 0:
                    # Neither anchor found: leave the file untouched.
                    print("Pattern {} not found. Could not write `{}'"
                          .format(linepattern, oldfile))
                    return
                else:
                    last_line = lines[-1]
                    srcs = "".join(oot_srcs_list)
            else:
                last_line = prefixlines[-1]
                # Only add sources not already listed in the destination.
                srcs = "".join([
                    item
                    for item in oot_srcs_list
                    if item not in dest_srcs_list
                ])
            newfile = oldfile.replace(last_line, last_line + srcs)
            open(dest_srcs_file, 'w').write(newfile)
+
def compare(file1, file2):
    """
    Compare two files line by line and return the list of lines from the
    first file that do not appear anywhere in the second.
    """
    with open(file1, 'r') as first:
        with open(file2, 'r') as second:
            reference = second.readlines()
            return [line for line in first.readlines() if line not in reference]
+
def readfile(files):
    """
    Return the contents of the given file as a list of lines (newlines kept).

    Fix: the previous docstring was copy-pasted from compare() and described
    a two-file comparison; this function only reads a single file. The
    manual append loop was also redundant with readlines(), and the file is
    now closed deterministically via the context manager.
    """
    with open(files, 'r') as arg:
        return arg.readlines()
+
def build(args):
    """
    Run `make` for the selected device/target inside its top-level build
    directory and return the exit status of the make invocation.

    Fix: when the build directory did not exist, the function fell through
    to `return ret_val` with ret_val never assigned (UnboundLocalError);
    it now reports the problem and returns a failing status instead.
    """
    cwd = get_scriptpath()
    target_dir = device_dict(args.device.lower())
    build_dir = os.path.join(cwd, '..', '..', 'top', target_dir)
    if not os.path.isdir(build_dir):
        print("[ERROR] Build directory not found: {0}".format(build_dir))
        return 1
    print("changing temporarily working directory to {0}".\
            format(build_dir))
    os.chdir(build_dir)
    make_cmd = ". ./setupenv.sh "
    if args.clean_all:
        make_cmd = make_cmd + "&& make cleanall "
    make_cmd = make_cmd + "&& make " + dtarget(args)
    if args.GUI:
        make_cmd = make_cmd + " GUI=1"
    # Wrap it into a bash call:
    make_cmd = '/bin/bash -c "{0}"'.format(make_cmd)
    ret_val = os.system(make_cmd)
    os.chdir(cwd)
    return ret_val
+
def device_dict(args):
    """
    Map a device name onto the top-level folder its image is built in
    (several devices share one build folder).
    """
    return {
        'x300': 'x300',
        'x310': 'x300',
        'e300': 'e31x',
        'e310': 'e31x',
        'e320': 'e320',
        'n300': 'n3xx',
        'n310': 'n3xx',
        'n320': 'n3xx',
    }[args]
+
def dtarget(args):
    """
    Return the build target requested on the command line, or the default
    target for the selected device when none was given.
    """
    if args.target is not None:
        return args.target
    return {
        'x300': 'X300_RFNOC_HG',
        'x310': 'X310_RFNOC_HG',
        'e310': 'E310_SG3_RFNOC',
        'e320': 'E320_RFNOC_1G',
        'n300': 'N300_RFNOC_HG',
        'n310': 'N310_RFNOC_HG',
        'n320': 'N320_RFNOC_XG',
    }[args.device.lower()]
+
def checkdir_v(include_dir):
    """
    Check that the given include dir contains at least one Verilog (*.v)
    source file; exits the process when none are found.

    Fix: the failure path used exit(0), which reported success to calling
    shells/scripts despite printing [ERROR]; it now exits non-zero.
    """
    nfiles = glob.glob(os.path.join(include_dir,'')+'*.v')
    if len(nfiles) == 0:
        print('[ERROR] No verilog files found in the given directory')
        exit(1)
    else:
        print('Verilog sources found!')
        return
+
def get_scriptpath():
    """
    Return the absolute directory holding this script, with any symlinks
    in the path resolved.
    """
    this_file = os.path.realpath(__file__)
    return os.path.dirname(this_file)
+
def get_basedir():
    """
    Return the base directory (BASE_DIR) used in the rfnoc build process:
    the fpga/usrp3 'top' directory two levels above this script.
    """
    top_dir = os.path.join(get_scriptpath(), '..', '..', 'top')
    return os.path.abspath(top_dir)
+
def get_relative_path(base, target):
    """
    Find the relative path (including going "up" directories) from base to
    target.

    NOTE(review): os.path.commonprefix is character-based, not
    path-component based; this rewrite keeps the original behavior,
    including that quirk.
    """
    basedir = os.path.abspath(base)
    abstarget = os.path.abspath(target)
    prefix = os.path.commonprefix([basedir, abstarget])
    rel = os.path.relpath(abstarget, prefix)
    if prefix == "":
        print("Could not determine relative path")
        return rel
    # Walk up from base until we reach the shared prefix, prepending '..'
    # for every level climbed.
    while basedir != os.path.abspath(prefix):
        basedir = os.path.dirname(basedir)
        rel = os.path.join('..', rel)
    return rel
+
def main():
    """Entry point: resolve the block list, emit the instantiation file,
    write the OOT include file, and (unless -o was given) start a build."""
    args = setup_parser().parse_args()
    if args.yml:
        print("Using yml file. Ignoring command line blocks arguments")
        blocks, params = parse_yml(args.yml)
    else:
        blocks = args.blocks
        params = [get_default_parameters()]*len(blocks)
    if args.auto_inst_src is None:
        vfile = create_auto_inst(blocks, params, args.max_num_blocks, args.fill_with_fifos)
    else:
        with open(args.auto_inst_src, 'r') as src:
            vfile = src.read()
    file_generator(args, vfile)
    create_oot_include(args.device, args.include_dir)
    if args.outfile is not None:
        print("Instantiation file generated at {}".\
                format(args.outfile))
        return 0
    return build(args)
+
# Script entry point: propagate main()'s return value as the process exit
# status so callers (Makefiles, shells) can detect failures.
if __name__ == "__main__":
    exit(main())
diff --git a/fpga/usrp3/tools/scripts/uhd_image_builder_gui.py b/fpga/usrp3/tools/scripts/uhd_image_builder_gui.py
new file mode 100755
index 000000000..4d14cd256
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/uhd_image_builder_gui.py
@@ -0,0 +1,656 @@
+#!/usr/bin/env python
+"""
+Copyright 2016-2018 Ettus Research
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from __future__ import print_function
+import os
+import re
+import sys
+import signal
+import threading
+import xml.etree.ElementTree as ET
+from PyQt5 import (QtGui,
+ QtCore,
+ QtWidgets)
+from PyQt5.QtWidgets import QGridLayout
+from PyQt5.QtCore import (pyqtSlot,
+ Qt,
+ QModelIndex)
+import uhd_image_builder
+
+signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+class MainWindow(QtWidgets.QWidget):
+ """
+ UHD_IMAGE_BUILDER
+ """
+ # pylint: disable=too-many-instance-attributes
+
    def __init__(self):
        """Set up default build state (x310 / X310_RFNOC_HG), the command
        dictionary mirrored into the CLI preview box, and build the GUI."""
        super(MainWindow, self).__init__()
        ##################################################
        # Initial Values
        ##################################################
        self.target = 'x300'            # top-level build folder
        self.device = 'x310'            # selected device name
        self.build_target = 'X310_RFNOC_HG'
        self.oot_dirs = []              # out-of-tree module dirs added so far
        self.max_allowed_blocks = 10    # device-dependent CE limit
        # One entry per uhd_image_builder.py CLI option; empty string means
        # "flag not set". Joined together to form the displayed command.
        self.cmd_dict = {"target": '-t {}'.format(self.build_target),
                         "device": '-d {}'.format(self.device),
                         "include": '',
                         "fill_fifos": '',
                         "viv_gui": '',
                         "cleanall": '',
                         "show_file": ''}
        self.cmd_name = ['./uhd_image_builder.py', ]
        # cmd_prefix = script name + currently selected blocks.
        self.cmd_prefix = list(self.cmd_name)
        # Path of the generated rfnoc_ce_auto_inst file for the current
        # target/device; recomputed in set_target_and_device().
        self.instantiation_file = os.path.join(uhd_image_builder.get_scriptpath(),
                                               '..', '..', 'top', self.target,
                                               'rfnoc_ce_auto_inst_' + self.device.lower() +
                                               '.v')

        # List of blocks that are part of our library but that do not take place
        # on the process this tool provides
        self.blacklist = ['noc_block_radio_core', 'noc_block_axi_dma_fifo', 'noc_block_pfb']
        # Serializes bitstream generation started from run_command().
        self.lock = threading.Lock()
        self.init_gui()
+
    def init_gui(self):
        """
        Initializes GUI init values and constants

        Builds the whole window: three tree panels (build targets, available
        blocks, blocks in design), action buttons, option checkboxes, the
        live command preview, and wires every widget signal to its slot.
        """
        # pylint: disable=too-many-statements

        # Makefile.srcs listing the Ettus-provided RFNoC blocks.
        ettus_sources = os.path.join(uhd_image_builder.get_scriptpath(), '..', '..', 'lib',\
            'rfnoc', 'Makefile.srcs')
        ##################################################
        # Grid Layout
        ##################################################
        grid = QGridLayout()
        grid.setSpacing(15)
        ##################################################
        # Buttons
        ##################################################
        oot_btn = QtWidgets.QPushButton('Add OOT Blocks', self)
        oot_btn.setToolTip('Add your custom Out-of-tree blocks')
        grid.addWidget(oot_btn, 9, 0)
        from_grc_btn = QtWidgets.QPushButton('Import from GRC', self)
        grid.addWidget(from_grc_btn, 9, 2)
        show_file_btn = QtWidgets.QPushButton('Show instantiation File', self)
        grid.addWidget(show_file_btn, 9, 1)
        add_btn = QtWidgets.QPushButton('>>', self)
        grid.addWidget(add_btn, 2, 2)
        rem_btn = QtWidgets.QPushButton('<<', self)
        grid.addWidget(rem_btn, 3, 2)
        self.gen_bit_btn = QtWidgets.QPushButton('Generate .bit file', self)
        grid.addWidget(self.gen_bit_btn, 9, 3)

        ##################################################
        # Checkbox
        ##################################################
        self.fill_with_fifos = QtWidgets.QCheckBox('Fill with FIFOs', self)
        self.viv_gui = QtWidgets.QCheckBox('Open Vivado GUI', self)
        self.cleanall = QtWidgets.QCheckBox('Clean IP', self)
        grid.addWidget(self.fill_with_fifos, 5, 2)
        grid.addWidget(self.viv_gui, 6, 2)
        grid.addWidget(self.cleanall, 7, 2)

        ##################################################
        # uhd_image_builder command display
        ##################################################
        label_cmd_display = QtWidgets.QLabel(self)
        label_cmd_display.setText("uhd_image_builder command:")
        label_cmd_display.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        label_cmd_display.setStyleSheet(" QLabel {font-weight: bold; color: black}")
        grid.addWidget(label_cmd_display, 10, 0)
        self.cmd_display = QtWidgets.QTextEdit(self)
        self.cmd_display.setMaximumHeight(label_cmd_display.sizeHint().height() * 3)
        self.cmd_display.setReadOnly(True)
        self.cmd_display.setText("".join(self.cmd_name))
        grid.addWidget(self.cmd_display, 10, 1, 1, 3)

        ##################################################
        # uhd_image_builder target help display
        ##################################################
        self.help_display = QtWidgets.QLabel(self)
        grid.addWidget(self.help_display, 11, 1, 1, 3)
        self.help_display.setWordWrap(True)
        help_description = QtWidgets.QLabel(self)
        grid.addWidget(help_description, 11, 0)
        help_description.setText("Target description: ")
        help_description.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        help_description.setStyleSheet(" QLabel {font-weight: bold; color: black}")

        ##################################################
        # Panels - QTreeModels
        ##################################################
        ### Far-left Panel: Build targets
        self.targets = QtWidgets.QTreeView(self)
        self.targets.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.model_targets = QtGui.QStandardItemModel(self)
        self.model_targets.setHorizontalHeaderItem(0, QtGui.QStandardItem("Select build target"))
        self.targets.setModel(self.model_targets)
        self.populate_target('x300')
        self.populate_target('e300')
        self.populate_target('e320')
        self.populate_target('n3xx')
        grid.addWidget(self.targets, 0, 0, 8, 1)

        ### Central Panel: Available blocks
        ### Create tree to categorize Ettus Block and OOT Blocks in different lists
        self.blocks_available = QtWidgets.QTreeView(self)
        self.blocks_available.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.blocks_available.setContextMenuPolicy(Qt.CustomContextMenu)
        ettus_blocks = QtGui.QStandardItem("Ettus-provided Blocks")
        # Category headers are disabled so they cannot be selected as blocks.
        ettus_blocks.setEnabled(False)
        ettus_blocks.setForeground(Qt.black)
        self.populate_list(ettus_blocks, ettus_sources)
        self.oot = QtGui.QStandardItem("OOT Blocks for X300 devices")
        self.oot.setEnabled(False)
        self.oot.setForeground(Qt.black)
        self.refresh_oot_dirs()
        self.model_blocks_available = QtGui.QStandardItemModel(self)
        self.model_blocks_available.appendRow(ettus_blocks)
        self.model_blocks_available.appendRow(self.oot)
        self.model_blocks_available.setHorizontalHeaderItem(
            0, QtGui.QStandardItem("List of blocks available")
        )
        self.blocks_available.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.blocks_available.setModel(self.model_blocks_available)
        grid.addWidget(self.blocks_available, 0, 1, 8, 1)

        ### Far-right Panel: Blocks in current design
        self.blocks_in_design = QtWidgets.QTreeView(self)
        self.blocks_in_design.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.model_in_design = QtGui.QStandardItemModel(self)
        self.model_in_design.setHorizontalHeaderItem(
            0, QtGui.QStandardItem("Blocks in current design"))
        self.blocks_in_design.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.blocks_in_design.setModel(self.model_in_design)
        grid.addWidget(self.blocks_in_design, 0, 3, 8, 1)

        ##################################################
        # Informative Labels
        ##################################################
        block_num_hdr = QtWidgets.QLabel(self)
        block_num_hdr.setText("Blocks in current design")
        block_num_hdr.setStyleSheet(" QLabel {font-weight: bold; color: black}")
        block_num_hdr.setAlignment(QtCore.Qt.AlignHCenter)
        grid.addWidget(block_num_hdr, 0, 2)
        self.block_num = QtWidgets.QLabel(self)
        self.block_num.setText("-")
        self.block_num.setAlignment(QtCore.Qt.AlignHCenter)
        grid.addWidget(self.block_num, 1, 2)
        self.block_num.setStyleSheet(" QLabel {color: green}")
        self.generating_bitstream = QtWidgets.QLabel(self)
        self.generating_bitstream.setText("")
        self.generating_bitstream.setAlignment(QtCore.Qt.AlignHCenter)
        grid.addWidget(self.generating_bitstream, 11, 0, 1, 5)
        self.generating_bitstream.setStyleSheet(" QLabel {font-weight: bold; color: black}")

        ##################################################
        # Connection of the buttons with their signals
        ##################################################
        # Most widgets also trigger cmd_display_slot so the command preview
        # stays in sync with every user action.
        self.fill_with_fifos.clicked.connect(self.fill_slot)
        self.fill_with_fifos.clicked.connect(self.cmd_display_slot)
        self.viv_gui.clicked.connect(self.viv_gui_slot)
        self.viv_gui.clicked.connect(self.cmd_display_slot)
        self.cleanall.clicked.connect(self.cleanall_slot)
        self.cleanall.clicked.connect(self.cmd_display_slot)
        oot_btn.clicked.connect(self.file_dialog)
        from_grc_btn.clicked.connect(self.blocks_to_add_slot)
        from_grc_btn.clicked.connect(self.cmd_display_slot)
        from_grc_btn.clicked.connect(self.file_grc_dialog)
        add_btn.clicked.connect(self.add_to_design)
        add_btn.clicked.connect(self.blocks_to_add_slot)
        add_btn.clicked.connect(self.check_blk_num)
        add_btn.clicked.connect(self.cmd_display_slot)
        rem_btn.clicked.connect(self.remove_from_design)
        rem_btn.clicked.connect(self.blocks_to_add_slot)
        rem_btn.clicked.connect(self.cmd_display_slot)
        show_file_btn.clicked.connect(self.show_file)
        show_file_btn.clicked.connect(self.cmd_display_slot)
        show_file_btn.clicked.connect(self.run_command)
        self.gen_bit_btn.clicked.connect(self.generate_bit)
        self.gen_bit_btn.clicked.connect(self.cmd_display_slot)
        self.gen_bit_btn.clicked.connect(self.run_command)
        self.targets.clicked.connect(self.ootlist)
        self.targets.clicked.connect(self.set_target_and_device)
        self.targets.clicked.connect(self.cmd_display_slot)
        self.targets.clicked.connect(self.check_blk_num)
        self.blocks_available.doubleClicked.connect(self.add_to_design)
        self.blocks_available.doubleClicked.connect(self.blocks_to_add_slot)
        self.blocks_available.doubleClicked.connect(self.check_blk_num)
        self.blocks_available.doubleClicked.connect(self.cmd_display_slot)
        self.blocks_in_design.doubleClicked.connect(self.remove_from_design)
        self.blocks_in_design.doubleClicked.connect(self.blocks_to_add_slot)
        self.blocks_in_design.doubleClicked.connect(self.cmd_display_slot)

        ##################################################
        # Set a default size based on screen geometry
        ##################################################
        screen_size = QtWidgets.QDesktopWidget().screenGeometry(-1)
        self.resize(screen_size.width()/1.4, screen_size.height()/1.7)
        self.setWindowTitle("uhd_image_builder.py GUI")
        self.setLayout(grid)
        self.show()
+
+ ##################################################
+ # Slots and functions/actions
+ ##################################################
    @pyqtSlot()
    def blocks_to_add_slot(self):
        """
        Retrieves a list of the blocks in design to be displayed in TextEdit

        Also refreshes the "N/max" counter label and rebuilds cmd_prefix
        (script name + selected block names) used by cmd_display_slot().
        """
        availables = []
        blocks = []
        availables = self.iter_tree(self.model_blocks_available, availables)
        blk_count = self.model_in_design.rowCount()
        self.block_num.setText("{}/{}".format(blk_count,
                                              self.max_allowed_blocks))
        # Collect the block name shown in each row of the design panel.
        for i in range(blk_count):
            blocks.append(self.blocks_in_design.model().data(
                self.blocks_in_design.model().index(i, 0)))
        self.cmd_prefix = self.cmd_name + blocks
+
    @pyqtSlot()
    def check_blk_num(self):
        """
        Checks the amount of blocks in the design pannel

        Turns the counter red and pops a warning dialog when the design
        exceeds the device's maximum block count.
        """
        blk_count = self.model_in_design.rowCount()
        if blk_count > self.max_allowed_blocks:
            self.block_num.setStyleSheet(" QLabel {font-weight:bold; color: red}")
            self.show_too_many_blocks_warning(blk_count)
+
+ @pyqtSlot()
+ def fill_slot(self):
+ """
+ Populates 'fill_fifos' value into the command dictionary
+ """
+ if self.fill_with_fifos.isChecked():
+ self.cmd_dict["fill_fifos"] = '--fill-with-fifos'
+ else:
+ self.cmd_dict["fill_fifos"] = ''
+
+ @pyqtSlot()
+ def viv_gui_slot(self):
+ """
+ Populates 'viv_gui' value into the command dictionary
+ """
+ if self.viv_gui.isChecked():
+ self.cmd_dict["viv_gui"] = '-g'
+ else:
+ self.cmd_dict["viv_gui"] = ''
+
+ @pyqtSlot()
+ def cleanall_slot(self):
+ """
+ Populates 'cleanall' value into the command dictionary
+ """
+ if self.cleanall.isChecked():
+ self.cmd_dict["cleanall"] = '-c'
+ else:
+ self.cmd_dict["cleanall"] = ''
+
+ @pyqtSlot()
+ def cmd_display_slot(self):
+ """
+ Displays the command to be run in a QTextEdit in realtime
+ """
+ text = [" ".join(self.cmd_prefix),]
+ for value in self.cmd_dict.values():
+ if value is not '':
+ text.append(value)
+ self.cmd_display.setText(" ".join(text))
+
+ @pyqtSlot()
+ def add_to_design(self):
+ """
+ Adds blocks from the 'available' pannel to the list to be added
+ into the design
+ """
+ indexes = self.blocks_available.selectedIndexes()
+ for index in indexes:
+ word = self.blocks_available.model().data(index)
+ element = QtGui.QStandardItem(word)
+ if word is not None:
+ self.model_in_design.appendRow(element)
+
+ @pyqtSlot()
+ def remove_from_design(self):
+ """
+ Removes blocks from the list that is to be added into the design
+ """
+ indexes = self.blocks_in_design.selectedIndexes()
+ for index in indexes:
+ self.model_in_design.removeRow(index.row())
+ # Edit Informative Label formatting
+ blk_count = self.model_in_design.rowCount()
+ if blk_count <= self.max_allowed_blocks:
+ self.block_num.setStyleSheet(" QLabel {color: green}")
+
    @pyqtSlot()
    def show_file(self):
        """
        Show the rfnoc_ce_auto_inst file in the default text editor

        Only sets the -o option here; run_command() (connected to the same
        button) performs the actual generation and opens the file.
        """
        self.cmd_dict['show_file'] = '-o {}'.format(self.instantiation_file)
+
    @pyqtSlot()
    def generate_bit(self):
        """
        Runs the FPGA .bit generation command

        Clears the -o option so run_command() triggers a real build instead
        of only writing the instantiation file.
        """
        self.cmd_dict['show_file'] = ''
+
+ @pyqtSlot()
+ def run_command(self):
+ """
+ Executes the uhd_image_builder command based on user options
+ """
+ if self.check_no_blocks() and self.check_blk_not_in_sources():
+ process = threading.Thread(target=self.generate_bitstream)
+ process.start()
+ if self.cmd_dict['show_file'] is not '':
+ os.system("xdg-open " + self.instantiation_file)
+
    @pyqtSlot()
    def set_target_and_device(self):
        """
        Populates the 'target' and 'device' values of the command directory
        and the device dependent max_allowed_blocks in display

        Relies on ootlist() (connected to the same click) having already
        updated self.build_target / self.device / self.target.
        """
        self.cmd_dict['target'] = '-t {}'.format(self.build_target)
        self.cmd_dict['device'] = '-d {}'.format(self.device)
        blk_count = self.model_in_design.rowCount()
        self.block_num.setText("{}/{}".format(blk_count,
                                              self.max_allowed_blocks))
        # Recompute the per-device instantiation file path.
        self.instantiation_file = os.path.join(uhd_image_builder.get_scriptpath(),
                                               '..', '..', 'top', self.target,
                                               'rfnoc_ce_auto_inst_' + self.device.lower() +
                                               '.v')
+
+ @pyqtSlot()
+ def ootlist(self):
+ """
+ Lists the Out-of-tree module blocks
+ """
+ index = self.targets.currentIndex()
+ self.build_target = str(self.targets.model().data(index))
+ self.device = self.build_target[:4]
+ if self.device == 'X310' or self.device == 'X300':
+ self.target = 'x300'
+ self.max_allowed_blocks = 10
+ elif self.device == 'E310':
+ self.target = 'e300'
+ self.max_allowed_blocks = 14
+ elif self.device == 'E320':
+ self.target = 'e320'
+ self.max_allowed_blocks = 12
+ elif self.device == 'N300':
+ self.target = 'n3xx'
+ self.max_allowed_blocks = 11
+ elif self.device == 'N310' or self.device == 'N320':
+ self.target = 'n3xx'
+ self.max_allowed_blocks = 10
+ oot_sources = os.path.join(uhd_image_builder.get_scriptpath(), '..', '..', 'top',\
+ self.target, 'Makefile.srcs')
+ self.show_list(self.oot, self.target, oot_sources)
+
+ # Show the help string for a selected target
+ selected_makefile = os.path.join(uhd_image_builder.get_scriptpath(),
+ '..', '..', 'top', self.target, 'Makefile')
+ pattern = "^\#\S*{}.*".format(self.build_target)
+ with open(selected_makefile) as fil:
+ help_string = re.findall(pattern, fil.read(), re.MULTILINE)[0].replace("##","")
+ self.help_display.setText(help_string)
+
+ @pyqtSlot()
+ def file_dialog(self):
+ """
+ Opens a dialog window to add manually the Out-of-tree module blocks
+ """
+ append_directory = []
+ startpath = os.path.join(uhd_image_builder.get_scriptpath(), '..', '..', '..', '..')
+ new_oot = str(QtWidgets.QFileDialog.getExistingDirectory(self, 'RFNoC Out of Tree Directory', startpath))
+ if len(new_oot) > 0:
+ self.oot_dirs.append(new_oot)
+ uhd_image_builder.create_oot_include(self.device, self.oot_dirs)
+ self.refresh_oot_dirs()
+
+ @pyqtSlot()
+ def file_grc_dialog(self):
+ """
+ Opens a dialog window to add manually the GRC description file, from where
+ the RFNoC blocks will be parsed and added directly into the "Design" pannel
+ """
+ filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '/home/')[0]
+ if len(filename) > 0:
+ self.grc_populate_list(self.model_in_design, filename)
+ self.set_target_and_device()
+ self.blocks_to_add_slot()
+ self.cmd_display_slot()
+
+ def check_no_blocks(self):
+ """
+ Checks if there are no blocks in the design pannel. Needs to be a
+ different slot because triggers from clicking signals from pannels
+ would be superfluous
+ """
+ blk_count = self.model_in_design.rowCount()
+ if blk_count == 0:
+ self.show_no_blocks_warning()
+ return False
+ return True
+
+ def show_no_srcs_warning(self, block_to_add):
+ """
+ Shows a warning message window when no sources are found for the blocks that
+ are in the design pannel
+ """
+ # Create Warning message window
+ msg = QtWidgets.QMessageBox()
+ msg.setIcon(QtWidgets.QMessageBox.Warning)
+ msg.setText("The following blocks are in your design but their sources"\
+ " have not been added: \n\n {0}. \n\nPlease be sure of adding them"\
+ "before continuing. Would you like to add them now?"\
+ "".format(block_to_add))
+ msg.setWindowTitle("No sources for design")
+ yes_btn = msg.addButton("Yes", QtWidgets.QMessageBox.YesRole)
+ no_btn = msg.addButton("No", QtWidgets.QMessageBox.NoRole)
+ msg.exec_()
+ if msg.clickedButton() == yes_btn:
+ self.file_dialog()
+ return False
+ elif msg.clickedButton() == no_btn:
+ return True
+
+ @staticmethod
+ def show_no_blocks_warning():
+ """
+ Shows a warning message window when no blocks are found in the 'design' pannel
+ """
+ # Create Warning message window
+ msg = QtWidgets.QMessageBox()
+ msg.setIcon(QtWidgets.QMessageBox.Warning)
+ msg.setText("There are no Blocks in the current design")
+ msg.exec_()
+
+ def show_too_many_blocks_warning(self, number_of_blocks):
+ """
+ Shows a warning message window when too many blocks are found in the 'design' pannel
+ """
+ # Create Warning message window
+ msg = QtWidgets.QMessageBox()
+ msg.setIcon(QtWidgets.QMessageBox.Warning)
+ msg.setText("You added {} blocks while the maximum allowed blocks for"\
+ " a {} device is {}. Please remove some of the blocks to "\
+ "continue with the design".format(number_of_blocks,
+ self.device, self.max_allowed_blocks))
+ msg.exec_()
+
+ def iter_tree(self, model, output, parent=QModelIndex()):
+ """
+ Iterates over the Index tree
+ """
+ for i in range(model.rowCount(parent)):
+ index = model.index(i, 0, parent)
+ item = model.data(index)
+ output.append(str(item))
+ if model.hasChildren(index):
+ self.iter_tree(model, output, index)
+ return output
+
+ def show_list(self, parent, target, files):
+ """
+ Shows the Out-of-tree blocks that are available for a given device
+ """
+ parent.setText('OOT Blocks for {} devices'.format(target.upper()))
+ self.refresh_oot_dirs()
+
+ def populate_list(self, parent, files, clear=True):
+ """
+ Populates the pannels with the blocks that are listed in the Makefile.srcs
+ of our library
+ """
+ # Clean the list before populating it again
+ if (clear):
+ parent.removeRows(0, parent.rowCount())
+ suffix = '.v \\\n'
+ with open(files) as fil:
+ blocks = fil.readlines()
+ for element in blocks:
+ if element.endswith(suffix) and 'noc_block' in element:
+ element = element[:-len(suffix)]
+ if element not in self.blacklist:
+ block = QtGui.QStandardItem(element.partition('noc_block_')[2])
+ parent.appendRow(block)
+
+ @staticmethod
+ def show_not_xml_warning():
+ """
+ Shows a warning message window when no blocks are found in the 'design' pannel
+ """
+ # Create Warning message window
+ msg = QtWidgets.QMessageBox()
+ msg.setIcon(QtWidgets.QMessageBox.Warning)
+ msg.setText("[ParseError]: The chosen file is not XML formatted")
+ msg.exec_()
+
+ def grc_populate_list(self, parent, files):
+ """
+ Populates the 'Design' list with the RFNoC blocks found in a GRC file
+ """
+ try:
+ tree = ET.parse(files)
+ root = tree.getroot()
+ for blocks in root.iter('block'):
+ for param in blocks.iter('param'):
+ for key in param.iter('key'):
+ if 'fpga_module_name' in key.text:
+ if param.findtext('value') in self.blacklist:
+ continue
+ block = QtGui.QStandardItem(param.findtext('value').\
+ partition('noc_block_')[2])
+ parent.appendRow(block)
+ except ET.ParseError:
+ self.show_not_xml_warning()
+ return
+
+ def refresh_oot_dirs(self):
+ """
+ Populates the OOT directory list from the OOT include file
+ """
+ oot_include = os.path.join(uhd_image_builder.get_scriptpath(), '..', '..', 'top',\
+ self.target, 'Makefile.OOT.inc')
+ dir_list = []
+ with open(oot_include, 'r') as fil:
+ text = fil.readlines()
+ for lines in text:
+ lines = lines.partition('$(BASE_DIR)/')
+ if (lines[1] == '$(BASE_DIR)/'):
+ relpath = lines[2].replace('\n', '')
+ ootpath = os.path.abspath(os.path.join(uhd_image_builder.get_scriptpath(), '..', '..', 'top', relpath))
+ dir_list.append(ootpath)
+ if (len(dir_list) == 0):
+ self.oot.removeRows(0, self.oot.rowCount())
+ self.cmd_dict["include"] = ''
+ else:
+ self.oot_dirs = dir_list
+ self.cmd_dict["include"] = '-I {}'.format(' '.join(self.oot_dirs))
+ for (ii, oot) in enumerate(dir_list):
+ self.populate_list(self.oot, os.path.join(oot, 'fpga-src', 'Makefile.srcs'), clear=ii==0)
+
+ def populate_target(self, selected_target):
+ """
+ Parses the Makefile available and lists the build targets into the left pannel
+ """
+ pattern = "^(?!\#)^\S*_RFNOC[^:]*"
+ build_targets = os.path.join(uhd_image_builder.get_scriptpath(), '..', '..', 'top',
+ selected_target, 'Makefile')
+ with open(build_targets) as fil:
+ targets = re.findall(pattern, fil.read(), re.MULTILINE)
+ for target in targets:
+ self.model_targets.appendRow(QtGui.QStandardItem(target))
+
+ def check_blk_not_in_sources(self):
+ """
+ Checks if a block added from GRC flowgraph is not yet in the sources
+ list
+ """
+ availables = []
+ notin = []
+ availables = self.iter_tree(self.model_blocks_available, availables)
+ for i in range(self.model_in_design.rowCount()):
+ block_to_add = self.blocks_in_design.model().data(
+ self.blocks_in_design.model().index(i, 0))
+ if str(block_to_add) not in availables:
+ notin.append(str(block_to_add))
+ if len(notin) > 0:
+ self.show_no_srcs_warning(notin)
+ return False
+ return True
+
+ def generate_bitstream(self):
+ """
+ Runs the bitstream generation command in a separate thread
+ """
+ self.lock.acquire()
+ self.gen_bit_btn.setEnabled(False)
+ command = self.cmd_display.toPlainText()
+ self.generating_bitstream.setText(
+ "[Generating BitStream]: The FPGA is currently being generated" + \
+ " with the blocks of the current design. See the terminal window" + \
+ " for further compilation details")
+ os.system(command)
+ self.lock.release()
+ self.gen_bit_btn.setEnabled(True)
+ self.generating_bitstream.setText("")
+
def main():
    """
    GUI entry point: build the Qt application and main window, then hand
    control to the Qt event loop until the user quits.
    """
    app = QtWidgets.QApplication(sys.argv)
    # Keep a reference so the window is not garbage collected.
    _window = MainWindow()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
diff --git a/fpga/usrp3/tools/scripts/viv_check_syntax.tcl b/fpga/usrp3/tools/scripts/viv_check_syntax.tcl
new file mode 100644
index 000000000..304bd5405
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_check_syntax.tcl
@@ -0,0 +1,14 @@
#
# Copyright 2018 Ettus Research
#
# Elaborate the design in batch mode to catch syntax errors early.

source $::env(VIV_TOOLS_DIR)/scripts/viv_utils.tcl

# Create the project, add sources, refresh IP
vivado_utils::initialize_project

# Run elaboration
vivado_utils::check_design

# Close the batch project
vivado_utils::close_batch_project
diff --git a/fpga/usrp3/tools/scripts/viv_gen_ip_makefile.py b/fpga/usrp3/tools/scripts/viv_gen_ip_makefile.py
new file mode 100644
index 000000000..87572e86e
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_gen_ip_makefile.py
@@ -0,0 +1,54 @@
+#! /usr/bin/python
+
+import sys, os
+import collections
+import argparse
+import datetime
+
+# Parse command line options
+def get_options():
+ parser = argparse.ArgumentParser(description='Create a Makefile for Xilinx IP.')
+ parser.add_argument('--ip_name', type=str, default=None, help='Name for the IP core')
+ parser.add_argument('--dest', type=str, default=None, help='Destination directory')
+ parser.add_argument('--copright_auth', type=str, default='Ettus Research', help='Copyright author')
+ args = parser.parse_args()
+ if not args.ip_name:
+ print('ERROR: Please specify a name for the IP core\n')
+ parser.print_help()
+ sys.exit(1)
+ if not args.dest:
+ print('ERROR: Please specify the location for the IP core\n')
+ parser.print_help()
+ sys.exit(1)
+ return args
+
+g_makefile_template = """#
+# {copyright}
+#
+
+include $(TOOLS_DIR)/make/viv_ip_builder.mak
+
+{ip_srcs_var} = $(IP_BUILD_DIR)/{ip_name}/{ip_name}.xci
+
+{ip_outs_var} = $(addprefix $(IP_BUILD_DIR)/{ip_name}/, \\
+{ip_name}.xci.out \\
+)
+
+$({ip_srcs_var}) $({ip_outs_var}) : $(IP_DIR)/{ip_name}/{ip_name}.xci
+\t$(call BUILD_VIVADO_IP,{ip_name},$(ARCH),$(PART_ID),$(IP_DIR),$(IP_BUILD_DIR),0)
+"""
+
def main():
    """
    Generate a Makefile.inc for the requested IP core in the destination
    directory, filling in the template with the IP name, variable names,
    and the copyright line for the current year.
    """
    args = get_options()  # removed stray semicolon

    transform = {
        'ip_name': args.ip_name,
        'ip_srcs_var': 'IP_' + args.ip_name.upper() + '_SRCS',
        'ip_outs_var': 'IP_' + args.ip_name.upper() + '_OUTS',
        'copyright': 'Copyright {} {}'.format(datetime.datetime.now().year,
                                              args.copright_auth),
    }

    with open(os.path.join(args.dest, 'Makefile.inc'), 'w') as mak_file:
        mak_file.write(g_makefile_template.format(**transform))


if __name__ == '__main__':
    main()
diff --git a/fpga/usrp3/tools/scripts/viv_gen_part_id.py b/fpga/usrp3/tools/scripts/viv_gen_part_id.py
new file mode 100644
index 000000000..b82c146aa
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_gen_part_id.py
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+
+import argparse
+import os, sys
+import re
+
+# Parse command line options
def get_options():
    """
    Parse command line arguments (the target tuple is a required
    positional argument).
    """
    parser = argparse.ArgumentParser(
        description='Utility script to generate a properly formed partid for Xilinx projects')
    parser.add_argument('target', type=str, default=None,
                        help='Input value for target. Must be of the form '
                             '<arch>/<device>/<package>/<speedgrade>'
                             '[/<temperaturegrade>[/<silicon revision>]]')
    args = parser.parse_args()
    if not args.target:
        print('ERROR: Please specify a target device tuple\n')
        parser.print_help()
        sys.exit(1)
    return args


def main():
    """
    Print the Vivado part id for the target tuple given on the command
    line, or an error (exit status 1) for malformed tuples.
    """
    args = get_options()  # removed stray semicolon

    target_tok = args.target.split('/')
    if len(target_tok) < 4:
        print('ERROR: Invalid target format. Must be <arch>/<device>/<package>/<speedgrade>[/<temperaturegrade>[/<silicon_revision>]]')
        print('ERROR: Parsed only ' + str(len(target_tok)) + ' tokens')
        sys.exit(1)
    if target_tok[0] in ['artix7', 'kintex7', 'zynq', 'spartan7', 'virtex7']:
        # 7-series: <device><package><speedgrade>
        print(target_tok[1] + target_tok[2] + target_tok[3])
    elif target_tok[0] in ['zynquplus', 'zynquplusRFSOC']:
        # UltraScale+ parts also need the temperature grade; the original
        # raised IndexError when it was missing.
        if len(target_tok) < 5:
            print('ERROR: Invalid target format. UltraScale+ targets require a <temperaturegrade> token')
            sys.exit(1)
        if len(target_tok) > 5:
            print(target_tok[1] + '-' + target_tok[2] + target_tok[3] + '-' + target_tok[4] + '-' + target_tok[5])
        else:
            print(target_tok[1] + '-' + target_tok[2] + target_tok[3] + '-' + target_tok[4])
    else:
        print('unknown-part-error')


if __name__ == '__main__':
    main()
diff --git a/fpga/usrp3/tools/scripts/viv_generate_bd.tcl b/fpga/usrp3/tools/scripts/viv_generate_bd.tcl
new file mode 100644
index 000000000..546a190b1
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_generate_bd.tcl
@@ -0,0 +1,78 @@
#
# Copyright 2016 Ettus Research
#

# ---------------------------------------
# Gather all external parameters
# ---------------------------------------
set bd_file $::env(BD_FILE)                         ;# Absolute path to BD/Tcl file from src dir
set src_ext [file extension $bd_file]               ;# BD or Tcl file?
set part_name $::env(PART_NAME)                     ;# Full Xilinx part name
set bd_name [file rootname [file tail $bd_file]]    ;# Extract IP name
# Optional supporting IP repositories
if {[info exists env(BD_IP_REPOS)]} {
    set ip_repos $::env(BD_IP_REPOS)
} else {
    set ip_repos {}
}
# Optional supporting HDL files
if {[info exists env(BD_HDL_SRCS)]} {
    set hdl_sources $::env(BD_HDL_SRCS)
} else {
    set hdl_sources {}
}

# Remove the cookie from any previous run so failures are detectable
file delete -force "$bd_file.out"

# ---------------------------------------
# Vivado Commands
# ---------------------------------------
create_project -part $part_name -in_memory
# In non-project mode, the hierarchy must be updated for the HDL source files to be
# correctly applied to the BD. See AR:
# https://www.xilinx.com/support/answers/63488.html
set_property source_mgmt_mode All [current_project]
set_property ip_repo_paths "{$ip_repos}" [current_project]
update_ip_catalog

# Add supplementary HDL sources, if they exist.
foreach hdl_file $hdl_sources {
    switch -- [file extension $hdl_file] {
        .vhd - .vhdl {
            puts "BUILDER: Adding VHDL : $hdl_file"
            read_vhdl -library work $hdl_file
        }
        .v - .sv - .vh - .svh {
            puts "BUILDER: Adding Verilog : $hdl_file"
            read_verilog $hdl_file
        }
        default {
            puts "BUILDER: \[WARNING\] File ignored!!!: $hdl_file"
        }
    }
}

# Open .tcl or .bd design directly.
if {$src_ext eq ".tcl"} {
    puts "BUILDER: Generating Block Diagram from script: $bd_file"
    create_bd_design -dir . $bd_name
    source $bd_file
    report_ip_status
    puts "BUILDER: Report_ip_status done"
    set bd_file $bd_name.bd
} else {
    puts "BUILDER: Adding Block Diagram: $bd_file"
    add_files -norecurse $bd_file
    puts "BUILDER: Generating BD Target first pass..."
    generate_target all [get_files $bd_file] -force
    report_ip_status
    puts "BUILDER: Report_ip_status done"
    open_bd_design $bd_file
}

# Generate outputs.
puts "BUILDER: Generating BD Target..."
generate_target all [get_files $bd_file]
puts "BUILDER: Generate all done"

# Touch the cookie only on a clean run; otherwise exit non-zero.
if { [get_msg_config -count -severity ERROR] == 0 } {
    set outfile [open "$bd_file.out" w]
    puts $outfile "This file was auto-generated by viv_generate_bd.tcl and signifies that BD generation is done."
    close $outfile
} else {
    exit 1
}
diff --git a/fpga/usrp3/tools/scripts/viv_generate_hls_ip.tcl b/fpga/usrp3/tools/scripts/viv_generate_hls_ip.tcl
new file mode 100644
index 000000000..f32bfa876
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_generate_hls_ip.tcl
@@ -0,0 +1,36 @@
#
# Copyright 2015 Ettus Research
#

# ---------------------------------------
# Gather all external parameters
# ---------------------------------------
set part_name $::env(PART_NAME)           ;# Full Xilinx part name
set hls_ip_name $::env(HLS_IP_NAME)       ;# High level synthesis IP name
set hls_ip_srcs $::env(HLS_IP_SRCS)       ;# High level synthesis IP source files
set hls_ip_inc $::env(HLS_IP_INCLUDES)    ;# High level synthesis IP include directories

# ---------------------------------------
# Vivado HLS Commands
# ---------------------------------------
open_project $hls_ip_name
open_solution "solution"
set_part $part_name
set_top $hls_ip_name
puts "BUILDER: Using include location : $hls_ip_inc"
# Add C/C++ sources to the solution; Tcl sources are executed instead.
foreach src_file $hls_ip_srcs {
    switch -- [file extension $src_file] {
        .c - .cpp - .cc - .h - .hpp {
            puts "BUILDER: Adding C/C++ : $src_file"
            add_files $src_file -cflags "-I $hls_ip_inc"
        }
        .tcl {
            puts "BUILDER: Executing tcl script : $src_file"
            source $src_file
        }
        default {
            puts "BUILDER: \[WARNING\] File ignored!!!: $src_file"
        }
    }
}
# Synthesize and package the result into the IP catalog format.
csynth_design
export_design -format ip_catalog

exit
diff --git a/fpga/usrp3/tools/scripts/viv_generate_ip.tcl b/fpga/usrp3/tools/scripts/viv_generate_ip.tcl
new file mode 100644
index 000000000..8fe769336
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_generate_ip.tcl
@@ -0,0 +1,43 @@
#
# Copyright 2014 Ettus Research
#

# ---------------------------------------
# Gather all external parameters
# ---------------------------------------
set xci_file $::env(XCI_FILE)                       ;# Absolute path to XCI file from src dir
set part_name $::env(PART_NAME)                     ;# Full Xilinx part name
set gen_example_proj $::env(GEN_EXAMPLE)            ;# Generate an example project
set synth_ip $::env(SYNTH_IP)                       ;# Synthesize generated IP
set ip_name [file rootname [file tail $xci_file]]   ;# Extract IP name

# Remove the cookie from any previous run so failures are detectable
file delete -force "$xci_file.out"

# ---------------------------------------
# Vivado Commands
# ---------------------------------------
create_project -part $part_name -in_memory -ip
set_property target_simulator XSim [current_project]
add_files -norecurse -force $xci_file
reset_target all [get_files $xci_file]
puts "BUILDER: Generating IP Target..."
generate_target all [get_files $xci_file]
# Optional post-generation steps, controlled by the environment
if {$synth_ip eq "1"} {
    puts "BUILDER: Synthesizing IP Target..."
    synth_ip [get_ips $ip_name]
}
if {$gen_example_proj eq "1"} {
    puts "BUILDER: Generating Example Design..."
    open_example_project -force -dir . [get_ips $ip_name]
}
close_project

# Touch the cookie only on a clean run; otherwise exit non-zero.
if { [get_msg_config -count -severity ERROR] == 0 } {
    set outfile [open "$xci_file.out" w]
    puts $outfile "This file was auto-generated by viv_generate_ip.tcl and signifies that IP generation is done."
    close $outfile
} else {
    exit 1
}
diff --git a/fpga/usrp3/tools/scripts/viv_hardware_utils.tcl b/fpga/usrp3/tools/scripts/viv_hardware_utils.tcl
new file mode 100644
index 000000000..2bdc02e18
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_hardware_utils.tcl
@@ -0,0 +1,97 @@
# Function definitions

# Connect to a Vivado hardware server, replacing any existing connection.
proc ::connect_server { {hostname localhost} {port 3121} } {
    if { [current_hw_server -quiet] ne "" } {
        disconnect_server
    }
    connect_hw_server -url $hostname:$port
}

# Drop the current hardware server connection.
proc ::disconnect_server { } {
    disconnect_hw_server [current_hw_server]
}

# Print every JTAG target on the server and the devices behind it,
# with their "<target>:<device>" addresses.
proc ::jtag_list {} {
    set target_idx 0
    foreach hw_target [get_hw_targets -of_objects [current_hw_server -quiet] -quiet] {
        puts "== Target${target_idx}: $hw_target =="
        open_hw_target $hw_target -quiet
        set dev_idx 0
        foreach hw_device [get_hw_devices] {
            puts "--- Device${dev_idx}: $hw_device (Address = ${target_idx}:${dev_idx})"
            incr dev_idx
        }
        close_hw_target -quiet
        incr target_idx
    }
}

# Program the bitfile at 'filepath' onto the device selected by the
# serial-number regexp and the "<target>:<device>" address.
proc ::jtag_program { filepath {serial "."} {address "0:0"} } {
    lassign [split $address :] target_idx dev_idx

    # Keep only the targets whose name matches the serial regexp.
    set matching_targets {}
    foreach candidate [get_hw_targets -of_objects [current_hw_server]] {
        if { [regexp $serial $candidate] } {
            lappend matching_targets $candidate
        }
    }

    set hw_target [lindex $matching_targets $target_idx]
    if { $hw_target eq "" } {
        error "ERROR: Could not open hw_target $target_idx. Either the address $address is incorrect or the device is not connected."
    }
    open_hw_target $hw_target -quiet

    set hw_device [lindex [get_hw_devices] $dev_idx]
    if { $hw_device eq "" } {
        close_hw_target -quiet
        error "ERROR: Could not open hw_device $dev_idx. Either the address $address is incorrect or the device is not connected."
    }
    puts "- Target: $hw_target"
    puts "- Device: $hw_device"
    puts "- Filename: $filepath"
    puts "Programming..."
    current_hw_device $hw_device
    set_property PROBES.FILE {} [current_hw_device]
    set_property PROGRAM.FILE $filepath [current_hw_device]
    program_hw_devices [current_hw_device]
    close_hw_target -quiet
    puts "Programming DONE"
}

# Initialization sequence
open_hw
connect_server

# When invoked with arguments, run one command and exit; otherwise stay
# in interactive mode with the helpers defined above.
if {$argc > 0} {
    set cmd [lindex $argv 0]
    switch -exact -- $cmd {
        list {
            jtag_list
        }
        program {
            set filepath [lindex $argv 1]
            if {$argc == 3} {
                jtag_program $filepath [lindex $argv 2]
            } elseif {$argc > 3} {
                jtag_program $filepath [lindex $argv 2] [lindex $argv 3]
            } else {
                jtag_program $filepath
            }
        }
        default {
            error "Invalid command: $cmd"
        }
    }
    disconnect_server
    exit
}
diff --git a/fpga/usrp3/tools/scripts/viv_ip_utils.tcl b/fpga/usrp3/tools/scripts/viv_ip_utils.tcl
new file mode 100644
index 000000000..ba0e87899
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_ip_utils.tcl
@@ -0,0 +1,142 @@
#
# Copyright 2015 Ettus Research
#
# Helper operations for creating, modifying and upgrading Vivado IP and
# BD sources. Usage: <cmd> <part_name> [args...]

if {$argc < 2} {
    error "ERROR: Invalid number of arguments"
    exit
}

set cmd [lindex $argv 0]
set part_name [lindex $argv 1]

# Only create an in-memory project when not using bdtcl commands.
if {[string first "_bdtcl" $cmd] == -1} {
    create_project -in_memory -ip -name inmem_ip_proj -part $part_name
# Otherwise, resolve the system's TMP directory for a scratch project.
} else {
    set sys_tmpdir [pwd]
    if {[file exists "/tmp"]} {set sys_tmpdir "/tmp"}
    catch {set sys_tmpdir $::env(TRASH_FOLDER)} ;# very old Macintosh. Mac OS X doesn't have this.
    catch {set sys_tmpdir $::env(TMP)}
    catch {set sys_tmpdir $::env(TEMP)}
}

switch -exact -- $cmd {
    create {
        if {$argc < 5} {
            error "ERROR: Invalid number of arguments for the create operation"
            exit
        }
        set ip_name [lindex $argv 2]
        set ip_dir [lindex $argv 3]
        set ip_vlnv [lindex $argv 4]
        create_ip -vlnv $ip_vlnv -module_name $ip_name -dir $ip_dir
    }
    modify {
        if {$argc < 3} {
            error "ERROR: Invalid number of arguments for the modify operation"
            exit
        }
        set src_file [lindex $argv 2]
        set src_ext [file extension $src_file]
        switch -- $src_ext {
            .xci {
                read_ip $src_file
            }
            .bd {
                add_files -norecurse $src_file
                export_ip_user_files -of_objects [get_files $src_file] -force -quiet
                open_bd_design $src_file
            }
            default {
                puts "ERROR: Invalid file extension: $src_ext"
            }
        }
    }
    list {
        puts "Supported IP for device ${part_name}:"
        foreach ip [lsort [get_ipdefs]] {
            puts "- $ip"
        }
    }
    upgrade {
        if {$argc < 3} {
            error "ERROR: Invalid number of arguments for the upgrade operation"
            exit
        }
        set src_file [lindex $argv 2]
        read_ip $src_file
        upgrade_ip [get_ips *]
    }
    modify_bdtcl {
        if {$argc < 4} {
            error "ERROR: Invalid number of arguments for the modify operation"
            exit
        }
        set src_file [lindex $argv 2]
        set src_rootname [file rootname [file tail $src_file]]
        set src_ext [file extension $src_file]
        set ip_repos [lindex $argv 3]
        set hdl_sources "[file dirname $src_file]/hdl_sources.tcl"
        if {$src_ext eq ".tcl"} {
            # Work inside a scratch project under the system TMP directory.
            set tmp_bddir "${sys_tmpdir}/.viv_${src_rootname}"
            file mkdir $tmp_bddir
            cd $tmp_bddir
            # The temporary project stores the user's interactive changes.
            create_project tmp_bd $tmp_bddir -part $part_name -force
            set_property ip_repo_paths "{$ip_repos}" [current_project]
            update_ip_catalog
            # Add any supporting HDL first
            if {[file exists $hdl_sources] == 1} {
                source $hdl_sources
            } else {
                puts "hdl_sources.tcl not found in IP directory. Skipping HDL import for BD design"
            }
            # Recreate the BD design from its Tcl source file.
            source $src_file
            regenerate_bd_layout
            validate_bd_design
            save_bd_design
        } else {
            puts "ERROR: Invalid file extension: $src_ext"
        }
    }
    write_bdtcl {
        if {$argc < 3} {
            error "ERROR: Invalid number of arguments for the create operation"
            exit
        }
        # Regenerating a TCL file from a BD design expects the tmp project
        # created earlier by the modify_bdtcl operation.
        set src_file [lindex $argv 2]
        set src_rootname [file rootname [file tail $src_file]]
        set src_ext [file extension $src_file]
        set src_dir [file dirname $src_file]
        # Make sure a BD or TCL file is passed.
        if {$src_ext eq ".tcl"} {
            # Validate that a previously created BD project exists.
            set tmp_bddir "${sys_tmpdir}/.viv_${src_rootname}"
            if {[file exists "$tmp_bddir/tmp_bd.xpr"] == 1} {
                puts "INFO: Generating TCL file from BD design..."
                # Open project and BD design
                open_project "$tmp_bddir/tmp_bd.xpr"
                open_bd_design [get_files "$src_rootname.bd"]
                # Rewrite TCL BD file
                write_bd_tcl -make_local -include_layout -force "$src_dir/$src_rootname.tcl"
                puts "INFO: BD TCL source updated: $src_dir/$src_rootname.tcl"
                # Close and delete tmp_bd project, not needed anymore.
                close_project
                puts "INFO: Deleting temp Vivado BD project..."
                file delete -force -- $tmp_bddir
                exit
            } else {
                puts "ERROR: No BD temp project found in: $tmp_bddir"
                exit
            }
        } else {
            puts "ERROR: Invalid file extension: $src_ext"
            exit
        }
    }
    default {
        error "ERROR: Invalid command: $cmd"
    }
}
diff --git a/fpga/usrp3/tools/scripts/viv_ip_xci_editor.py b/fpga/usrp3/tools/scripts/viv_ip_xci_editor.py
new file mode 100644
index 000000000..1f5ddf2c5
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_ip_xci_editor.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+
+import argparse
+import os, sys
+import re
+
+# Parse command line options
+def get_options():
+ parser = argparse.ArgumentParser(description='Utility script to query and modify a Xilinx IP XCI file')
+ parser.add_argument('action', type=str, default=None, help='Action to perform')
+ parser.add_argument('xci_filepath', type=str, default=None, help='Name for the IP core')
+ parser.add_argument('--target', type=str, default=None, help='Input value for target. Must be of the form <arch>/<device>/<package>/<speedgrade>/<silicon revision>')
+ parser.add_argument("--output_dir", type=str, default='.', help="Build directory for IP")
+ args = parser.parse_args()
+ if not args.action:
+ print('ERROR: Please specify an action to perform\n')
+ parser.print_help()
+ sys.exit(1)
+ if not args.xci_filepath:
+ print('ERROR: Please specify the location for the XCI file to operate on\n')
+ parser.print_help()
+ sys.exit(1)
+ if (not os.path.isfile(args.xci_filepath)):
+ print('ERROR: XCI File ' + args.xci_filepath + ' could not be accessed or is not a file.\n')
+ parser.print_help()
+ sys.exit(1)
+ return args
+
def get_match_str(item):
    """
    Return a regex matching a populated spirit:configurableElementValue
    element whose referenceId ends with *item*; the element text is
    captured in the group following any groups inside *item*.
    """
    # Raw string: '\<' (and friends) are invalid escapes in a plain literal.
    return (r'(.*\<spirit:configurableElementValue spirit:referenceId=\".*\.' +
            item + r'\"\>)(.+)(\</spirit:configurableElementValue\>)')
def get_empty_match_str(item):
    """
    Return a regex matching a self-closing (empty) spirit:
    configurableElementValue element whose referenceId ends with *item*.
    """
    # Raw string: '\<' (and friends) are invalid escapes in a plain literal.
    return (r'(.*\<spirit:configurableElementValue spirit:referenceId=\".*\.' +
            item + r'\")/\>')
+
def main():
    """
    Entry point: read the XCI file and perform the requested action.

    'read_*' actions print target/architecture/part information extracted
    from the file; 'retarget' writes a copy of the XCI into --output_dir
    with the part information replaced by --target.
    """
    args = get_options()  # removed stray semicolon

    # Read XCI File
    with open(args.xci_filepath) as in_file:
        xci_lines = in_file.readlines()

    if args.action.startswith('read_'):
        # Extract info from XCI File. In the match, group(2) is the field
        # name and group(3) its value (group(2) for the empty-element form).
        fields = '(ARCHITECTURE|DEVICE|PACKAGE|SPEEDGRADE|TEMPERATURE_GRADE|SILICON_REVISION)'
        xci_info = dict()
        for line in xci_lines:
            m = re.search(get_match_str(fields), line)
            if m is not None:
                xci_info[m.group(2)] = m.group(3)
            else:
                m = re.search(get_empty_match_str(fields), line)
                if m is not None:
                    xci_info[m.group(2)] = ''
        if args.action == 'read_target':
            print(xci_info['ARCHITECTURE'] + '/' + xci_info['DEVICE'] + '/' + xci_info['PACKAGE'] + '/' + xci_info['SPEEDGRADE'])
        if args.action == 'read_arch':
            print(xci_info['ARCHITECTURE'])
        if args.action == 'read_partid':
            print(xci_info['DEVICE'] + '/' + xci_info['PACKAGE'] + '/' + xci_info['SPEEDGRADE'] + '/' + xci_info['TEMPERATURE_GRADE'] + '/' + xci_info['SILICON_REVISION'])
        if args.action == 'read_part':
            print(xci_info['DEVICE'] + xci_info['PACKAGE'] + xci_info['SPEEDGRADE'])
    elif args.action == 'retarget':
        # Write a new XCI file with modified target info
        if not os.path.isdir(args.output_dir):
            print('ERROR: IP Build directory ' + args.output_dir + ' could not be accessed or is not a directory.')
            sys.exit(1)
        if not args.target:
            print('ERROR: No target specified.')
            sys.exit(1)
        target_tok = args.target.split('/')
        if len(target_tok) < 4:
            print('ERROR: Invalid target format. Must be <arch>/<device>/<package>/<speedgrade>/<tempgrade>/<silicon revision>')
            sys.exit(1)

        replace_dict = {
            'ARCHITECTURE': target_tok[0], 'DEVICE': target_tok[1],
            'PACKAGE': target_tok[2], 'SPEEDGRADE': target_tok[3],
            'C_XDEVICEFAMILY': target_tok[0], 'C_FAMILY': target_tok[0],
            'C_XDEVICE': target_tok[1],
        }
        if len(target_tok) > 4:
            replace_dict['TEMPERATURE_GRADE'] = target_tok[4]
        if len(target_tok) > 5:
            replace_dict['SILICON_REVISION'] = target_tok[5]
        out_xci_filename = os.path.join(os.path.abspath(args.output_dir), os.path.basename(args.xci_filepath))

        # Build the alternation once instead of per line (the original
        # re-joined the keys for every input line).
        fields = '(' + '|'.join(replace_dict.keys()) + ')'
        with open(out_xci_filename, 'w') as out_file:
            for r_line in xci_lines:
                w_line = r_line
                m = re.search(get_match_str(fields), r_line)
                if m is not None:
                    w_line = m.group(1) + replace_dict[m.group(2)] + m.group(4) + '\n'
                else:
                    m = re.search(get_empty_match_str(fields), r_line)
                    if m is not None:
                        w_line = m.group(1) + '>' + replace_dict[m.group(2)] + '</spirit:configurableElementValue>\n'
                out_file.write(w_line)


if __name__ == '__main__':
    main()
diff --git a/fpga/usrp3/tools/scripts/viv_sim_project.tcl b/fpga/usrp3/tools/scripts/viv_sim_project.tcl
new file mode 100644
index 000000000..f2d071f10
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_sim_project.tcl
@@ -0,0 +1,149 @@
+#
+# Copyright 2014 Ettus Research
+#
+
+# ---------------------------------------
+# Gather all external parameters
+# ---------------------------------------
+set simulator $::env(VIV_SIMULATOR)
+set design_srcs $::env(VIV_DESIGN_SRCS)
+set sim_srcs $::env(VIV_SIM_SRCS)
+set inc_srcs $::env(VIV_INC_SRCS)
+set sim_top $::env(VIV_SIM_TOP)
+set part_name $::env(VIV_PART_NAME)
+set sim_runtime $::env(VIV_SIM_RUNTIME)
+set sim_fast $::env(VIV_SIM_FAST)
+set vivado_mode $::env(VIV_MODE)
+set working_dir [pwd]
+
+set sim_fileset "sim_1"
+set project_name "[string tolower $simulator]_proj"
+
+if [info exists ::env(VIV_SIM_COMPLIBDIR) ] {
+ set sim_complibdir $::env(VIV_SIM_COMPLIBDIR)
+ if [expr [file isdirectory $sim_complibdir] == 0] {
+ set sim_complibdir ""
+ }
+} else {
+ set sim_complibdir ""
+}
+if [expr ([string equal $simulator "XSim"] == 0) && ([string length $sim_complibdir] == 0)] {
+ puts "BUILDER: \[ERROR\]: Could not resolve the location for the compiled simulation libraries."
+ puts " Please build libraries for chosen simulator and set the env or"
+ puts " makefile variable SIM_COMPLIBDIR to point to the location."
+ exit 1
+}
+
+# ---------------------------------------
+# Vivado Commands
+# ---------------------------------------
+puts "BUILDER: Creating Vivado simulation project part $part_name"
+create_project -part $part_name -force $project_name/$project_name
+
+foreach src_file $design_srcs {
+ set src_ext [file extension $src_file ]
+ if [expr [lsearch {.vhd .vhdl} $src_ext] >= 0] {
+ puts "BUILDER: Adding VHDL : $src_file"
+ read_vhdl $src_file
+ } elseif [expr [lsearch {.v .vh} $src_ext] >= 0] {
+ puts "BUILDER: Adding Verilog : $src_file"
+ read_verilog $src_file
+ } elseif [expr [lsearch {.sv .svh} $src_ext] >= 0] {
+ puts "BUILDER: Adding SVerilog: $src_file"
+ read_verilog -sv $src_file
+ } elseif [expr [lsearch {.xdc} $src_ext] >= 0] {
+ puts "BUILDER: Adding XDC : $src_file"
+ read_xdc $src_file
+ } elseif [expr [lsearch {.xci} $src_ext] >= 0] {
+ puts "BUILDER: Adding IP : $src_file"
+ read_ip $src_file
+ } elseif [expr [lsearch {.ngc .edif} $src_ext] >= 0] {
+ puts "BUILDER: Adding Netlist : $src_file"
+ read_edif $src_file
+ } elseif [expr [lsearch {.bd} $src_ext] >= 0] {
+ puts "BUILDER: Adding Block Diagram: $src_file"
+ add_files -norecurse $src_file
+ } elseif [expr [lsearch {.bxml} $src_ext] >= 0] {
+ puts "BUILDER: Adding Block Diagram XML: $src_file"
+ add_files -norecurse $src_file
+ } else {
+ puts "BUILDER: \[WARNING\] File ignored!!!: $src_file"
+ }
+}
+
+foreach sim_src $sim_srcs {
+ puts "BUILDER: Adding Sim Src : $sim_src"
+ add_files -fileset $sim_fileset -norecurse $sim_src
+}
+
+foreach inc_src $inc_srcs {
+ puts "BUILDER: Adding Inc Src : $inc_src"
+ add_files -fileset $sim_fileset -norecurse $inc_src
+}
+
+# Simulator independent config
+set_property top $sim_top [get_filesets $sim_fileset]
+set_property default_lib xil_defaultlib [current_project]
+update_compile_order -fileset sim_1 -quiet
+
+# Select the simulator
+# WARNING: Do this first before setting simulator specific properties!
+set_property target_simulator $simulator [current_project]
+
+# Vivado quirk when passing options to external simulators
+if [expr [string equal $simulator "XSim"] == 1] {
+ set_property verilog_define "WORKING_DIR=\"$working_dir\"" [get_filesets $sim_fileset]
+} else {
+ set_property verilog_define "WORKING_DIR=$working_dir" [get_filesets $sim_fileset]
+}
+
+# XSim specific settings
+set_property xsim.simulate.runtime "${sim_runtime}us" -objects [get_filesets $sim_fileset]
+set_property xsim.elaborate.debug_level "all" -objects [get_filesets $sim_fileset]
+set_property xsim.elaborate.unifast $sim_fast -objects [get_filesets $sim_fileset]
+# Set default timescale to prevent bogus warnings
+set_property xsim.elaborate.xelab.more_options -value {-timescale 1ns/1ns} -objects [get_filesets $sim_fileset]
+
+# Modelsim specific settings
+if [expr [string equal $simulator "Modelsim"] == 1] {
+ set sim_64bit $::env(VIV_SIM_64BIT)
+
+ set_property compxlib.modelsim_compiled_library_dir $sim_complibdir [current_project]
+ # Does not work yet (as of Vivado 2015.2), but will be useful for 32-bit support
+ # See: http://www.xilinx.com/support/answers/62210.html
+ set_property modelsim.64bit $sim_64bit -objects [get_filesets $sim_fileset]
+ set_property modelsim.simulate.runtime "${sim_runtime}ns" -objects [get_filesets $sim_fileset]
+ set_property modelsim.elaborate.acc "true" -objects [get_filesets $sim_fileset]
+ set_property modelsim.simulate.log_all_signals "true" -objects [get_filesets $sim_fileset]
+ set_property modelsim.simulate.vsim.more_options -value "-c" -objects [get_filesets $sim_fileset]
+ set_property modelsim.elaborate.unifast $sim_fast -objects [get_filesets $sim_fileset]
+ if [info exists ::env(VIV_SIM_USER_DO) ] {
+ set_property modelsim.simulate.custom_udo -value "$::env(VIV_SIM_USER_DO)" -objects [get_filesets $sim_fileset]
+ }
+}
+
+# Launch simulation
+launch_simulation
+
+# Synthesize requested modules
+foreach synth_top "$::env(VIV_SYNTH_TOP)" {
+ set_property top $synth_top [current_fileset]
+ synth_design -mode out_of_context
+ # Perform a simple regex-based search for all clock signals and constrain
+ # them to 500 MHz for the timing report.
+ set clk_regexp "(?i)^(?!.*en.*).*(clk|clock).*"
+ foreach clk_inst [get_ports -regexp $clk_regexp] {
+ create_clock -name $clk_inst -period 2.0 [get_ports $clk_inst]
+ }
+ report_utilization -no_primitives -file ${working_dir}/${synth_top}_synth.rpt
+ report_timing_summary -setup -max_paths 3 -unique_pins -no_header -append -file ${working_dir}/${synth_top}_synth.rpt
+ write_checkpoint -force ${working_dir}/${synth_top}_synth.dcp
+}
+
+# Close project
+if [string equal $vivado_mode "batch"] {
+ puts "BUILDER: Closing project"
+ close_project
+} else {
+ puts "BUILDER: In GUI mode. Leaving project open."
+}
diff --git a/fpga/usrp3/tools/scripts/viv_strategies.tcl b/fpga/usrp3/tools/scripts/viv_strategies.tcl
new file mode 100644
index 000000000..cbf9ea913
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_strategies.tcl
@@ -0,0 +1,170 @@
+#
+# Copyright 2015 Ettus Research
+#
+
+# ---------------------------------------------------
+# Create namespace and initialize global parameters
+# ---------------------------------------------------
+namespace eval ::vivado_strategies {
+ # Export commands
+ namespace export \
+ get_preset \
+ implement_design \
+ check_strategy \
+ print_strategy
+
+ variable g_viv_version [version -short]
+}
+
+# ---------------------------------------------------
+# Return a preset strategy with the most commonly used options
+# ---------------------------------------------------
+proc ::vivado_strategies::get_impl_preset {preset} {
+ variable g_viv_version
+
+ set strategy [dict create]
+ switch -nocase $preset {
+ "Default" {
+ dict set strategy "opt_design.is_enabled" 1
+ dict set strategy "opt_design.directive" "Default"
+ dict set strategy "post_opt_power_opt_design.is_enabled" 0
+ dict set strategy "place_design.directive" "Default"
+ dict set strategy "post_place_power_opt_design.is_enabled" 0
+ dict set strategy "post_place_phys_opt_design.is_enabled" 0
+ dict set strategy "post_place_phys_opt_design.directive" "Default"
+ dict set strategy "route_design.directive" "Default"
+ dict set strategy "route_design.more_options" ""
+ dict set strategy "post_route_phys_opt_design.is_enabled" 0
+ dict set strategy "post_route_phys_opt_design.directive" "Default"
+ }
+ "Performance_Explore" {
+ dict set strategy "opt_design.is_enabled" 1
+ dict set strategy "opt_design.directive" "Explore"
+ dict set strategy "post_opt_power_opt_design.is_enabled" 0
+ dict set strategy "place_design.directive" "Explore"
+ dict set strategy "post_place_power_opt_design.is_enabled" 0
+ dict set strategy "post_place_phys_opt_design.is_enabled" 1
+ dict set strategy "post_place_phys_opt_design.directive" "Explore"
+ dict set strategy "route_design.directive" "Explore"
+ dict set strategy "route_design.more_options" ""
+ dict set strategy "post_route_phys_opt_design.is_enabled" 0
+ dict set strategy "post_route_phys_opt_design.directive" "Explore"
+ }
+ "Performance_ExplorePostRoutePhysOpt" {
+ dict set strategy "opt_design.is_enabled" 1
+ dict set strategy "opt_design.directive" "Explore"
+ dict set strategy "post_opt_power_opt_design.is_enabled" 0
+ dict set strategy "place_design.directive" "Explore"
+ dict set strategy "post_place_power_opt_design.is_enabled" 0
+ dict set strategy "post_place_phys_opt_design.is_enabled" 1
+ dict set strategy "post_place_phys_opt_design.directive" "Explore"
+ dict set strategy "route_design.directive" "Explore"
+ dict set strategy "route_design.more_options" "-tns_cleanup"
+ dict set strategy "post_route_phys_opt_design.is_enabled" 1
+ dict set strategy "post_route_phys_opt_design.directive" "Explore"
+ }
+ }
+ return $strategy
+}
+
+# ---------------------------------------------------
+# Execute the specified implementation strategy
+# ---------------------------------------------------
+proc ::vivado_strategies::implement_design {strategy} {
+ variable g_viv_version
+
+ # Check strategy for validity and print
+ vivado_strategies::check_strategy $strategy
+ puts "BUILDER: Running implementation strategy with:"
+ vivado_strategies::print_strategy $strategy
+
+ # Optimize the current netlist.
+ # This will perform the retarget, propconst, sweep and bram_power_opt optimizations by default.
+ if [dict get $strategy "opt_design.is_enabled"] {
+ set opt_dir [dict get $strategy "opt_design.directive"]
+ opt_design -directive $opt_dir
+ }
+
+ # Optimize dynamic power using intelligent clock gating after optimization
+ if [dict get $strategy "post_opt_power_opt_design.is_enabled"] {
+ power_opt_design
+ }
+
+ # Automatically place ports and leaf-level instances
+ set pla_dir [dict get $strategy "place_design.directive"]
+ place_design -directive $pla_dir
+
+ # Optimize dynamic power using intelligent clock gating after placement
+ if [dict get $strategy "post_place_power_opt_design.is_enabled"] {
+ power_opt_design
+ }
+
+ # Optimize the current placed netlist
+ if [dict get $strategy "post_place_phys_opt_design.is_enabled"] {
+ set pp_physopt_dir [dict get $strategy "post_place_phys_opt_design.directive"]
+ phys_opt_design -directive $pp_physopt_dir
+ }
+
+ # Route the current design
+ set rt_dir [dict get $strategy "route_design.directive"]
+ puts "BUILDER: Choosing routing directive: $rt_dir"
+ if {[dict get $strategy "route_design.more_options"] eq ""} {
+ route_design -directive $rt_dir
+ } else {
+ set rt_more [dict get $strategy "route_design.more_options"]
+ puts "BUILDER: Choosing additional routing options: $rt_more"
+ route_design -directive $rt_dir $rt_more
+ }
+
+ # Optimize the current routed netlist.
+ if [dict get $strategy "post_route_phys_opt_design.is_enabled"] {
+ set pr_physopt_dir [dict get $strategy "post_route_phys_opt_design.directive"]
+ phys_opt_design -directive $pr_physopt_dir
+ }
+}
+
+# ---------------------------------------------------
+# Sanity-check the specified strategy
+# ---------------------------------------------------
+proc ::vivado_strategies::check_strategy {strategy} {
+ variable g_viv_version
+
+ set strategy_options [dict keys $strategy]
+ set required_options {\
+ opt_design.is_enabled \
+ opt_design.directive \
+ post_opt_power_opt_design.is_enabled \
+ place_design.directive \
+ post_place_power_opt_design.is_enabled \
+ post_place_phys_opt_design.is_enabled \
+ post_place_phys_opt_design.directive \
+ route_design.directive \
+ post_route_phys_opt_design.is_enabled \
+ post_route_phys_opt_design.directive \
+ }
+
+ set invalid 0
+ foreach req $required_options {
+ if [expr [lsearch $strategy_options $req] < 0] {
+ puts "BUILDER: ERROR: Invalid strategy. Missing option $req"
+ set invalid 1
+ }
+ }
+ if $invalid {
+ error "Strategy check failed!"
+ }
+}
+
+# ---------------------------------------------------
+# Print strategy parameters to the console
+# ---------------------------------------------------
+proc ::vivado_strategies::print_strategy {strategy} {
+ variable g_viv_version
+
+ foreach opt [dict keys $strategy] {
+ set val [dict get $strategy $opt]
+ puts " * $opt = $val"
+ }
+}
+
+
diff --git a/fpga/usrp3/tools/scripts/viv_synth.tcl b/fpga/usrp3/tools/scripts/viv_synth.tcl
new file mode 100644
index 000000000..b93de3ca9
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_synth.tcl
@@ -0,0 +1,16 @@
+#
+# Copyright 2019 Ettus Research, a National Instruments Brand
+#
+
+source $::env(VIV_TOOLS_DIR)/scripts/viv_utils.tcl
+source $::env(VIV_TOOLS_DIR)/scripts/viv_strategies.tcl
+
+# STEP#1: Create project, add sources, refresh IP
+vivado_utils::initialize_project
+
+# STEP#2: Run synthesis
+vivado_utils::synthesize_design
+vivado_utils::generate_post_synth_reports
+
+# Cleanup
+vivado_utils::close_batch_project
diff --git a/fpga/usrp3/tools/scripts/viv_utils.tcl b/fpga/usrp3/tools/scripts/viv_utils.tcl
new file mode 100644
index 000000000..32c67e874
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/viv_utils.tcl
@@ -0,0 +1,290 @@
+#
+# Copyright 2014-2015 Ettus Research
+#
+
+# ---------------------------------------------------
+# Create namespace and initialize global parameters
+# ---------------------------------------------------
+namespace eval ::vivado_utils {
+ # Export commands
+ namespace export \
+ initialize_project \
+ synthesize_design \
+ check_design \
+ generate_post_synth_reports \
+ generate_post_place_reports \
+ generate_post_route_reports \
+ write_implementation_outputs \
+ get_top_module \
+ get_part_name \
+ get_vivado_mode
+
+ # Required environment variables
+ variable g_tools_dir $::env(VIV_TOOLS_DIR)
+ variable g_top_module $::env(VIV_TOP_MODULE)
+ variable g_part_name $::env(VIV_PART_NAME)
+ variable g_output_dir $::env(VIV_OUTPUT_DIR)
+ variable g_source_files $::env(VIV_DESIGN_SRCS)
+ variable g_vivado_mode $::env(VIV_MODE)
+
+ # Optional environment variables
+ variable g_verilog_defs ""
+ if { [info exists ::env(VIV_VERILOG_DEFS) ] } {
+ set g_verilog_defs $::env(VIV_VERILOG_DEFS)
+ }
+ variable g_include_dirs ""
+ if { [info exists ::env(VIV_INCLUDE_DIRS) ] } {
+ set g_include_dirs $::env(VIV_INCLUDE_DIRS)
+ }
+}
+
+# ---------------------------------------------------
+# Create a new project in memory and add source files
+# ---------------------------------------------------
+proc ::vivado_utils::initialize_project { {save_to_disk 0} } {
+ variable g_top_module
+ variable g_part_name
+ variable g_output_dir
+ variable g_source_files
+
+ variable bd_files ""
+
+ file delete -force $g_output_dir/build.rpt
+
+ if {$save_to_disk == 1} {
+ puts "BUILDER: Creating Vivado project ${g_top_module}_project.xpr for part $g_part_name"
+ create_project -part $g_part_name ${g_top_module}_project
+ } else {
+ puts "BUILDER: Creating Vivado project in memory for part $g_part_name"
+ create_project -in_memory -part $g_part_name
+ }
+
+ foreach src_file $g_source_files {
+ set src_ext [file extension $src_file ]
+ if [expr [lsearch {.vhd .vhdl} $src_ext] >= 0] {
+ puts "BUILDER: Adding VHDL : $src_file"
+ read_vhdl -library work $src_file
+ } elseif [expr [lsearch {.v .vh .sv .svh} $src_ext] >= 0] {
+ puts "BUILDER: Adding Verilog : $src_file"
+ read_verilog $src_file
+ } elseif [expr [lsearch {.xdc} $src_ext] >= 0] {
+ puts "BUILDER: Adding XDC : $src_file"
+ read_xdc $src_file
+ } elseif [expr [lsearch {.xci} $src_ext] >= 0] {
+ puts "BUILDER: Adding IP : $src_file"
+ read_ip $src_file
+ set_property generate_synth_checkpoint true [get_files $src_file]
+ } elseif [expr [lsearch {.ngc .edif .edf} $src_ext] >= 0] {
+ puts "BUILDER: Adding Netlist : $src_file"
+ read_edif $src_file
+ } elseif [expr [lsearch {.bd} $src_ext] >= 0] {
+ puts "BUILDER: Adding Block Design to list (added after IP regeneration): $src_file"
+ append bd_files "$src_file "
+ } elseif [expr [lsearch {.bxml} $src_ext] >= 0] {
+ puts "BUILDER: Adding Block Design XML to list (added after IP regeneration): $src_file"
+ append bd_files "$src_file "
+ } elseif [expr [lsearch {.dat} $src_ext] >= 0] {
+ puts "BUILDER: Adding Data File : $src_file"
+ add_files $src_file
+ } else {
+ puts "BUILDER: \[WARNING\] File ignored!!!: $src_file"
+ }
+ }
+
+ # The 'synth_ip [get_ips *]' step causes builds in Windows to recompile various
+ # pieces of the IP. This is time-consuming and unnecessary behavior, thus is removed.
+ # These steps are redundant anyway since the IP builder performs both of them.
+ # puts "BUILDER: Refreshing IP"
+ # generate_target all [get_ips *]
+ # synth_ip [get_ips *]
+
+ #might seem silly, but we need to add the bd files after the ip regeneration.
+ foreach file $bd_files {
+ puts "BUILDER: Adding file from Block Design list: $file"
+ add_files -norecurse $file
+ }
+
+ puts "BUILDER: Setting $g_top_module as the top module"
+ set_property top $g_top_module [current_fileset]
+}
+
+# ---------------------------------------------------
+# Synthesize design (Shortcut for Vivado's synth_design)
+# ---------------------------------------------------
+proc ::vivado_utils::synthesize_design {args} {
+ variable g_top_module
+ variable g_part_name
+ variable g_verilog_defs
+ variable g_include_dirs
+
+ set vdef_args ""
+ foreach vdef $g_verilog_defs {
+ set vdef_args [concat $vdef_args "-verilog_define $vdef"]
+ }
+ set incdir_args ""
+ if { [string compare $g_include_dirs ""] != 0 } {
+ set incdir_args "-include_dirs $g_include_dirs"
+ }
+
+ set synth_cmd "synth_design -top $g_top_module -part $g_part_name"
+ set synth_cmd [concat $synth_cmd $vdef_args]
+ set synth_cmd [concat $synth_cmd $incdir_args]
+ set synth_cmd [concat $synth_cmd $args]
+ puts "BUILDER: Synthesizing design"
+ eval $synth_cmd
+}
+
+# ---------------------------------------------------
+# Check design (Shortcut for Vivado's synth_design -rtl)
+# ---------------------------------------------------
+proc ::vivado_utils::check_design {args} {
+ variable g_top_module
+ variable g_part_name
+ variable g_verilog_defs
+ variable g_include_dirs
+
+ set vdef_args ""
+ foreach vdef $g_verilog_defs {
+ set vdef_args [concat $vdef_args "-verilog_define $vdef"]
+ }
+ set incdir_args ""
+ if { [string compare $g_include_dirs ""] != 0 } {
+ set incdir_args "-include_dirs $g_include_dirs"
+ }
+
+ set synth_cmd "synth_design -top $g_top_module -part $g_part_name -rtl -rtl_skip_ip -rtl_skip_constraints"
+ set synth_cmd [concat $synth_cmd $vdef_args]
+ set synth_cmd [concat $synth_cmd $incdir_args]
+ set synth_cmd [concat $synth_cmd $args]
+ puts "BUILDER: Checking syntax and elaborating design"
+ eval $synth_cmd
+}
+
+# ---------------------------------------------------
+# Generate post synthesis reports and checkpoint
+# ---------------------------------------------------
+proc ::vivado_utils::generate_post_synth_reports {} {
+ variable g_output_dir
+
+ puts "BUILDER: Writing post-synthesis checkpoint"
+ write_checkpoint -force $g_output_dir/post_synth
+ puts "BUILDER: Writing post-synthesis reports"
+ report_utilization -file $g_output_dir/post_synth_util.rpt
+ report_utilization -hierarchical -file $g_output_dir/post_synth_util_hier.rpt
+ report_drc -ruledeck methodology_checks -file $g_output_dir/methodology.rpt
+ report_high_fanout_nets -file $g_output_dir/high_fanout_nets.rpt
+}
+
+# ---------------------------------------------------
+# Generate post placement reports and checkpoint
+# ---------------------------------------------------
+proc ::vivado_utils::generate_post_place_reports {} {
+ variable g_output_dir
+
+ puts "BUILDER: Writing post-placement checkpoint"
+ write_checkpoint -force $g_output_dir/post_place
+ puts "BUILDER: Writing post-placement reports"
+ report_clock_utilization -file $g_output_dir/clock_util.rpt
+ report_utilization -file $g_output_dir/post_place_util.rpt
+ report_utilization -hierarchical -file $g_output_dir/post_place_util_hier.rpt
+ report_timing -sort_by group -max_paths 5 -path_type summary -file $g_output_dir/post_place_timing.rpt
+}
+
+# ---------------------------------------------------
+# Generate post route reports and checkpoint
+# ---------------------------------------------------
+proc ::vivado_utils::generate_post_route_reports {} {
+ variable g_output_dir
+
+ puts "BUILDER: Writing post-route checkpoint"
+ write_checkpoint -force $g_output_dir/post_route
+ puts "BUILDER: Writing post-route reports"
+ if {[file exists "$g_output_dir/clock_util.rpt"] == 0} {
+ report_clock_utilization -file $g_output_dir/clock_util.rpt
+ }
+ report_timing_summary -file $g_output_dir/post_route_timing_summary.rpt
+ report_utilization -file $g_output_dir/post_route_util.rpt
+ report_utilization -hierarchical -file $g_output_dir/post_route_util_hier.rpt
+ report_power -file $g_output_dir/post_route_power.rpt
+ report_drc -file $g_output_dir/post_imp_drc.rpt
+ report_timing -sort_by group -max_paths 10 -path_type summary -file $g_output_dir/post_route_timing.rpt
+}
+
+# ---------------------------------------------------
+# Export implementation
+# ---------------------------------------------------
+proc ::vivado_utils::write_implementation_outputs { {byte_swap_bin 0} } {
+ variable g_output_dir
+ variable g_top_module
+ variable g_tools_dir
+
+ puts "BUILDER: Writing implementation netlist and XDC"
+ write_verilog -force $g_output_dir/${g_top_module}_impl_netlist.v
+ write_xdc -no_fixed_only -force $g_output_dir/${g_top_module}_impl.xdc
+ puts "BUILDER: Writing bitfile"
+ write_bitstream -force $g_output_dir/${g_top_module}.bit
+ puts "BUILDER: Writing config bitstream"
+ set binsize [expr [file size $g_output_dir/${g_top_module}.bit]/(1024*1024)]
+ set binsize_pow2 [expr {int(pow(2,ceil(log($binsize)/log(2))))}]
+ set bin_iface [expr $byte_swap_bin?"SMAPx32":"SMAPx8"]
+ write_cfgmem -force -quiet -interface $bin_iface -format BIN -size $binsize_pow2 -disablebitswap -loadbit "up 0x0 $g_output_dir/${g_top_module}.bit" $g_output_dir/${g_top_module}.bin
+ puts "BUILDER: Writing debug probes"
+ write_debug_probes -force $g_output_dir/${g_top_module}.ltx
+ puts "BUILDER: Writing export report"
+ report_utilization -omit_locs -file $g_output_dir/build.rpt
+ report_timing_summary -no_detailed_paths -file $g_output_dir/build.rpt -append
+ if {! [string match -nocase {*timing constraints are met*} [read [open $g_output_dir/build.rpt]]]} {
+ send_msg_id {Builder 0-0} error "The design did not satisfy timing constraints. (Implementation outputs were still generated)"
+ }
+}
+
+proc ::vivado_utils::write_netlist_outputs { {suffix ""} } {
+ variable g_output_dir
+ variable g_top_module
+
+ puts "BUILDER: Writing EDIF netlist and XDC"
+ set filename ${g_output_dir}/${g_top_module}
+ if { [expr [string length $suffix] > 0] } {
+ set filename ${filename}_${suffix}
+ }
+ write_edif -force ${filename}.edf
+ write_xdc -no_fixed_only -force ${filename}.xdc
+ puts "BUILDER: Writing export report"
+ report_utilization -omit_locs -file $g_output_dir/build.rpt
+ report_timing_summary -no_detailed_paths -file $g_output_dir/build.rpt -append
+ if {! [string match -nocase {*timing constraints are met*} [read [open $g_output_dir/build.rpt]]]} {
+ send_msg_id {Builder 0-0} error "The design did not meet all timing constraints. (Implementation outputs were still generated)"
+ }
+}
+
+# ---------------------------------------------------
+# Close project
+# ---------------------------------------------------
+proc ::vivado_utils::close_batch_project {} {
+ variable g_vivado_mode
+
+ if [string equal $g_vivado_mode "batch"] {
+ puts "BUILDER: Closing project"
+ close_project
+ } else {
+ puts "BUILDER: In GUI mode. Leaving project open."
+ }
+}
+
+# ---------------------------------------------------
+# Get state variables
+# ---------------------------------------------------
+proc ::vivado_utils::get_top_module {} {
+ variable g_top_module
+ return $g_top_module
+}
+
+proc ::vivado_utils::get_part_name {} {
+ variable g_part_name
+ return $g_part_name
+}
+
+proc ::vivado_utils::get_vivado_mode {} {
+ variable g_vivado_mode
+ return $g_vivado_mode
+}
diff --git a/fpga/usrp3/tools/scripts/xil_bitfile_parser.py b/fpga/usrp3/tools/scripts/xil_bitfile_parser.py
new file mode 100755
index 000000000..7201bde17
--- /dev/null
+++ b/fpga/usrp3/tools/scripts/xil_bitfile_parser.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+
+import argparse
+import os, sys
+import struct
+import re
+
+# Parse command line options
+def get_options():
+ parser = argparse.ArgumentParser(description='Parser for the Xilinx FPGA Bitfile')
+ parser.add_argument("bitfile", help="Input bitfile path")
+ parser.add_argument("--bin_out", help="Output bin file path")
+ parser.add_argument('--flip', action='store_true', default=False, help='Flip 32-bit endianess')
+ parser.add_argument('--info', action='store_true', default=False, help='Print bitfile info')
+ args = parser.parse_args()
+ if (not os.path.isfile(args.bitfile)):
+ print('ERROR: Bitfile ' + args.bitfile + ' could not be accessed or is not a file.\n')
+ parser.print_help()
+ sys.exit(1)
+ return args
+
+short = struct.Struct('>H')
+ulong = struct.Struct('>I')
+KEYNAMES = {'a':'design_name', 'b':'part_name', 'c':'date', 'd':'time'}
+
+# Parse bitfile
+def parse_bitfile(bitfile_bytes):
+ header = dict()
+ ptr = 0
+ #Field 1
+ if short.unpack(bitfile_bytes[ptr:ptr+2])[0] == 9 and ulong.unpack(bitfile_bytes[ptr+2:ptr+6])[0] == 0x0ff00ff0:
+ #Headers
+ ptr += short.unpack(bitfile_bytes[ptr:ptr+2])[0] + 2
+ ptr += short.unpack(bitfile_bytes[ptr:ptr+2])[0] + 1
+ #Fields a-d
+ for keynum in range(0, 4):
+ key = bitfile_bytes[ptr]; ptr += 1
+ val_len = short.unpack(bitfile_bytes[ptr:ptr+2])[0]; ptr += 2
+ val = bitfile_bytes[ptr:ptr+val_len]; ptr += val_len
+ header[KEYNAMES[key]] = str(val).rstrip('\0')
+ #Field e
+ ptr += 1
+ length = ulong.unpack(bitfile_bytes[ptr:ptr+4])[0]; ptr += 4
+ header['bitstream_len'] = length
+ header['header_len'] = ptr
+ data = bitfile_bytes[ptr:ptr+length]
+ return (header, data)
+ else:
+ raise Exception('Bitfile header validation failed!')
+
+# Flip 32-bit endianess
+def flip32(data):
+ sl = struct.Struct('<I')
+ sb = struct.Struct('>I')
+ b = buffer(data)
+ d = bytearray(len(data))
+ for offset in xrange(0, len(data), 4):
+ sb.pack_into(d, offset, sl.unpack_from(b, offset)[0])
+ return d
+
+def main():
+ args = get_options();
+ with open(args.bitfile, 'rb') as bit_file:
+ # Parse bytes into a header map and data buffer
+ (header, data) = parse_bitfile(bit_file.read())
+ # Print bitfile info
+ if args.info:
+ m = re.search('(.+);UserID=(.+);COMPRESS=(.+);Version=(.+)', header['design_name'])
+ if m:
+ print 'Design Name: ' + m.group(1)
+ print 'User ID: ' + m.group(2)
+ print 'Compression: ' + m.group(3)
+ print 'Vivado Version: ' + m.group(4)
+ else:
+ print 'Design Name: ' + header['design_name']
+ print 'Part Name: ' + header['part_name']
+ print 'Datestamp: ' + header['date'] + ' ' + header['time']
+ print 'Bitstream Size: ' + str(header['bitstream_len'])
+ # Write a bin file
+ if args.bin_out:
+ open(args.bin_out, 'wb').write(flip32(data) if args.flip else data)
+
+if __name__ == '__main__':
+ main() \ No newline at end of file
diff --git a/fpga/usrp3/tools/utils/README.md b/fpga/usrp3/tools/utils/README.md
new file mode 100644
index 000000000..fe02a417c
--- /dev/null
+++ b/fpga/usrp3/tools/utils/README.md
@@ -0,0 +1,4 @@
+Packaging FPGA (and other) images
+=================================
+
+Run `package_images.py` from the directory where the images are stored.
diff --git a/fpga/usrp3/tools/utils/gen_xdc_from_rinf.py b/fpga/usrp3/tools/utils/gen_xdc_from_rinf.py
new file mode 100755
index 000000000..469c4336a
--- /dev/null
+++ b/fpga/usrp3/tools/utils/gen_xdc_from_rinf.py
@@ -0,0 +1,334 @@
+#! /usr/bin/python
+
+import sys, os
+import collections
+import argparse
+import re
+
#------------------------------------------------------------
# Types
#------------------------------------------------------------
# Terminal definition (net name + pin location) for each reference designator
terminal_t = collections.namedtuple('terminal_t', 'name pin')
# FPGA pin definition: net name, package location, IO type and bank
fpga_pin_t = collections.namedtuple('fpga_pin_t', 'name loc iotype bank')
+
# A (ref designator -> terminals) map
# For each reference designator, this class maintains a list of all terminals
# including names and pin locations. It also maintains a reverse mapping for all
# terminal names to reference designator
class terminal_db_t:
    """Forward map ref_des -> [terminal_t] plus reverse map net -> [ref_des].

    dict.has_key() was replaced with setdefault()/membership so the class
    also works on Python 3.
    """
    def __init__(self):
        self.db = dict()      # ref_des -> list of terminal_t
        self.rev_db = dict()  # net name -> list of ref_des

    def add(self, ref_des, net_name, pin_name):
        """Record that ref_des has a terminal on net_name at pin_name."""
        self.db.setdefault(ref_des, []).append(terminal_t(net_name, pin_name))
        self.rev_db.setdefault(net_name, []).append(ref_des)

    def get_terminals(self, ref_des):
        """Return all terminals of the given reference designator."""
        return self.db[ref_des]

    def lookup_endpoints(self, net_name):
        """Return all reference designators connected to the given net."""
        return self.rev_db[net_name]
+
# A (component -> properties) map
# For each component, this class maintains all properties that are
# listed in the RINF file
class component_db_t:
    """Per-component attribute store keyed by reference designator.

    dict.has_key() was replaced with `in` membership tests so the class
    also works on Python 3.
    """
    def __init__(self):
        self.db = dict()  # ref_des -> {attribute name: value}

    def add_comp(self, ref_des, name):
        """Register a component; its name is stored under the 'Name' key."""
        self.db[ref_des] = {'Name': name}

    def add_attr(self, ref_des, prop, value):
        """Attach an attribute to a previously registered component."""
        self.db[ref_des][prop] = value

    def exists(self, comp_name):
        """True if the component was registered."""
        return comp_name in self.db

    def lookup(self, comp_name):
        """Return the attribute dict of the component (KeyError if absent)."""
        return self.db[comp_name]

    def attr_exists(self, comp_name, attr_name):
        """True if the component exists and carries the given attribute."""
        return self.exists(comp_name) and attr_name in self.db[comp_name]

    def get_attr(self, comp_name, attr_name):
        """Return the attribute value (KeyError if component/attr absent)."""
        return self.db[comp_name][attr_name]
+
# An FPGA (pin location -> properties) map
# For each FPGA pin location, this class maintains a list of various pin properties
# Also maintains all the IO Types to aid in filtering
class fpga_pin_db_t:
    """Database of FPGA package pins parsed from a Xilinx pinout text file.

    self.pindb maps pin location -> {column name: value}; self.iodb is the
    set of I/O types considered "interface" pins after exclusions.
    Fixed vs. the original: no mutable default argument, dict.has_key()
    replaced with `in` (Python 3 compatible), file iterated lazily.
    """
    def __init__(self, pkg_file, io_exclusions=None):
        print('INFO: Parsing Xilinx Package File ' + pkg_file + '...')
        header = ['Pin','Pin Name','Memory Byte Group','Bank','VCCAUX Group','Super Logic Region','I/O Type','No-Connect']
        self.pindb = dict()
        self.iodb = set()
        with open(pkg_file, 'r') as pkg_f:
            for line in pkg_f:
                tokens = collapse_tokens(line.strip().split(' '))
                # Data rows have exactly as many columns as the header;
                # skip the header row itself
                if len(tokens) == 8 and tokens != header:
                    pin_info = dict()
                    for col in range(1, len(header)):
                        pin_info[header[col].strip()] = tokens[col].strip()
                    self.pindb[tokens[0].strip()] = pin_info
                    self.iodb.add(pin_info['I/O Type'])
        if not self.pindb or not self.iodb:
            print('ERROR: Could not parse Xilinx package file ' + pkg_file)
            sys.exit(1)
        print('INFO: * Found IO types: ' + ', '.join(self.iodb))
        # 'NA' pins (power/ground/config) are never interface pins
        self.iodb.remove('NA')
        for io in (io_exclusions or []):
            if io:
                self.iodb.remove(io.strip())
        print('INFO: * Using IO types: ' + ', '.join(self.iodb))

    def iface_pins(self):
        """Return the set of pin locations whose I/O type is usable."""
        iface_pins = set()
        for pin in self.pindb.keys():
            if self.pindb[pin]['I/O Type'] in self.iodb:
                iface_pins.add(pin)
        return iface_pins

    def is_iface_pin(self, pin):
        """True if pin exists in the package and has a usable I/O type."""
        return (pin in self.pindb) and (self.pindb[pin]['I/O Type'] in self.iodb)

    def get_pin_attr(self, pin, attr):
        """Return one column value (e.g. 'Bank') for a pin location."""
        return self.pindb[pin][attr]
+
+#------------------------------------------------------------
+# Helper functions
+#------------------------------------------------------------
+
# Parse command line options
def get_options():
    """Parse command line arguments; exit with usage info when the two
    mandatory inputs (--rinf, --xil_pkg_file) are missing."""
    parser = argparse.ArgumentParser(description='Generate a template IO location XDC and Verilog stub from an RINF netlist and a Xilinx package file.')
    parser.add_argument('--rinf', type=str, default=None, help='Input RINF netlist file (*.frs)')
    parser.add_argument('--xil_pkg_file', type=str, default=None, help='Input Xilinx package pinout file (*.txt)')
    parser.add_argument('--ref_des', type=str, default='U0', help='Reference designator for the FPGA')
    parser.add_argument('--xdc_out', type=str, default='output.xdc', help='Output XDC file with location constraints')
    parser.add_argument('--vstub_out', type=str, default=None, help='Output Verilog stub file with the portmap')
    parser.add_argument('--exclude_io', type=str, default='MIO,DDR,CONFIG', help='Exclude the specified FPGA IO types from consideration')
    parser.add_argument('--suppress_warn', action='store_true', default=False, help='Suppress sanity check warnings')
    parser.add_argument('--traverse_depth', type=int, default=1, help='How many linear components to traverse before finding a named net')
    parser.add_argument('--fix_names', action='store_true', default=False, help='Fix net names when writing the XDC and Verilog')
    args = parser.parse_args()
    # Both input files are mandatory; bail out with usage info if absent
    if not args.xil_pkg_file:
        print('ERROR: Please specify a Xilinx package file using the --xil_pkg_file option\n')
        parser.print_help()
        sys.exit(1)
    if not args.rinf:
        print('ERROR: Please specify an input RINF file using the --rinf option\n')
        parser.print_help()
        sys.exit(1)
    return args
+
+
# Remove empty string from a token array
def collapse_tokens(tokens):
    """Strip surrounding whitespace from each token and drop the ones that
    end up empty."""
    return [tok.strip() for tok in tokens if tok.strip()]
+
# Parse user specified RINF file and return a terminal and component database
def parse_rinf(rinf_path, suppress_warnings):
    """Parse an RINF netlist into (terminal_db_t, component_db_t).

    The parser is a small state machine: lines beginning with a '.' keyword
    change state (.ADD_COM, .ATT_COM, .ADD_TER, .TER, .END); other lines are
    continuations, which are only meaningful in the .TER state.
    """
    print('INFO: Parsing RINF File ' + rinf_path + '...')
    terminal_db = terminal_db_t()
    component_db = component_db_t()
    with open(rinf_path, 'r') as rinf_f:
        net_name = '<UNDEF>'
        state = '<UNDEF>'
        line_num = 0
        # Iterate the file lazily instead of materializing readlines()
        for line in rinf_f:
            tokens = collapse_tokens(line.strip().split())
            line_num += 1
            if not tokens:
                continue
            if tokens[0].startswith('.'):
                # State transition
                state = tokens[0]
                if state == '.ADD_COM':
                    component_db.add_comp(tokens[1], tokens[3])
                elif state == '.ATT_COM':
                    component_db.add_attr(tokens[1], tokens[2].strip('"'), tokens[3].strip('"'))
                elif state == '.ADD_TER':
                    net_name = tokens[3].strip('"')
                    terminal_db.add(tokens[1], net_name, tokens[2])
                elif state == '.TER':
                    terminal_db.add(tokens[1], net_name, tokens[2])
                elif state == '.END':
                    break
            else:
                # State continuation
                if state == '.TER':
                    terminal_db.add(tokens[0], net_name, tokens[1])
                elif not suppress_warnings:
                    print('WARNING: Ignoring line continuation for ' + state + ' at line ' + str(line_num))
    return (terminal_db, component_db)
+
# From all the FPGA pins filter out the ones
# relevant for creating an XDC
def filter_fpga_pins(ref_des, terminal_db, fpga_pin_db, max_level):
    """Return a dict mapping net name -> fpga_pin_t for every terminal of
    ref_des that lands on an interface-capable FPGA pin.

    ref_des     -- reference designator of the FPGA (e.g. 'U0')
    terminal_db -- terminal_db_t built from the RINF netlist
    fpga_pin_db -- fpga_pin_db_t built from the Xilinx package file
    max_level   -- max number of series components to traverse while
                   looking for a human-readable net name
    """
    terminals = terminal_db.get_terminals(ref_des)
    pins = dict()
    # Loop through all the terminals of the FPGA
    for fpga_term in terminals:
        term = fpga_term
        level = 0
        # For each net check if there is a valid (non $XXXXXX) name.
        # If yes, use it. If not, then traverse one component down and check again.
        # If the next net has a valid name, use that. One requirement for this
        # traversal is that the downstream components must form a linear network
        # i.e. no branching. As soon as this algorithm sees a branch, it aborts.
        while term and term.name.startswith('$') and level < max_level:
            level = level + 1
            comps = terminal_db.lookup_endpoints(term.name)
            if len(comps) == 2:  # Check for branch
                next_comp = comps[1] if comps[0] == ref_des else comps[0]
                sec_terms = terminal_db.get_terminals(next_comp)
                if len(sec_terms) == 2:  # Check for branch
                    term = sec_terms[1] if sec_terms[0].name == term.name else sec_terms[0]
                # NOTE(review): the indentation of this break was lost in the
                # extraction; as placed here, traversal stops after the first
                # two-endpoint hop -- confirm against the original file.
                break
        # At this point we either found a valid net or we reached the max_depth.
        # Check again before approving this as a valid connection.
        if term.name and (not term.name.startswith('$')) and fpga_pin_db.is_iface_pin(fpga_term.pin):
            iotype = fpga_pin_db.get_pin_attr(fpga_term.pin, 'I/O Type')
            bank = fpga_pin_db.get_pin_attr(fpga_term.pin, 'Bank')
            pins[term.name] = fpga_pin_t(term.name, fpga_term.pin, iotype, bank)
    return pins
+
# Fix net names.
# This function lists all the valid substitutions to make to net names
def fix_net_name(name):
    """Replace every character outside [a-zA-Z0-9] with an underscore so the
    name is a legal Verilog identifier fragment."""
    illegal_chars = re.compile(r'[\W_]')
    return illegal_chars.sub('_', name)
+
# Write an XDC file with sanity checks and readability enhancements
def write_output_files(xdc_path, vstub_path, fpga_pins, fix_names):
    """Write the location-constraint XDC and optionally a Verilog port stub.

    xdc_path   -- output XDC file path
    vstub_path -- output Verilog stub path, or None to skip stub generation
    fpga_pins  -- {net_name: fpga_pin_t} as returned by filter_fpga_pins()
    fix_names  -- sanitize net names with fix_net_name() before writing

    Stub generation is interactive: the user is prompted for the direction
    of each port. Python 3 fixes vs. the original: reduce() replaced with
    max(), has_key() with `in`, list-vs-range comparison made explicit, and
    raw_input() resolved at runtime.
    """
    # Figure out the max pin name length for human readable text alignment
    max_pin_len = max(len(name) for name in fpga_pins)
    # Create a bus database. Collapse multi-bit buses into single entries:
    # "FOO(3)" becomes bus_db['FOO'] = [3, ...]; scalar nets map to [].
    bus_db = dict()
    for pin in sorted(fpga_pins.keys()):
        m = re.search(r'([a-zA-Z0-9_()]+)\(([0-9]+)\)', pin)
        if m:
            bus_db.setdefault(m.group(1), []).append(int(m.group(2)))
        else:
            bus_db[pin] = []
    # Walk through the bus database and write the XDC file
    with open(xdc_path, 'w') as xdc_f:
        print('INFO: Writing template XDC ' + xdc_path + '...')
        for bus in sorted(bus_db.keys()):
            if not re.match("[a-zA-Z].[a-zA-Z0-9_]*$", bus):
                print('CRITICAL WARNING: Invalid net name (bad Verilog syntax): ' + bus +
                      ('. Possibly fixed but please review.' if fix_names else '. Please review.'))
            if bus_db[bus] == []:
                # Scalar net: one PACKAGE_PIN line plus one IOSTANDARD line
                xdc_pin = fix_net_name(bus.upper()) if fix_names else bus.upper()
                xdc_loc = fpga_pins[bus].loc.upper().ljust(16)
                xdc_iostd = ('<IOSTD_BANK' + fpga_pins[bus].bank + '>').ljust(16)
                xdc_f.write('set_property PACKAGE_PIN ' + xdc_loc + (' [get_ports {' + xdc_pin + '}]').ljust(max_pin_len+16) + '\n')
                xdc_f.write('set_property IOSTANDARD ' + xdc_iostd + ' [get_ports {' + xdc_pin + '}]\n')
                xdc_f.write('\n')
            else:
                bits = sorted(bus_db[bus])
                # Sanity check: bits should run 0..N with no holes.
                # (list(range(...)) so the comparison also works on Python 3)
                if bits != list(range(0, bits[-1]+1)):
                    print('CRITICAL WARNING: Incoherent bus: ' + bus + '. Some bits may be missing. Please review.')
                for bit in bits:
                    bus_full = bus + '(' + str(bit) + ')'
                    xdc_pin = bus.upper() + '[' + str(bit) + ']'
                    xdc_loc = fpga_pins[bus_full].loc.upper().ljust(16)
                    xdc_iostd = ('<IOSTD_BANK' + fpga_pins[bus_full].bank + '>').ljust(16)
                    xdc_f.write('set_property PACKAGE_PIN ' + xdc_loc + (' [get_ports {' + xdc_pin + '}]').ljust(max_pin_len+16) + '\n')
                    xdc_f.write('set_property IOSTANDARD ' + xdc_iostd + ' [get_ports {' + bus.upper() + '[*]}]\n')
                    xdc_f.write('\n')
    # Walk through the bus database and write a stub Verilog file
    if vstub_path:
        # raw_input() was renamed to input() in Python 3; resolve at runtime
        try:
            read_line = raw_input
        except NameError:
            read_line = input
        with open(vstub_path, 'w') as vstub_f:
            print('INFO: Writing Verilog stub ' + vstub_path + '...')
            vstub_f.write('module ' + os.path.splitext(os.path.basename(vstub_path))[0] + ' (\n')
            i = 1
            for bus in sorted(bus_db.keys()):
                port_name = fix_net_name(bus.upper()) if fix_names else bus.upper()
                port_loc = fpga_pins[bus].loc.upper() if (bus_db[bus] == []) else '<Multiple>'
                port_dir_short = read_line('[' + str(i) + '/' + str(len(bus_db.keys())) + '] Direction for ' + port_name + ' (' + port_loc + ')? {[i]nput,[o]utput,[b]oth}: ').lower()
                if port_dir_short.startswith('i'):
                    port_dir = ' input '
                elif port_dir_short.startswith('o'):
                    port_dir = ' output'
                else:
                    port_dir = ' inout '
                if bus_db[bus] == []:
                    vstub_f.write(port_dir + ' ' + port_name + ',\n')
                else:
                    bus_def = str(sorted(bus_db[bus])[-1]) + ':0'
                    vstub_f.write(port_dir + (' [' + bus_def + '] ').ljust(10) + port_name + ',\n')
                i = i + 1
            vstub_f.write(');\n\nendmodule')
+
# Report unconnected pins
def report_unconnected_pins(fpga_pins, fpga_pin_db):
    """Print every interface-capable FPGA pin that is not referenced by any
    net in fpga_pins (i.e. pins that got no constraint/stub entry)."""
    print('WARNING: The following pins were not connected. Please review.')
    # Collect all the pin locations that have been used for constraint/stub creation
    used_locs = set(pin.loc for pin in fpga_pins.values())
    # Loop through all possible pins and check if we have missed any
    for pin in sorted(fpga_pin_db.iface_pins()):
        if pin not in used_locs:
            print(' * ' + pin.ljust(6) + ': ' +
                  'Bank = ' + str(fpga_pin_db.get_pin_attr(pin, 'Bank')).ljust(6) +
                  'IO Type = ' + str(fpga_pin_db.get_pin_attr(pin, 'I/O Type')).ljust(10) +
                  'Name = ' + str(fpga_pin_db.get_pin_attr(pin, 'Pin Name')).ljust(10))
+
#------------------------------------------------------------
# Main
#------------------------------------------------------------
def main():
    """Drive the whole flow: parse inputs, cross-reference the FPGA pins,
    write the XDC/Verilog outputs and report unconnected pins."""
    args = get_options()
    # Build FPGA pin database using Xilinx package file
    fpga_pin_db = fpga_pin_db_t(args.xil_pkg_file, args.exclude_io.split(','))
    # Parse RINF netlist
    (terminal_db, component_db) = parse_rinf(args.rinf, args.suppress_warn)
    # Look for desired reference designator and print some info about it
    print('INFO: Resolving reference designator ' + args.ref_des + '...')
    if not component_db.exists(args.ref_des):
        print('ERROR: Reference designator not found in the netlist')
        sys.exit(1)
    fpga_info = component_db.lookup(args.ref_des)
    print('INFO: * Name = ' + fpga_info['Name'])
    print('INFO: * Description = ' + fpga_info['Description'])
    # Build a list of all FPGA interface pins in the netlist
    fpga_pins = filter_fpga_pins(args.ref_des, terminal_db, fpga_pin_db, args.traverse_depth)
    if not fpga_pins:
        print('ERROR: Could not cross-reference pins for ' + args.ref_des + ' with FPGA device. Are you sure it is an FPGA?')
        sys.exit(1)
    # Write output XDC and Verilog
    write_output_files(args.xdc_out, args.vstub_out, fpga_pins, args.fix_names)
    print('INFO: Output file(s) generated successfully!')
    # Generate a report of all unconnected pins
    if not args.suppress_warn:
        report_unconnected_pins(fpga_pins, fpga_pin_db)

if __name__ == '__main__':
    main()
diff --git a/fpga/usrp3/tools/utils/image_package_mapping.py b/fpga/usrp3/tools/utils/image_package_mapping.py
new file mode 100644
index 000000000..da76dc989
--- /dev/null
+++ b/fpga/usrp3/tools/utils/image_package_mapping.py
@@ -0,0 +1,279 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018 Ettus Research, a National Instruments Company
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+"""
+Container for the list of image package targets, and the information about them
+"""
# Schema, per target:
#   "type"         : device family; also the fileserver/cache subdirectory
#   "package_name" : archive filename template -- the "{}" placeholder is
#                    filled in with the short Git hash of the build
#   "files"        : exact file names expected inside the archive
PACKAGE_MAPPING = {
    # --- E3xx family ---
    "e310_sg1": {
        "type": "e3xx",
        "package_name": "e3xx_e310_sg1_fpga_default-g{}.zip",
        "files": ['usrp_e310_sg1_fpga.bit',
                  'usrp_e310_sg1_fpga.bit.md5',
                  'usrp_e310_sg1_fpga.dts',
                  'usrp_e310_sg1_fpga.dts.md5',
                  'usrp_e310_sg1_fpga.rpt',
                  'usrp_e310_sg1_idle_fpga.bit',
                  'usrp_e310_sg1_idle_fpga.bit.md5',
                  'usrp_e310_sg1_idle_fpga.dts',
                  'usrp_e310_sg1_idle_fpga.dts.md5',
                  'usrp_e310_sg1_idle_fpga.rpt']
    },
    "e310_sg3": {
        "type": "e3xx",
        "package_name": "e3xx_e310_sg3_fpga_default-g{}.zip",
        "files": ['usrp_e310_sg3_fpga.bit',
                  'usrp_e310_sg3_fpga.bit.md5',
                  'usrp_e310_sg3_fpga.dts',
                  'usrp_e310_sg3_fpga.dts.md5',
                  'usrp_e310_sg3_fpga.rpt',
                  'usrp_e310_sg3_idle_fpga.bit',
                  'usrp_e310_sg3_idle_fpga.bit.md5',
                  'usrp_e310_sg3_idle_fpga.dts',
                  'usrp_e310_sg3_idle_fpga.dts.md5',
                  'usrp_e310_sg3_idle_fpga.rpt']
    },
    "e320": {
        "type": "e3xx",
        "package_name": "e3xx_e320_fpga_default-g{}.zip",
        "files": ['usrp_e320_fpga_1G.bit',
                  'usrp_e320_fpga_1G.bit.md5',
                  'usrp_e320_fpga_1G.dts',
                  'usrp_e320_fpga_1G.dts.md5',
                  'usrp_e320_fpga_1G.rpt',
                  'usrp_e320_fpga_XG.bit',
                  'usrp_e320_fpga_XG.bit.md5',
                  'usrp_e320_fpga_XG.dts',
                  'usrp_e320_fpga_XG.dts.md5',
                  'usrp_e320_fpga_XG.rpt',
                  'usrp_e320_fpga_AA.bit',
                  'usrp_e320_fpga_AA.bit.md5',
                  'usrp_e320_fpga_AA.dts',
                  'usrp_e320_fpga_AA.dts.md5',
                  'usrp_e320_fpga_AA.rpt']
    },
    # --- X3xx family ---
    "x300": {
        "type": "x3xx",
        "package_name": "x3xx_x300_fpga_default-g{}.zip",
        "files": ["usrp_x300_fpga_HG.lvbitx",
                  "usrp_x300_fpga_XG.lvbitx",
                  "usrp_x300_fpga_HG.bit",
                  "usrp_x300_fpga_HG.rpt",
                  "usrp_x300_fpga_XG.bit",
                  "usrp_x300_fpga_XG.rpt"]
    },
    "x310": {
        "type": "x3xx",
        "package_name": "x3xx_x310_fpga_default-g{}.zip",
        "files": ["usrp_x310_fpga_HG.lvbitx",
                  "usrp_x310_fpga_XG.lvbitx",
                  "usrp_x310_fpga_HG.bit",
                  "usrp_x310_fpga_HG.rpt",
                  "usrp_x310_fpga_XG.bit",
                  "usrp_x310_fpga_XG.rpt"]
    },
    # --- N3xx family (FPGA + CPLD images) ---
    "n310": {
        "type": "n3xx",
        "package_name": "n3xx_n310_fpga_default-g{}.zip",
        "files": ['usrp_n310_fpga_HG.bit',
                  'usrp_n310_fpga_HG.bit.md5',
                  'usrp_n310_fpga_HG.dts',
                  'usrp_n310_fpga_HG.dts.md5',
                  'usrp_n310_fpga_HG.rpt',
                  'usrp_n310_fpga_XG.bit',
                  'usrp_n310_fpga_XG.bit.md5',
                  'usrp_n310_fpga_XG.dts',
                  'usrp_n310_fpga_XG.dts.md5',
                  'usrp_n310_fpga_XG.rpt',
                  'usrp_n310_fpga_WX.bit',
                  'usrp_n310_fpga_WX.bit.md5',
                  'usrp_n310_fpga_WX.dts',
                  'usrp_n310_fpga_WX.dts.md5',
                  'usrp_n310_fpga_WX.rpt',
                  'usrp_n310_fpga_AA.bit',
                  'usrp_n310_fpga_AA.bit.md5',
                  'usrp_n310_fpga_AA.dts',
                  'usrp_n310_fpga_AA.dts.md5',
                  'usrp_n310_fpga_AA.rpt'],
    },
    "n300": {
        "type": "n3xx",
        "package_name": "n3xx_n300_fpga_default-g{}.zip",
        "files": ['usrp_n300_fpga_HG.bit',
                  'usrp_n300_fpga_HG.bit.md5',
                  'usrp_n300_fpga_HG.dts',
                  'usrp_n300_fpga_HG.dts.md5',
                  'usrp_n300_fpga_HG.rpt',
                  'usrp_n300_fpga_XG.bit',
                  'usrp_n300_fpga_XG.bit.md5',
                  'usrp_n300_fpga_XG.dts',
                  'usrp_n300_fpga_XG.dts.md5',
                  'usrp_n300_fpga_XG.rpt',
                  'usrp_n300_fpga_WX.bit',
                  'usrp_n300_fpga_WX.bit.md5',
                  'usrp_n300_fpga_WX.dts',
                  'usrp_n300_fpga_WX.dts.md5',
                  'usrp_n300_fpga_WX.rpt',
                  'usrp_n300_fpga_AA.bit',
                  'usrp_n300_fpga_AA.bit.md5',
                  'usrp_n300_fpga_AA.dts',
                  'usrp_n300_fpga_AA.dts.md5',
                  'usrp_n300_fpga_AA.rpt'],
    },
    "n320": {
        "type": "n3xx",
        "package_name": "n3xx_n320_fpga_default-g{}.zip",
        "files": ['usrp_n320_fpga_HG.bit',
                  'usrp_n320_fpga_HG.bit.md5',
                  'usrp_n320_fpga_HG.dts',
                  'usrp_n320_fpga_HG.dts.md5',
                  'usrp_n320_fpga_HG.rpt',
                  'usrp_n320_fpga_XG.bit',
                  'usrp_n320_fpga_XG.bit.md5',
                  'usrp_n320_fpga_XG.dts',
                  'usrp_n320_fpga_XG.dts.md5',
                  'usrp_n320_fpga_XG.rpt',
                  'usrp_n320_fpga_XQ.bit',
                  'usrp_n320_fpga_XQ.bit.md5',
                  'usrp_n320_fpga_XQ.dts',
                  'usrp_n320_fpga_XQ.dts.md5',
                  'usrp_n320_fpga_XQ.rpt',
                  'usrp_n320_fpga_WX.bit',
                  'usrp_n320_fpga_WX.bit.md5',
                  'usrp_n320_fpga_WX.dts',
                  'usrp_n320_fpga_WX.dts.md5',
                  'usrp_n320_fpga_WX.rpt',
                  'usrp_n320_fpga_AQ.bit',
                  'usrp_n320_fpga_AQ.bit.md5',
                  'usrp_n320_fpga_AQ.dts',
                  'usrp_n320_fpga_AQ.dts.md5',
                  'usrp_n320_fpga_AQ.rpt',],
    },
    "n320_cpld": {
        "type": "n3xx",
        "package_name": "n3xx_n320_cpld_default-g{}.zip",
        "files": ['usrp_n320_rh_cpld.svf']
    },
    "n310_cpld": {
        "type": "n3xx",
        "package_name": "n3xx_n310_cpld_default-g{}.zip",
        "files": ['usrp_n310_mg_cpld.svf']
    },
    # --- USRP2 / N2xx family (FPGA + firmware) ---
    'n200': {
        'type': 'usrp2',
        'package_name': 'usrp2_n200_fpga_default-g{}.zip',
        'files': ["usrp_n200_r2_fpga.bin",
                  "usrp_n200_r3_fpga.bin",
                  "usrp_n200_r4_fpga.bin",
                  "bit/usrp_n200_r3_fpga.bit",
                  "bit/usrp_n200_r4_fpga.bit"],
    },
    'n210': {
        'type': 'usrp2',
        'package_name': 'usrp2_n210_fpga_default-g{}.zip',
        'files': ["usrp_n210_r2_fpga.bin",
                  "usrp_n210_r3_fpga.bin",
                  "usrp_n210_r4_fpga.bin",
                  "bit/usrp_n210_r3_fpga.bit",
                  "bit/usrp_n210_r4_fpga.bit"],
    },
    'n200_fw': {
        'type': 'usrp2',
        'package_name': 'usrp2_n200_fw_default-g{}.zip',
        'files': ["usrp_n200_fw.bin"],
    },
    'n210_fw': {
        'type': 'usrp2',
        'package_name': 'usrp2_n210_fw_default-g{}.zip',
        'files': ["usrp_n210_fw.bin"],
    },
    'usrp2': {
        'type': 'usrp2',
        'package_name': 'usrp2_usrp2_fpga_default-g{}.zip',
        'files': ["usrp2_fpga.bin"],
    },
    'usrp2_fw': {
        'type': 'usrp2',
        'package_name': 'usrp2_usrp2_fw_default-g{}.zip',
        'files': ["usrp2_fw.bin"],
    },
    # --- B2xx family ---
    'b200': {
        'type': 'b2xx',
        'package_name': 'b2xx_b200_fpga_default-g{}.zip',
        'files': ["usrp_b200_fpga.bin"],
    },
    'b200mini': {
        'type': 'b2xx',
        'package_name': 'b2xx_b200mini_fpga_default-g{}.zip',
        'files': ["usrp_b200mini_fpga.bin"],
    },
    'b205mini': {
        'type': 'b2xx',
        'package_name': 'b2xx_b205mini_fpga_default-g{}.zip',
        'files': ["usrp_b205mini_fpga.bin"],
    },
    'b210': {
        'type': 'b2xx',
        'package_name': 'b2xx_b210_fpga_default-g{}.zip',
        'files': ["usrp_b210_fpga.bin"],
    },
    'b2xx_fw': {
        'type': 'b2xx',
        'package_name': 'b2xx_common_fw_default-g{}.zip',
        'files': ["usrp_b200_fw.hex",
                  "usrp_b200_bl.img"],
    },
    'n230': {
        'type': 'n230',
        'package_name': 'n230_n230_fpga_default-g{}.zip',
        'files': ["usrp_n230_fpga.bin",
                  "usrp_n230_fpga.bit",
                  "usrp_n230_fpga.rpt"],
    },
    # --- USRP1 / B100 family ---
    'b100': {
        'type': 'usrp1',
        'package_name': 'usrp1_b100_fpga_default-g{}.zip',
        'files': ["usrp_b100_fpga_2rx.bin",
                  "usrp_b100_fpga.bin"],
    },
    'b100_fw': {
        'type': 'usrp1',
        'package_name': 'usrp1_b100_fw_default-g{}.zip',
        'files': ["usrp_b100_fw.ihx"],
    },
    'usrp1': {
        'type': 'usrp1',
        'package_name': 'usrp1_usrp1_fpga_default-g{}.zip',
        'files': ["usrp1_fpga_4rx.rbf",
                  "usrp1_fpga.rbf",
                  "usrp1_fw.ihx"],
    },
    # --- Accessories ---
    'octoclock': {
        'type': 'octoclock',
        'package_name': 'octoclock_octoclock_fw_default-g{}.zip',
        'files': ["octoclock_bootloader.hex",
                  "octoclock_r4_fw.hex"],
    },
    'winusb_drv': {
        'type': 'usb',
        'package_name': 'usb_common_windrv_default-g{}.zip',
        'files': ["winusb_driver/",
                  "winusb_driver/erllc_uhd_b205mini.inf",
                  "winusb_driver/erllc_uhd_b100.inf",
                  "winusb_driver/erllc_uhd_b200_reinit.inf",
                  "winusb_driver/erllc_uhd_b200mini.inf",
                  "winusb_driver/erllc_uhd_b200.inf",
                  "winusb_driver/amd64/",
                  "winusb_driver/amd64/WdfCoInstaller01009.dll",
                  "winusb_driver/amd64/winusbcoinstaller2.dll",
                  "winusb_driver/x86/",
                  "winusb_driver/x86/WdfCoInstaller01009.dll",
                  "winusb_driver/x86/winusbcoinstaller2.dll",
                  "winusb_driver/erllc_uhd_usrp1.inf",
                  "winusb_driver/erllc_uhd_makecat.cdf",
                  "winusb_driver/erllc_uhd.cat"],
    },
}
diff --git a/fpga/usrp3/tools/utils/package_images.py b/fpga/usrp3/tools/utils/package_images.py
new file mode 100755
index 000000000..d50760b64
--- /dev/null
+++ b/fpga/usrp3/tools/utils/package_images.py
@@ -0,0 +1,369 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018 Ettus Research, a National Instruments Company
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+"""
+Package image files into image archive packages
+
+Provides functions for packaging image files into image packages. Generate the intermediate files
+(like hash files), and create image archives from sets.
+"""
+from __future__ import print_function
+import argparse
+import copy
+import glob
+import hashlib
+import itertools
+import os
+import re
+import sys
+import tempfile
+import zipfile
+from image_package_mapping import PACKAGE_MAPPING
+
+
def parse_args():
    """Set up the argument parser and parse the command line.

    Returns the argparse.Namespace with all options; every option has a
    benign default so the script can run without arguments.
    """
    description = """UHD Image Packaging

    Packages the contents of the current directory into archives within a directory structure that
    matches the Ettus fileserver. It also produces files containing the MD5 checksums of all image
    files, as well as a file containing the SHA256 checksums of all archive files created.

    The script will also modify a manifest file with the information from the generated image
    packages. That is, the repositories, Git hashes, and SHA256 checksums listed in the manifest
    will be updated.

    The script will run without any commandline arguments provided. However, some useful (crucial,
    even) information will be lacking. The suggested usage is to invoke the following command from
    the directory containing the image files

    `python package_images.py --manifest /path/to/manifest --githash <REPO>-<GITHASH>`

    where REPO is the repository used to create the images (ie 'fpga'), and GITHASH is the Git
    hash of that repository used to create the images. When in doubt, please check with previous
    image package listed in the manifest.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
                                     description=description)
    parser.add_argument('--md5', action="store_true", default=False,
                        help="Generate MD5 files")
    parser.add_argument('--sha256', action="store_true", default=False,
                        help="Generate SHA256 files")
    # Typo fixed: "Comma separate list" -> "Comma-separated list"
    parser.add_argument('-f', '--files', type=str, default="",
                        help="Comma-separated list of files")
    parser.add_argument('-o', '--output', type=str, default="",
                        help="Output file to put the hashes in")
    parser.add_argument('-m', '--manifest', type=str, default="",
                        help="Update the manifest file at this path with the new SHAs")
    parser.add_argument('-t', '--targets', type=str, default="",
                        help="RegEx to select image sets from the manifest file.")
    parser.add_argument('-g', '--githash', type=str, default="",
                        help="Git hash directory name (eg. fpga-abc1234)")
    return parser.parse_args()
+
+
def gen_filelist(includes, excludes=None):
    """
    Generate a flat list of filenames: glob-expand `includes`, then drop any
    file that also matches `excludes`.
    :param includes: glob expression or list of glob expressions to include
    :param excludes: glob expression or list of glob expressions to exclude (or None)
    :return: flat list of filenames, include order preserved
    """
    def _expand(patterns):
        # Accept either a single pattern string or an iterable of patterns
        if isinstance(patterns, str):
            return glob.glob(patterns)
        return list(itertools.chain(*[glob.iglob(pattern) for pattern in patterns]))

    included = _expand(includes)
    excluded = set() if excludes is None else set(_expand(excludes))
    # Filter with a set lookup instead of repeated list.remove() (O(n^2))
    return [filename for filename in included if filename not in excluded]
+
+
def gen_md5(files_list, hash_filename=""):
    """Generate the .md5 files for all input files.

    Writes one `<file>.md5` per input and, when `hash_filename` is non-empty,
    appends all hashes to that aggregate file too. Fixes vs. the original:
    the hash lines now contain the filename (it was formatted into the call
    but the template ignored it and wrote "(unknown)"), and the default
    hash_filename="" no longer crashes on open("", 'a').
    """
    hashes = {}
    for filename in files_list:
        # Read and hash the input file
        with open(filename, 'rb') as img_file:
            md5_sum = hashlib.md5()
            md5_sum.update(img_file.read())
        md5_hex = md5_sum.hexdigest()
        # Write the hash to a *.md5 file in md5sum-style "<hash>  <file>" form
        with open(filename + '.md5', 'w') as md5_file:
            md5_file.write("{md5_hex}  {filename}\n".format(filename=filename, md5_hex=md5_hex))
        # Also store it to write to a file of all the hashes
        hashes[filename] = md5_hex

    # Write the collected MD5 hashes to the aggregate file, if requested
    if hash_filename:
        with open(hash_filename, 'a') as hash_file:
            for filename, md5_hex in hashes.items():
                hash_file.write("{md5_hex}  {filename}\n".format(filename=filename, md5_hex=md5_hex))
+
+
def gen_sha256(files_list, hash_filename=None, manifest_fn="", repo_and_hash=""):
    """Generate the SHA256 files for all input file"""
    # Fall back to the default aggregate file name when none was given
    if hash_filename is None:
        hash_filename = "hashes.txt"
    print("Generating SHA256 sums for:\n{}".format(
        "\n".join("--{}".format(sha_fn) for sha_fn in files_list)))

    # Collect {basename: sha256 hex digest} while appending to the hash file
    sha256_dict = {}
    with open(hash_filename, 'a') as hash_file:
        for filename in files_list:
            with open(filename, 'rb') as img_file:
                digest = hashlib.sha256(img_file.read()).hexdigest()
            hash_file.write("{sha_hex} (unknown)\n".format(sha_hex=digest))
            sha256_dict[os.path.basename(filename)] = digest

    # If there's a manifest file to edit, put the new information in
    if os.path.isfile(manifest_fn):
        edit_manifest(manifest_fn, repo_and_hash, sha256_dict)
+
+
def gen_zip(zip_filename, files_list):
    """Generate the zip file for a set of images"""
    try:
        archive = zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED)
        try:
            for member in files_list:
                archive.write(member)
        finally:
            archive.close()
    except Exception as ex:
        # Best effort: report the failure and signal it via the return value
        print("Caught exception in gen_zip: {}".format(ex))
        return False
    return True
+
+
def do_gen_package(pkg_target, install_dir="", repo_and_hash=""):
    """Generate the entire N3XX image package, from the start to the end"""
    print("---Generating package for {}---".format(pkg_target))
    required_files = PACKAGE_MAPPING[pkg_target]['files']
    print("Required files:\n{}".format(
        "\n".join("--{}".format(img_fn) for img_fn in required_files)))
    # Hash everything except report files and pre-existing .md5 files
    to_hash = gen_filelist(includes=required_files, excludes=["*.rpt", "*.md5"])
    print("Files to md5sum:\n{}".format(
        "\n".join("--{}".format(md5_fn) for md5_fn in to_hash)))
    gen_md5(to_hash, "md5_hashes.txt")

    # Determine the current Git hash (w/o the repository)
    hash_match = re.findall(r"[\d\w]+-([\d\w]{7,8})", repo_and_hash)
    githash = hash_match[0] if hash_match else ""

    to_zip = gen_filelist(includes=required_files)
    zip_filename = os.path.join(install_dir, PACKAGE_MAPPING[pkg_target]['package_name'])\
        .format(githash)
    print("Files to zip:\n{}".format(
        "\n".join("--{}".format(zip_fn) for zip_fn in to_zip)))
    # An empty name signals that the archive could not be written
    return zip_filename if gen_zip(zip_filename, to_zip) else ""
+
+
def gen_package(pkg_targets=(), repo_and_hash="", manifest_fn=""):
    """Generate the entire image package, and place it in the proper directory structure

    pkg_targets   : iterable of target names (keys of PACKAGE_MAPPING)
    repo_and_hash : "<repo>-<githash>" used for the per-build directory name
                    and archive filenames; falls back to "repo-githash"
    manifest_fn   : optional manifest file to update with the new SHA256s
    :return: list of zip files that were actually created
    """
    # Make the cache/ directory if necessary
    cache_path = os.path.join(os.getcwd(), "cache")
    if not os.path.isdir(cache_path):
        os.mkdir(cache_path)

    sha_filenames = []
    for pkg_target in pkg_targets:
        if pkg_target in PACKAGE_MAPPING:
            # Make the type directory
            pkg_type = PACKAGE_MAPPING[pkg_target]["type"]
            type_path = os.path.join(cache_path, pkg_type)
            if not os.path.isdir(type_path):
                os.mkdir(type_path)
            # Make the 'repository-hash' directory
            if not repo_and_hash:
                repo_and_hash = "repo-githash"
            git_path = os.path.join(type_path, repo_and_hash)
            if not os.path.isdir(git_path):
                os.mkdir(git_path)

            # Generate the package and add the zip filename to the SHA list
            sha_filenames.append(do_gen_package(pkg_target,
                                                install_dir=git_path,
                                                repo_and_hash=repo_and_hash))
        else:
            print("Error: Specify a supported type from {}".format(
                list(PACKAGE_MAPPING.keys())))
    # Drop entries whose archive failed to be created (do_gen_package
    # returns a name that may not exist on failure)
    sha_filenames[:] = [sha_fn for sha_fn in sha_filenames if os.path.exists(sha_fn)]
    gen_sha256(sha_filenames, hash_filename="hashes.txt",
               manifest_fn=manifest_fn, repo_and_hash=repo_and_hash)
    # Return the zipfiles we've created
    return sha_filenames
+
+
def list_differences(list1, list2):
    """Returns two lists containing the unique elements of each input list"""
    only_in_first = [item for item in list1 if item not in list2]
    only_in_second = [item for item in list2 if item not in list1]
    return only_in_first, only_in_second
+
+
def get_target_name(zip_filename):
    """Return the package target that created the given zip_filename.

    Returns "" when the filename carries no -g<githash> tag (the original
    raised IndexError in that case) or matches no known package name.
    """
    # Strip the Git hash out of the filename first. Hoisted out of the loop:
    # it only depends on the filename, not on the candidate target.
    hash_match = re.findall(r"-g([\d\w]{7,8})", zip_filename)
    if not hash_match:
        return ""
    stripped_filename = os.path.basename(zip_filename.replace(hash_match[0], "{}"))
    for target, target_info in PACKAGE_MAPPING.items():
        if stripped_filename == target_info.get("package_name", ""):
            return target
    # If it doesn't match any targets
    return ""
+
+
def verify_package(zip_filename):
    """Verify the contents of the image package match the expected list of files

    Returns True when the archive holds exactly the files listed in
    PACKAGE_MAPPING for its target; otherwise prints details to stderr and
    returns False.
    """
    # First, determine which target this was built for
    pkg_target = get_target_name(os.path.split(zip_filename)[1])
    if not pkg_target:
        print("Error: Could not determine package from filename {}"
              .format(zip_filename), file=sys.stderr)
        return False

    expected_filelist = PACKAGE_MAPPING[pkg_target]['files']
    with zipfile.ZipFile(zip_filename, 'r') as zip_file:
        actual_filelist = zip_file.namelist()

    # Compare both directions: files we expected but didn't get, and files
    # we got but didn't expect
    missing, extra = list_differences(expected_filelist, actual_filelist)
    if missing or extra:
        print("Error: image package does not include expected files ({})".format(pkg_target),
              file=sys.stderr)
        if missing:
            print("Missing files: {}".format(missing), file=sys.stderr)
        if extra:
            print("Extra files: {}".format(extra), file=sys.stderr)
        return False
    return True
+
+
def edit_manifest_line(line, new_repo_and_hash, new_hashes_dict):
    """Edit the line in the manifest to (maybe) include the new repo, git hash, and SHA

    line              : one line of the manifest file
    new_repo_and_hash : "<repo>-<githash>" string to substitute in
    new_hashes_dict   : {zip filename: sha256 hex} of freshly built packages

    Returns the (possibly edited) line; lines mentioning none of the new
    files are returned unchanged.
    """
    # Check each value in your dictionary of new hashes
    for filename, new_hash in new_hashes_dict.items():
        # The package filename carries a Git hash, so peel that off first
        full_filename_matches = re.findall(r"([\d\w]+)-g([\da-fA-F]{7,8})", filename)
        if full_filename_matches:
            # We don't really need to store the Git hash in the found filename
            stripped_filename, _ = full_filename_matches[0]
        else:
            return line

        if stripped_filename in line:
            # Replace the repo and git hash
            old_repo_and_hash_matches = re.findall(r"([\w]+)-([\da-fA-F]{7,8})", line)
            if old_repo_and_hash_matches:
                # If we did find a repo and Git hash on this line, replace them
                old_repo, old_githash = old_repo_and_hash_matches[0]
                old_repo_and_hash = "{}-{}".format(old_repo, old_githash)
                # We need to replace all instances <REPO>-<GITHASH> in this line
                line = line.replace(old_repo_and_hash, new_repo_and_hash)
                # We also need to replace -g<GITHASH> in the filename
                _, new_githash = re.findall(r"([\w]+)-([\da-fA-F]{7,8})", new_repo_and_hash)[0]
                line = line.replace(old_githash, new_githash)

            # Replace the SHA256
            sha = re.findall(r"[\da-fA-F]{64}", line)
            if sha:
                line = line.replace(sha[0], new_hash)

            if not old_repo_and_hash_matches or not sha:
                print("Error: repo, hash or SHA missing in line with new file")
                # Fixed: the original called print("Line: {}", format(line)),
                # which printed the raw template and the line as two arguments
                print("Line: {}".format(line))
            # If we found and replaced info, return the edited line
            return line
    # If we never edit the line, just return it
    return line
+
+
def edit_manifest(manifest_fn, new_repo_and_hash, new_hash_dict):
    """Edit the provided manifest file to update the githash and SHA256.

    Rewrites manifest_fn in place: every line is run through
    edit_manifest_line() and written to a temporary file in the current
    directory, which then replaces the original manifest.
    """
    print("Trying to edit manifest with new repo and Git hash {}".format(new_repo_and_hash))
    with tempfile.NamedTemporaryFile(mode='w', dir='.', delete=False) as tmp_manifest, \
            open(manifest_fn, 'r') as old_manifest:
        # Check each line in the manifest file
        for line in old_manifest:
            # If needed, put the new info in the line; always write it back
            tmp_manifest.write(edit_manifest_line(line, new_repo_and_hash, new_hash_dict))
    # ROBUSTNESS FIX: rename only after the temporary file is closed (and
    # therefore flushed). os.replace also overwrites an existing destination
    # on Windows, where the original os.rename-while-open would fail.
    os.replace(tmp_manifest.name, manifest_fn)
+
+
def determine_targets():
    """
    Determine which image packages can be created by the files in the current directory
    :return: list of valid targets
    """
    found_targets = []
    for target, target_info in PACKAGE_MAPPING.items():
        # Grab the list of files required, but remove any files that we're
        # going to build here (the .md5 hash files). A comprehension replaces
        # the previous copy.deepcopy + in-place slice assignment, which made
        # a needless deep copy of a list of strings.
        required_files = [filename for filename in target_info['files']
                          if '.md5' not in filename]

        files_present = [os.path.exists(img_file) for img_file in required_files]
        if all(files_present):
            found_targets.append(target)
        elif any(files_present):
            # A partial set usually indicates a broken or incomplete build
            print("Not all package contents present for {}".format(target),
                  file=sys.stderr)
    return found_targets
+
+
def main():
    """Generate image packages using commandline arguments"""
    args = parse_args()
    # Legacy options are accepted by the parser but not implemented; warn so
    # callers notice they are being ignored
    if args.md5 or args.sha256 or args.files or args.output:
        print("Unsupported argument: only --pkg_targets is currently supported.")
    # Check the provided Git hash: it must exist and look like <REPO>-<GITHASH>
    if not args.githash:
        print("Please provide --githash `<REPO>-<GITHASH>'")
        return False
    elif not re.findall(r"[\d\w]+-[\d\w]{7,8}", args.githash):
        print("--githash does not match expected form. Should be `<REPO>-<GITHASH>'")
        return False

    # Package either the explicitly requested targets or whatever targets can
    # be satisfied by the files found in the current directory
    if args.targets:
        pkg_targets = [ss.strip() for ss in args.targets.split(',')]
    else:
        pkg_targets = determine_targets()
    print("Targets to package:\n{}".format(
        "\n".join("--{}".format(pkg) for pkg in pkg_targets)))

    zip_filenames = gen_package(pkg_targets=pkg_targets,
                                repo_and_hash=args.githash,
                                manifest_fn=args.manifest)
    # Success only if every generated archive passes content verification
    check_zips = [verify_package(zip_filename) for zip_filename in zip_filenames]
    return all(check_zips)
+
+
if __name__ == "__main__":
    # main() returns True on success; invert so the process exit code is
    # 0 (success) when main() succeeds and 1 otherwise.
    sys.exit(not main())
diff --git a/fpga/usrp3/tools/utils/rfnoc-system-sim/.gitignore b/fpga/usrp3/tools/utils/rfnoc-system-sim/.gitignore
new file mode 100644
index 000000000..0d20b6487
--- /dev/null
+++ b/fpga/usrp3/tools/utils/rfnoc-system-sim/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/fpga/usrp3/tools/utils/rfnoc-system-sim/README b/fpga/usrp3/tools/utils/rfnoc-system-sim/README
new file mode 100644
index 000000000..514e9e43b
--- /dev/null
+++ b/fpga/usrp3/tools/utils/rfnoc-system-sim/README
@@ -0,0 +1,6 @@
+Dependencies:
+- python2
+- graphviz
+- python-graphviz
+- python-numpy
+- python-matplotlib
diff --git a/fpga/usrp3/tools/utils/rfnoc-system-sim/colosseum_models.py b/fpga/usrp3/tools/utils/rfnoc-system-sim/colosseum_models.py
new file mode 100755
index 000000000..f13b1b194
--- /dev/null
+++ b/fpga/usrp3/tools/utils/rfnoc-system-sim/colosseum_models.py
@@ -0,0 +1,593 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Ettus Research
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import rfnocsim
+import math
+import ni_hw_models as hw
+
class ColGlobals():
    """Constants shared by the Colosseum simulation models ("PP" = partial product)."""
    BPI = 4 # Number of bytes per sample or coefficient
    BPP = 1024 # Bytes per packet
    MIN_SAMP_HOPS = 1 # Minimum number of hops an RX sample will take before it is used to compute a PP
    MAX_SAMP_HOPS = 3 # Maximum number of hops an RX sample will take before it is used to compute a PP
    MIN_PP_HOPS = 0 # Minimum number of hops a PP will take before it is used to compute a TX sample
    MAX_PP_HOPS = 1 # Maximum number of hops a PP will take before it is used to compute a TX sample
    ELASTIC_BUFF_FULLNESS = 0.5 # Target average fullness of the sample-alignment elastic buffers
+
class PartialContribComputer(rfnocsim.Function):
    """
    Simulation model for function that computes the contribution of radio chans on other radio chans.
    This function computes a NxM dot product of FFTs, one bin at a time.
    Features:
    - Supports computing the product in multiple cycles (for resource reuse)
    - Supports deinterleaving data in streams (i.e. is Radio 0+1 data comes in thru the same ethernet)

    Args:
        sim_core: Simulator core object
        name: Name of this function
        size: Number of chans (inputs) for which contribution partial products are computed
        dst_chans: Computes the contribution of the input chans on these dst_chans
        items_per_stream: How many channels per stream can this function deinterleave?
        app_settings: Application settings dict (domain, samp_rate, FFT/FIR parameters)
    """
    def __init__(self, sim_core, name, size, dst_chans, items_per_stream, app_settings):
        ticks_per_exec = 1 # This function will run once every tick. No multi-cycle paths here.
        rfnocsim.Function.__init__(self, sim_core, name, size, int(len(dst_chans)/items_per_stream), ticks_per_exec)
        self.items_per_stream = items_per_stream # Each stream contains data from n radio chans
        self.dst_chans = dst_chans # Where should the individual products go?
        # This block has to buffer enough data to ensure
        # sample alignment. How deep should those buffers be?
        sync_buff_depth = (((ColGlobals.MAX_SAMP_HOPS - ColGlobals.MIN_SAMP_HOPS) *
            hw.Bee7Fpga.IO_LN_LATENCY * float(app_settings['samp_rate'])) / ColGlobals.ELASTIC_BUFF_FULLNESS)

        # Adder latency: log2(radix) adder stages + 2 pipeline flops
        latency = math.ceil(math.log(size/len(dst_chans), 2)) + 2
        # Synchronization latency based on buffer size
        latency += (sync_buff_depth * ColGlobals.ELASTIC_BUFF_FULLNESS) * (self.get_tick_rate() / float(app_settings['samp_rate']))
        # Packet alignment latency
        latency += ColGlobals.BPP * (self.get_tick_rate() / hw.Bee7Fpga.IO_LN_BW)
        self.estimate_resources(size*items_per_stream, len(dst_chans), app_settings, sync_buff_depth*size, latency)

    def estimate_resources(self, N, M, app_settings, sync_buff_total_samps, pre_filt_latency):
        """Estimate DSP/BRAM usage for an NxM product and update this
        function's latency. N = input channels, M = destination channels."""
        rscrs = rfnocsim.HwRsrcs()

        DSP_BLOCKS_PER_MAC = 3 # DSP blocks for a scaled complex MAC
        MAX_DSP_RATE = 400e6 # Max clock rate for a DSP48E block
        MAX_UNROLL_DEPTH = 2 # How many taps (or FFT bins) to compute in parallel?
        COEFF_SETS = 1 # We need two copies of coefficients one live
                       # and one buffered for dynamic reload. If both
                       # live in BRAM, this should be 2. If the live
                       # set lives in registers, this should be 1

        samp_rate = float(app_settings['samp_rate'])
        dsp_cyc_per_samp = MAX_DSP_RATE / samp_rate

        if app_settings['domain'] == 'time':
            # Time domain: FIR filter per channel pair. Unroll taps in
            # parallel only when one DSP at max clock cannot keep up.
            fir_taps = app_settings['fir_taps']
            if (fir_taps <= dsp_cyc_per_samp):
                unroll_factor = 1
                dsp_rate = samp_rate * fir_taps
            else:
                unroll_factor = math.ceil((1.0 * fir_taps) / dsp_cyc_per_samp)
                dsp_rate = MAX_DSP_RATE
                if (unroll_factor > MAX_UNROLL_DEPTH):
                    raise self.SimCompError('Too many FIR coefficients! Reached loop unroll limit.')

            rscrs.add('DSP', DSP_BLOCKS_PER_MAC * unroll_factor * N * M)
            rscrs.add('BRAM_18kb', math.ceil(ColGlobals.BPI * app_settings['fir_dly_line'] / hw.Bee7Fpga.BRAM_BYTES) * N * M) # FIR delay line memory
            rscrs.add('BRAM_18kb', math.ceil(ColGlobals.BPI * COEFF_SETS * fir_taps * unroll_factor * N * M / hw.Bee7Fpga.BRAM_BYTES)) # Coefficient storage

            samp_per_tick = dsp_rate / self.get_tick_rate()
            self.update_latency(func=pre_filt_latency + (fir_taps / (samp_per_tick * unroll_factor)))
        else:
            # Frequency domain: per-bin complex MACs at the max DSP clock
            fft_size = app_settings['fft_size']
            rscrs.add('DSP', DSP_BLOCKS_PER_MAC * N * M * MAX_UNROLL_DEPTH) # MACs
            rscrs.add('BRAM_18kb', math.ceil(ColGlobals.BPI * N * M * fft_size * COEFF_SETS / hw.Bee7Fpga.BRAM_BYTES)) # Coeff storage

            samp_per_tick = MAX_DSP_RATE / self.get_tick_rate()
            self.update_latency(func=pre_filt_latency + (fft_size / samp_per_tick))

        # Elastic buffers used for sample alignment
        rscrs.add('BRAM_18kb', math.ceil(ColGlobals.BPI * sync_buff_total_samps / hw.Bee7Fpga.BRAM_BYTES))
        self.update_rsrcs(rscrs)

    def do_func(self, in_data):
        """
        Gather FFT data from "size" channels, compute a dot product with the coefficient
        matrix and spit the partial products out. The dot product is computed for each
        FFT bin serially.
        """
        out_data = list()
        src_chans = []
        # Iterate over each input
        for di in in_data:
            if len(di.items) != self.items_per_stream:
                raise RuntimeError('Incorrect items per stream. Expecting ' + str(self.items_per_stream))
            # Deinterleave data
            for do in range(len(di.items)):
                (sid, coords) = rfnocsim.DataStream.submatrix_parse(di.items[do])
                if sid != 'rx':
                    raise RuntimeError('Incorrect items. Expecting radio data (rx) but got ' + sid)
                src_chans.extend(coords[0])
        bpi = in_data[0].bpi
        count = in_data[0].count
        # Iterate through deinterleaved channels
        for i in range(0, len(self.dst_chans), self.items_per_stream):
            items = []
            for j in range(self.items_per_stream):
                # Compute partial products:
                # pp = partial product of "src_chans" on "self.dst_chans[i+j]"
                items.append(rfnocsim.DataStream.submatrix_gen('pp', [src_chans, self.dst_chans[i+j]]))
            out_data.append(self.create_outdata_stream(bpi, items, count))
        return out_data
+
class PartialContribCombiner(rfnocsim.Function):
    """
    Simulation model for function that adds multiple partial contributions (products) into a larger
    partial product. The combiner can optionally reduce a very large product into a smaller one.
    Ex: pp[31:0,i] (contribution on chan 0..31 on i) can alias to tx[i] if there are 32 channels.

    Args:
        sim_core: Simulator core object
        name: Name of this function
        radix: Number of partial products that are combined (Number of inputs)
        app_settings: Application settings dict (domain, samp_rate, ...)
        reducer_filter: A tuple that represents what pp channels to alias to what
        items_per_stream: How many channels per stream can this function deinterleave?
    """

    def __init__(self, sim_core, name, radix, app_settings, reducer_filter = (None, None), items_per_stream = 2):
        rfnocsim.Function.__init__(self, sim_core, name, radix, 1)
        self.radix = radix
        self.reducer_filter = reducer_filter
        self.items_per_stream = items_per_stream

        # This block has to buffer enough data to ensure
        # sample alignment. How deep should those buffers be?
        sync_buff_depth = (((ColGlobals.MAX_PP_HOPS - ColGlobals.MIN_PP_HOPS) *
            hw.Bee7Fpga.IO_LN_LATENCY * float(app_settings['samp_rate'])) / ColGlobals.ELASTIC_BUFF_FULLNESS)
        # Figure out latency based on sync buffer and delay line
        latency = math.ceil(math.log(radix, 2)) + 2 # log2(radix) adder stages + 2 pipeline flops
        # Synchronization latency based on buffer size
        latency += (sync_buff_depth * ColGlobals.ELASTIC_BUFF_FULLNESS) * (self.get_tick_rate() / float(app_settings['samp_rate']))
        # Packet alignment latency
        latency += ColGlobals.BPP * (self.get_tick_rate() / hw.Bee7Fpga.IO_LN_BW)

        self.update_latency(func=latency)
        self.estimate_resources(radix, sync_buff_depth)

    def estimate_resources(self, radix, sync_buff_depth):
        """Estimate BRAM usage for the per-input alignment buffers."""
        rscrs = rfnocsim.HwRsrcs()
        # Assume that pipelined adders are inferred in logic (not DSP)
        # Assume that buffering uses BRAM
        rscrs.add('BRAM_18kb', math.ceil(ColGlobals.BPI * sync_buff_depth * radix / hw.Bee7Fpga.BRAM_BYTES))
        self.update_rsrcs(rscrs)

    def do_func(self, in_data):
        """
        Gather partial dot products from inputs, add them together and spit them out.
        Perform sanity check to ensure that we are adding the correct things.
        """
        # out_chans maps each destination channel to the list of source
        # channels whose contributions are being summed into it
        out_chans = dict()
        # Iterate over each input
        for di in in_data:
            if len(di.items) != self.items_per_stream:
                raise self.SimCompError('Incorrect items per stream. Expecting ' + str(self.items_per_stream))
            # Deinterleave data
            for do in range(len(di.items)):
                (sid, coords) = rfnocsim.DataStream.submatrix_parse(di.items[do])
                if sid == 'null':
                    continue
                elif sid != 'pp':
                    raise self.SimCompError('Incorrect items. Expecting partial produts (pp) but got ' + sid)
                if len(coords[1]) != 1:
                    raise self.SimCompError('Incorrect partial product. Target must be a single channel')
                if coords[1][0] in out_chans:
                    out_chans[coords[1][0]].extend(coords[0])
                else:
                    out_chans[coords[1][0]] = coords[0]
        # Check if keys (targets) for partial products == items_per_stream
        if len(list(out_chans.keys())) != self.items_per_stream:
            raise self.SimCompError('Inconsistent partial products. Too many targets.')
        # Verify that all influencers for each target are consistent
        if not all(x == list(out_chans.values())[0] for x in list(out_chans.values())):
            raise self.SimCompError('Inconsistent partial products. Influencers dont match.')
        contrib_chans = list(out_chans.values())[0]
        # Combine partial products and return
        out_items = []
        for ch in list(out_chans.keys()):
            # When this sum covers every channel in the reducer filter, the
            # result is a finished stream and aliases to the reduced name
            # (e.g. 'tx'); otherwise it stays a partial product
            if sorted(self.reducer_filter[0]) == sorted(contrib_chans):
                out_items.append(rfnocsim.DataStream.submatrix_gen(self.reducer_filter[1], [ch]))
            else:
                out_items.append(rfnocsim.DataStream.submatrix_gen('pp', [list(out_chans.values())[0], ch]))
        return self.create_outdata_stream(in_data[0].bpi, out_items, in_data[0].count)
+
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# NOTE: The Torus Topology has not been maintained. Use at your own risk
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
class Topology_2D_4x4_Torus:
    """
    Topology builder for a 2-D 4x4 Torus network of BEE7 blades.

    NOTE: this topology has not been maintained (see warning above);
    prefer Topology_3D_4x4_FLB.
    """
    @classmethod
    def config_bitstream(cls, bee7fpga, app_settings, in_chans, out_chans, total_num_chans, is_radio_node):
        """
        Define the simulated "bitstream" behavior for one BEE7 FPGA.

        - bee7fpga: The FPGA simulation object being configured
        - app_settings: Application information
        - in_chans: The 64 input channels whose samples reach this FPGA
        - out_chans: The 16 channels this FPGA computes contributions for
        - total_num_chans: Total channel count in the whole system
        - is_radio_node: True if this FPGA is directly connected to USRPs
        """
        if len(in_chans) != 64:
            raise bee7fpga.SimCompError('in_chans must be 64 channels wide. Got ' + str(len(in_chans)))
        if len(out_chans) != 16:
            raise bee7fpga.SimCompError('out_chans must be 16 channels wide. Got ' + str(len(out_chans)))
        # BUG FIX: floor division keeps GRP_LEN an int under Python 3
        # (16 / 2 is a float there, which breaks the range() calls below)
        GRP_LEN = 16 // 2 # 2 radio channels per USRP

        # Broadcast raw data streams to all internal and external FPGAs
        for i in range(GRP_LEN):
            in_ln = bee7fpga.EXT_IO_LANES[bee7fpga.BP_BASE+i]
            bee7fpga.sim_core.connect(bee7fpga.serdes_i[in_ln], 0, bee7fpga.serdes_o[bee7fpga.EW_IO_LANES[i]], 0)
            bee7fpga.sim_core.connect(bee7fpga.serdes_i[in_ln], 0, bee7fpga.serdes_o[bee7fpga.NS_IO_LANES[i]], 0)
            bee7fpga.sim_core.connect(bee7fpga.serdes_i[in_ln], 0, bee7fpga.serdes_o[bee7fpga.XX_IO_LANES[i]], 0)
            bee7fpga.sim_core.connect(bee7fpga.serdes_i[in_ln], 0, bee7fpga.serdes_o[bee7fpga.EXT_IO_LANES[bee7fpga.BP_BASE+8+i]], 0)
        # Create an internal bus to hold the generated partial products
        bee7fpga.pp_bus = dict()
        for i in range(GRP_LEN):
            bee7fpga.pp_bus[i] = rfnocsim.Channel(bee7fpga.sim_core, '%s/_INTERNAL_PP_%02d' % (bee7fpga.name,i))
        # We need to compute partial products of the data that is broadcast to us
        # pp_input_lanes represents the IO lanes that hold this data
        pp_input_lanes = bee7fpga.EXT_IO_LANES[bee7fpga.BP_BASE:bee7fpga.BP_BASE+GRP_LEN] + \
            bee7fpga.EW_IO_LANES[0:GRP_LEN] + bee7fpga.NS_IO_LANES[0:GRP_LEN] + bee7fpga.XX_IO_LANES[0:GRP_LEN]
        # The function that computes the partial products
        func = PartialContribComputer(
            sim_core=bee7fpga.sim_core, name=bee7fpga.name + '/pp_computer/', size=len(pp_input_lanes),
            dst_chans=out_chans,
            items_per_stream=2, app_settings=app_settings)
        for i in range(len(pp_input_lanes)):
            bee7fpga.sim_core.connect(bee7fpga.serdes_i[pp_input_lanes[i]], 0, func, i)
        for i in range(GRP_LEN): # Outputs of function
            bee7fpga.sim_core.connect(func, i, bee7fpga.pp_bus[i], 0)
        bee7fpga.add_function(func)
        # Add a function to combine all partial products (one per IO lane)
        for i in range(GRP_LEN):
            func = PartialContribCombiner(
                sim_core=bee7fpga.sim_core, name=bee7fpga.name + '/pp_combiner_%d/' % (i),
                radix=2, app_settings=app_settings, reducer_filter=(list(range(total_num_chans)), 'tx'))
            # Partial products generated internally have to be added to a partial
            # sum coming from outside
            bee7fpga.sim_core.connect(bee7fpga.serdes_i[bee7fpga.EXT_IO_LANES[bee7fpga.FP_BASE+i]], 0, func, 0)
            bee7fpga.sim_core.connect(bee7fpga.pp_bus[i], 0, func, 1)
            # If this FPGA is hooked up to the radio then send partial products
            # back to when samples came from. Otherwise send it out to the PP output bus
            if is_radio_node:
                bee7fpga.sim_core.connect(func, 0, bee7fpga.serdes_o[bee7fpga.EXT_IO_LANES[bee7fpga.BP_BASE+i]], 0)
            else:
                bee7fpga.sim_core.connect(func, 0, bee7fpga.serdes_o[bee7fpga.EXT_IO_LANES[bee7fpga.FP_BASE+8+i]], 0)
            bee7fpga.add_function(func)

    @classmethod
    def connect(cls, sim_core, usrps, bee7blades, hosts, app_settings):
        """Wire USRPs, BEE7 blades and hosts into the 4x4 torus."""
        USRPS_PER_BLADE = 32

        # Create NULL source of "zero" partial products
        null_items = ['null[(0);(0)]', 'null[(0);(0)]']
        null_src = rfnocsim.Producer(sim_core, 'NULL_SRC', 4, null_items)
        if app_settings['domain'] == 'frequency':
            null_src.set_rate(app_settings['samp_rate']*(1.0 +
                (float(app_settings['fft_overlap'])/app_settings['fft_size'])))
        else:
            null_src.set_rate(app_settings['samp_rate'])

        # Reshape BEE7s
        # The blades are arranged in 2D Torus network with 4 blades across
        # each dimension (4x4 = 16)
        bee7grid = []
        for r in range(4):
            bee7row = []
            for c in range(4):
                blade = bee7blades[4*r + c]
                pp_chans = list(range(64*c,64*(c+1)))
                for i in range(4):
                    Topology_2D_4x4_Torus.config_bitstream(
                        blade.fpgas[i], app_settings, pp_chans, pp_chans[i*16:(i+1)*16], 256, (r==c))
                bee7row.append(blade)
            bee7grid.append(bee7row)

        # USRP-Bee7 Connections
        # Blades across the diagonal are connected to USRPs
        for b in range(4):
            for u in range(USRPS_PER_BLADE):
                # BUG FIX: u // 8 keeps the lane index an int under Python 3
                # (u / 8 is a float there and cannot index IO lanes)
                sim_core.connect_bidir(
                    usrps[USRPS_PER_BLADE*b + u], 0, bee7grid[b][b],
                    len(hw.Bee7Fpga.EXT_IO_LANES)*(u//8) + hw.Bee7Fpga.BP_BASE+(u%8), 'SAMP')
            sim_core.connect_bidir(
                hosts[b], 0, bee7grid[b][b], hw.Bee7Fpga.FP_BASE+8, 'CONFIG', ['blue','blue'])

        # Bee7-Bee7 Connections
        # (removed an unused "null_srcs" list that was never appended to)
        for r in range(4): # Traverse across row
            for c in range(4): # Traverse across col
                for f in range(4):
                    samp_in_base = len(hw.Bee7Fpga.EXT_IO_LANES)*f + hw.Bee7Fpga.BP_BASE
                    samp_out_base = len(hw.Bee7Fpga.EXT_IO_LANES)*f + hw.Bee7Fpga.BP_BASE+8
                    pp_in_base = len(hw.Bee7Fpga.EXT_IO_LANES)*f + hw.Bee7Fpga.FP_BASE
                    pp_out_base = len(hw.Bee7Fpga.EXT_IO_LANES)*f + hw.Bee7Fpga.FP_BASE+8
                    if r != c:
                        sim_core.connect_multi_bidir(
                            bee7grid[r][(c+3)%4], list(range(samp_out_base,samp_out_base+8)),
                            bee7grid[r][c], list(range(samp_in_base,samp_in_base+8)),
                            'SAMP_O2I', ['black','blue'])
                        sim_core.connect_multi_bidir(
                            bee7grid[r][c], list(range(pp_out_base,pp_out_base+8)),
                            bee7grid[(r+1)%4][c], list(range(pp_in_base,pp_in_base+8)),
                            'PP_O2I', ['black','blue'])
                    else:
                        # Diagonal blades have no upstream partial sums, so
                        # feed them zeros from the NULL source
                        for i in range(8):
                            sim_core.connect(null_src, 0, bee7grid[(r+1)%4][c], pp_in_base + i)
+
class Topology_3D_4x4_FLB:
    """
    Topology builder for a 3-D (4x4x4) Flattened Butterfly network of BEE7
    FPGAs. The X and Z dimensions connect blades (over RTM and FMC links);
    the Y dimension connects the four FPGAs within a single blade.
    """
    @classmethod
    def get_radio_num(cls, router_addr, radio_idx, concentration):
        """
        Returns the global radio index given local radio info

        (global_radio_idx) = get_radio_num(router_addr, radio_idx, concentration) where:
        - router_addr: Address of the current FPGA (router) in 3-D space
        - radio_idx: The local index of the radio for the current router_addr
        - concentration: Number of USRPs connected to each router
        """
        DIM_SIZE = 4
        # Flatten the 3-D address into a linear radio number: Z varies
        # fastest (scaled by the concentration), then Y, then X
        multiplier = concentration
        radio_num = 0
        for dim in ['Z','Y','X']:
            radio_num += router_addr[dim] * multiplier
            multiplier *= DIM_SIZE
        return radio_num + radio_idx

    @classmethod
    def get_portmap(cls, node_addr):
        """
        Returns the router and terminal connections for the current FPGA

        (router_map, terminal_map) = get_portmap(node_addr) where:
        - node_addr: Address of the current FPGA in 3-D space
        - router_map: A double map indexed by the dimension {X,Y,Z} and the
                      FPGA address in that dimension that returns the Aurora
                      lane index that connects the current node to the neighbor.
                      Example: if node_addr = [0,0,0] then router_map['X'][1] will
                      hold the IO lane index that connects the current node with
                      its X-axis neighbor with address 1
        - terminal_map: A single map that maps a dimension {X,Y,Z} to the starting
                        IO lane index for terminals (like USRPs) in that dimension.
                        A terminal is a leaf node in the network.
        """
        router_map = dict()
        terminal_map = dict()
        # If "node_addr" is the address of the current FPGA in the (X,Y,Z) space,
        # then build a list of other addresses (neighbors) in each dimension
        DIM_SIZE = 4
        for dim in ['X','Y','Z']:
            all_addrs = list(range(DIM_SIZE))
            all_addrs.remove(node_addr[dim])
            router_map[dim] = dict()
            for dst in all_addrs:
                router_map[dim][dst] = 0 # Assign lane index as 0 for now
        # Assign Aurora lanes for all external connections between BEE7s
        io_base = hw.Bee7Fpga.EXT_IO_LANES[0]

        # ---- X-axis ----
        # All BEE7s in the X dimension are connected via the RTM
        # The first quad on the RTM is reserved for SFP+ peripherals like
        # the USRPs, Ethernet switch ports, etc
        # All others are used for inter BEE connections over QSFP+
        terminal_map['X'] = io_base + hw.Bee7Fpga.BP_BASE
        xdst = terminal_map['X'] + DIM_SIZE
        for dst in router_map['X']:
            router_map['X'][dst] = xdst
            xdst += DIM_SIZE

        # ---- Z-axis ----
        # All BEE7s in the Z dimension are connected via FMC IO cards (front panel)
        # To be symmetric with the X-axis the first quad on the FMC bus is also
        # reserved (regardless of all quads being symmetric)
        terminal_map['Z'] = io_base + hw.Bee7Fpga.FP_BASE
        zdst = terminal_map['Z'] + DIM_SIZE
        for dst in router_map['Z']:
            router_map['Z'][dst] = zdst
            zdst += DIM_SIZE

        # ---- Y-axis ----
        # Within a BEE7, FPGAs are connected in the Y-dimension:
        # 0 - 1
        # | X |
        # 2 - 3
        Y_LANE_MAP = {
            0:{1:hw.Bee7Fpga.EW_IO_LANES[0], 2:hw.Bee7Fpga.NS_IO_LANES[0], 3:hw.Bee7Fpga.XX_IO_LANES[0]},
            1:{0:hw.Bee7Fpga.EW_IO_LANES[0], 2:hw.Bee7Fpga.XX_IO_LANES[0], 3:hw.Bee7Fpga.NS_IO_LANES[0]},
            2:{0:hw.Bee7Fpga.NS_IO_LANES[0], 1:hw.Bee7Fpga.XX_IO_LANES[0], 3:hw.Bee7Fpga.EW_IO_LANES[0]},
            3:{0:hw.Bee7Fpga.XX_IO_LANES[0], 1:hw.Bee7Fpga.NS_IO_LANES[0], 2:hw.Bee7Fpga.EW_IO_LANES[0]}}
        for dst in router_map['Y']:
            router_map['Y'][dst] = Y_LANE_MAP[node_addr['Y']][dst]

        return (router_map, terminal_map)

    @classmethod
    def config_bitstream(cls, bee7fpga, app_settings, fpga_addr):
        """
        Defines the FPGA behavior for the current FPGA. This function will
        create the necessary simulation functions, connect them to IO lanes and
        define the various utilization metrics for the image.

        config_bitstream(bee7fpga, app_settings, fpga_addr):
        - bee7fpga: The FPGA simulation object being configured
        - fpga_addr: Address of the FPGA in 3-D space
        - app_settings: Application information
        """
        if len(fpga_addr) != 3:
            raise bee7fpga.SimCompError('fpga_addr must be 3-dimensional. Got ' + str(len(fpga_addr)))

        # Map that stores lane indices for all neighbors of this node
        (router_map, terminal_map) = cls.get_portmap(fpga_addr)
        # USRPs are connected in the X dimension (RTM) because it has SFP+ ports
        base_usrp_lane = terminal_map['X']

        DIM_WIDTH = 4 # Dimension size for the 3-D network
        MAX_USRPS = 4 # Max USRPs that can possibly be connected to each FPGA
        NUM_USRPS = 2 # Number of USRPs actually connected to each FPGA
        CHANS_PER_USRP = 2 # How many radio channels does each USRP have
        ALL_CHANS = list(range(pow(DIM_WIDTH, 3) * NUM_USRPS * CHANS_PER_USRP))

        # Each FPGA will forward the sample stream from each USRP to all of its
        # X-axis neighbors
        for ri in router_map['X']:
            for li in range(MAX_USRPS): # li = GT Lane index
                bee7fpga.sim_core.connect(bee7fpga.serdes_i[base_usrp_lane + li], 0, bee7fpga.serdes_o[router_map['X'][ri] + li], 0)

        # Consequently, this FPGA will receive the USRP sample streams from each of
        # its X-axis neighbors. Define an internal bus to aggregate all the neighbor
        # streams with the native ones. Order the streams such that each FPGA sees the
        # same data streams.
        bee7fpga.int_samp_bus = dict()
        for i in range(DIM_WIDTH):
            for li in range(MAX_USRPS): # li = GT Lane index
                bee7fpga.int_samp_bus[(MAX_USRPS*i) + li] = rfnocsim.Channel(
                    bee7fpga.sim_core, '%s/_INT_SAMP_%02d' % (bee7fpga.name,(MAX_USRPS*i) + li))
                # Local USRP lanes feed the slot for this FPGA's own X address;
                # all other slots come from the X-axis neighbor links
                ln_base = base_usrp_lane if i == fpga_addr['X'] else router_map['X'][i]
                bee7fpga.sim_core.connect(bee7fpga.serdes_i[ln_base + li], 0, bee7fpga.int_samp_bus[(MAX_USRPS*i) + li], 0)

        # Forward the X-axis aggregated sample streams to all Y-axis neighbors
        for ri in router_map['Y']:
            for li in range(DIM_WIDTH*DIM_WIDTH): # li = GT Lane index
                bee7fpga.sim_core.connect(bee7fpga.int_samp_bus[li], 0, bee7fpga.serdes_o[router_map['Y'][ri] + li], 0)

        # What partial products will this FPGA compute?
        # Generate channel list to compute partial products
        pp_chans = list()
        for cg in range(DIM_WIDTH): # cg = Channel group
            for r in range(NUM_USRPS):
                radio_num = cls.get_radio_num({'X':fpga_addr['X'], 'Y':fpga_addr['Y'], 'Z':cg}, r, NUM_USRPS)
                for ch in range(CHANS_PER_USRP):
                    pp_chans.append(radio_num*CHANS_PER_USRP + ch)

        # Instantiate partial product computer
        bee7fpga.func_pp_comp = PartialContribComputer(
            sim_core=bee7fpga.sim_core, name=bee7fpga.name+'/pp_computer/', size=DIM_WIDTH*DIM_WIDTH*NUM_USRPS,
            dst_chans=pp_chans,
            items_per_stream=CHANS_PER_USRP, app_settings=app_settings)
        bee7fpga.add_function(bee7fpga.func_pp_comp)

        # Partial product computer takes inputs from all Y-axis links
        for sg in range(DIM_WIDTH): # sg = Group of sixteen channels (sexdectet)
            for qi in range(DIM_WIDTH): # qi = GT Quad index
                for li in range(NUM_USRPS):
                    func_inln = (sg * DIM_WIDTH * NUM_USRPS) + (qi * NUM_USRPS) + li
                    # This FPGA's own Y-slot comes from the internal sample
                    # bus; the others come from the Y-axis serdes links
                    if sg == fpga_addr['Y']:
                        bee7fpga.sim_core.connect(bee7fpga.int_samp_bus[(qi * DIM_WIDTH) + li], 0,
                            bee7fpga.func_pp_comp, func_inln)
                    else:
                        bee7fpga.sim_core.connect(bee7fpga.serdes_i[router_map['Y'][sg] + (qi * DIM_WIDTH) + li], 0,
                            bee7fpga.func_pp_comp, func_inln)

        # Internal bus to hold aggregated partial products
        bee7fpga.pp_bus = dict()
        for i in range(DIM_WIDTH*NUM_USRPS):
            bee7fpga.pp_bus[i] = rfnocsim.Channel(bee7fpga.sim_core, '%s/_INT_PP_%02d' % (bee7fpga.name,i))
            bee7fpga.sim_core.connect(bee7fpga.func_pp_comp, i, bee7fpga.pp_bus[i], 0)

        # Forward partial products to Z-axis neighbors
        for ri in router_map['Z']:
            for li in range(NUM_USRPS): # li = GT Lane index
                bee7fpga.sim_core.connect(bee7fpga.pp_bus[ri*NUM_USRPS + li], 0, bee7fpga.serdes_o[router_map['Z'][ri] + li], 0)

        # Instantiate partial product adder
        bee7fpga.func_pp_comb = dict()
        for i in range(NUM_USRPS):
            bee7fpga.func_pp_comb[i] = PartialContribCombiner(
                sim_core=bee7fpga.sim_core, name=bee7fpga.name + '/pp_combiner_%d/'%(i),
                radix=DIM_WIDTH, app_settings=app_settings, reducer_filter=(ALL_CHANS, 'tx'),
                items_per_stream=CHANS_PER_USRP)
            bee7fpga.add_function(bee7fpga.func_pp_comb[i])

        # Aggregate partial products from Z-axis neighbors
        for u in range(NUM_USRPS):
            for ri in range(DIM_WIDTH):
                # Remote Z addresses arrive over serdes; this node's own
                # contribution is taken from the internal pp bus
                if ri in router_map['Z']:
                    bee7fpga.sim_core.connect(bee7fpga.serdes_i[router_map['Z'][ri] + u], 0, bee7fpga.func_pp_comb[u], ri)
                else:
                    bee7fpga.sim_core.connect(bee7fpga.pp_bus[ri*NUM_USRPS + u], 0, bee7fpga.func_pp_comb[u], ri)

        # Send the combined streams back out on the USRP-facing lanes
        for u in range(NUM_USRPS):
            bee7fpga.sim_core.connect(bee7fpga.func_pp_comb[u], 0, bee7fpga.serdes_o[base_usrp_lane + u], 0)

        # Coefficient consumer
        bee7fpga.coeff_sink = rfnocsim.Consumer(bee7fpga.sim_core, bee7fpga.name + '/coeff_sink', 10e9/8, 0.0)
        bee7fpga.sim_core.connect(bee7fpga.serdes_i[terminal_map['X'] + NUM_USRPS], 0, bee7fpga.coeff_sink, 0)

    @classmethod
    def connect(cls, sim_core, usrps, bee7blades, hosts, app_settings):
        """Wire USRPs, BEE7 blades and hosts into the 3-D Flattened Butterfly."""
        NUM_USRPS = 2

        # Reshape BEE7s
        # The blades are arranged in 3D Flattened Butterfly configuration
        # with a dimension width of 4. The X and Z dimension represent row, col
        # and the Y dimension represents the internal connections
        bee7grid = []
        for r in range(4):
            bee7row = []
            for c in range(4):
                blade = bee7blades[4*r + c]
                for f in range(blade.NUM_FPGAS):
                    cls.config_bitstream(blade.fpgas[f], app_settings, {'X':r, 'Y':f, 'Z':c})
                bee7row.append(blade)
            bee7grid.append(bee7row)

        # USRP-Bee7 Connections
        # Blades across the diagonal are connected to USRPs
        for x in range(4):
            for y in range(4):
                for z in range(4):
                    for u in range(NUM_USRPS):
                        usrp_num = cls.get_radio_num({'X':x,'Y':y,'Z':z}, u, NUM_USRPS)
                        (router_map, terminal_map) = cls.get_portmap({'X':x,'Y':y,'Z':z})
                        sim_core.connect_bidir(
                            usrps[usrp_num], 0,
                            bee7grid[x][z], hw.Bee7Blade.io_lane(y, terminal_map['X'] + u), 'SAMP')

        # Bee7-Bee7 Connections
        null_srcs = []
        for row in range(4):
            for col in range(4):
                for fpga in range(4):
                    (src_map, t) = cls.get_portmap({'X':row,'Y':fpga,'Z':col})
                    for dst in range(4):
                        # X-axis (sample) links between blades in the same row
                        if row != dst:
                            (dst_map, t) = cls.get_portmap({'X':dst,'Y':fpga,'Z':col})
                            sim_core.connect_multi(
                                bee7grid[row][col],
                                list(range(hw.Bee7Blade.io_lane(fpga, src_map['X'][dst]), hw.Bee7Blade.io_lane(fpga, src_map['X'][dst]+4))),
                                bee7grid[dst][col],
                                list(range(hw.Bee7Blade.io_lane(fpga, dst_map['X'][row]), hw.Bee7Blade.io_lane(fpga, dst_map['X'][row]+4))),
                                'SAMP')
                        # Z-axis (partial product) links between blades in the same column
                        if col != dst:
                            (dst_map, t) = cls.get_portmap({'X':row,'Y':fpga,'Z':dst})
                            sim_core.connect_multi(
                                bee7grid[row][col],
                                list(range(hw.Bee7Blade.io_lane(fpga, src_map['Z'][dst]), hw.Bee7Blade.io_lane(fpga, src_map['Z'][dst]+4))),
                                bee7grid[row][dst],
                                list(range(hw.Bee7Blade.io_lane(fpga, dst_map['Z'][col]), hw.Bee7Blade.io_lane(fpga, dst_map['Z'][col]+4))),
                                'PP', 'blue')

        # Host connection
        for row in range(4):
            for col in range(4):
                for fpga in range(4):
                    (router_map, terminal_map) = cls.get_portmap({'X':row,'Y':row,'Z':col})
                    sim_core.connect_bidir(
                        hosts[row], col*4 + fpga,
                        bee7grid[row][col], hw.Bee7Blade.io_lane(fpga, terminal_map['X'] + NUM_USRPS), 'COEFF', 'red')
diff --git a/fpga/usrp3/tools/utils/rfnoc-system-sim/ni_hw_models.py b/fpga/usrp3/tools/utils/rfnoc-system-sim/ni_hw_models.py
new file mode 100755
index 000000000..815003c5f
--- /dev/null
+++ b/fpga/usrp3/tools/utils/rfnoc-system-sim/ni_hw_models.py
@@ -0,0 +1,261 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Ettus Research
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import rfnocsim
+import math
+
+class UsrpX310(rfnocsim.SimComp):
+ # Hardware specific constants
+ RADIO_LATENCY = 1e-6
+ IO_LATENCY = 1e-6
+ MAX_SAMP_RATE = 300e6 # Limited by 10GbE
+ BPI = 4 # Bytes per sample (item)
+
+ """
+ Simulation model for the USRP X310
+ - Has two producers and consumers of FFT data
+ - Computes bandwidth and latency using FFT size and overlap
+ """
+ def __init__(self, sim_core, index, app_settings):
+ rfnocsim.SimComp.__init__(self, sim_core, name='USRP_%03d' % (index), ctype=rfnocsim.comptype.hardware)
+ # USRP i carries data for radio 2i and 2i+1 interleaved into one stream
+ self.index = index
+ items = [rfnocsim.DataStream.submatrix_gen('rx', [2*index]),
+ rfnocsim.DataStream.submatrix_gen('rx', [2*index+1])]
+ # Samples are 4 bytes I and Q
+ latency = (self.RADIO_LATENCY + self.IO_LATENCY/2) * self.get_tick_rate()
+ if app_settings['domain'] == 'frequency':
+ # Max latency per direction depends on the FFT size and sample rate
+ latency += self.__get_fft_latency(
+ app_settings['fft_size'], app_settings['samp_rate'], self.get_tick_rate())
+ # An X310 Radio has two producers (RX data) and consumers (TX data) (i.e. two ethernet ports)
+ # Both ports can carry data from both radio frontends
+ self.sources = ([
+ rfnocsim.Producer(sim_core, self.name + '/TX0', self.BPI, items, self.MAX_SAMP_RATE, latency),
+ rfnocsim.Producer(sim_core, self.name + '/TX1', self.BPI, items, self.MAX_SAMP_RATE, latency)])
+ self.sinks = ([
+ rfnocsim.Consumer(sim_core, self.name + '/RX0', self.BPI * self.MAX_SAMP_RATE, latency),
+ rfnocsim.Consumer(sim_core, self.name + '/RX1', self.BPI * self.MAX_SAMP_RATE, latency)])
+ # The actual sample rate depends over the wire depends on the radio sample rate,
+ # the FFT size and FFT overlap
+ for src in self.sources:
+ if app_settings['domain'] == 'frequency':
+ src.set_rate(app_settings['samp_rate'] *
+ (1.0 + (float(app_settings['fft_overlap'])/app_settings['fft_size'])))
+ else:
+ src.set_rate(app_settings['samp_rate'])
+
+ def inputs(self, i, bind=False):
+ return self.sinks[i].inputs(0, bind)
+
+ def connect(self, i, dest):
+ self.sources[i].connect(0, dest)
+
+ def get_utilization(self, what):
+ return 0.0
+
+ def get_util_attrs(self):
+ return []
+
+ def validate(self, chan):
+ recvd = self.sinks[chan].get_items()
+ idxs = []
+ for i in recvd:
+ (str_id, idx) = rfnocsim.DataStream.submatrix_parse(i)
+ if str_id != 'tx':
+ raise RuntimeError(self.name + ' received incorrect TX data on channel ' + str(chan))
+ idxs.append(idx[0][0])
+ if sorted(idxs) != [self.index*2, self.index*2 + 1]:
+ raise RuntimeError(self.name + ' received incorrect TX data. Got: ' + str(sorted(idxs)))
+
+ def __get_fft_latency(self, fft_size, samp_rate, tick_rate):
+ FFT_CLK_RATE = 200e6
+ fft_cycles = {128:349, 256:611, 512:1133, 1024:2163, 2048:4221, 4096:8323}
+ latency = max(
+ fft_cycles[fft_size] / FFT_CLK_RATE, #Min time to leave FFT
+ fft_size / samp_rate) #Min time to enter FFT
+ return latency * tick_rate
+
+
+class Bee7Fpga(rfnocsim.SimComp):
+ """
+ Simulation model for a single Beecube BEE7 FPGA
+ - Type = hardware
+ - Contains 80 IO lanes per FPGA: 16 each to neighboring
+ FPGAs and 32 lanes going outside
+ """
+ # IO lanes (How the various IO lanes in an FPGA are allocated)
+ EW_IO_LANES = list(range(0,16))
+ NS_IO_LANES = list(range(16,32))
+ XX_IO_LANES = list(range(32,48))
+ EXT_IO_LANES = list(range(48,80))
+ # External IO lane connections
+ FP_BASE = 0 # Front panel FMC
+ FP_LANES = 16
+ BP_BASE = 16 # Backplane RTM
+ BP_LANES = 16
+
+ # Hardware specific constants
+ IO_LN_LATENCY = 1.5e-6
+ IO_LN_BW = 10e9/8
+ ELASTIC_BUFF_FULLNESS = 0.5
+ BRAM_BYTES = 18e3/8
+
+ def __init__(self, sim_core, name):
+ self.sim_core = sim_core
+ rfnocsim.SimComp.__init__(self, sim_core, name, rfnocsim.comptype.hardware)
+ # Max resources from Virtex7 datasheet
+ self.max_resources = rfnocsim.HwRsrcs()
+ self.max_resources.add('DSP', 3600)
+ self.max_resources.add('BRAM_18kb', 2940)
+ self.resources = rfnocsim.HwRsrcs()
+ # Each FPGA has 80 SERDES lanes
+ self.max_io = 80
+ self.serdes_i = dict()
+ self.serdes_o = dict()
+ # Each lane can carry at most 10GB/s
+ # Each SERDES needs to have some buffering. We assume elastic buffering (50% full on avg).
+ io_buff_size = (self.IO_LN_BW * self.IO_LN_LATENCY) / self.ELASTIC_BUFF_FULLNESS
+ # Worst case lane latency
+ lane_latency = self.IO_LN_LATENCY * self.get_tick_rate()
+ for i in range(self.max_io):
+ self.serdes_i[i] = rfnocsim.Channel(sim_core, self.__ioln_name(i)+'/I', self.IO_LN_BW, lane_latency / 2)
+ self.serdes_o[i] = rfnocsim.Channel(sim_core, self.__ioln_name(i)+'/O', self.IO_LN_BW, lane_latency / 2)
+ self.resources.add('BRAM_18kb', 1 + math.ceil(io_buff_size / self.BRAM_BYTES)) #input buffering per lane
+ self.resources.add('BRAM_18kb', 1) #output buffering per lane
+ # Other resources
+ self.resources.add('BRAM_18kb', 72) # BPS infrastructure + microblaze
+ self.resources.add('BRAM_18kb', 128) # 2 MIGs
+
+ self.functions = dict()
+
+ def inputs(self, i, bind=False):
+ return self.serdes_i[i].inputs(0, bind)
+
+ def connect(self, i, dest):
+ self.serdes_o[i].connect(0, dest)
+
+ def get_utilization(self, what):
+ if self.max_resources.get(what) != 0:
+ return self.resources.get(what) / self.max_resources.get(what)
+ else:
+ return 0.0
+
+ def get_util_attrs(self):
+ return ['DSP', 'BRAM_18kb']
+
+ def rename(self, name):
+ self.name = name
+
+ def add_function(self, func):
+ if func.name not in self.functions:
+ self.functions[func.name] = func
+ else:
+ raise RuntimeError('Function ' + func.name + ' already defined in ' + self.name)
+ self.resources.merge(func.get_rsrcs())
+
+ def __ioln_name(self, i):
+ if i in self.EW_IO_LANES:
+ return '%s/SER_EW_%02d'%(self.name,i-self.EW_IO_LANES[0])
+ elif i in self.NS_IO_LANES:
+ return '%s/SER_NS_%02d'%(self.name,i-self.NS_IO_LANES[0])
+ elif i in self.XX_IO_LANES:
+ return '%s/SER_XX_%02d'%(self.name,i-self.XX_IO_LANES[0])
+ else:
+ return '%s/SER_EXT_%02d'%(self.name,i-self.EXT_IO_LANES[0])
+
+class Bee7Blade(rfnocsim.SimComp):
+ """
+ Simulation model for a single Beecube BEE7
+ - Contains 4 FPGAs (fully connected with 16 lanes)
+ """
+ NUM_FPGAS = 4
+ # FPGA positions in the blade
+ NW_FPGA = 0
+ NE_FPGA = 1
+ SW_FPGA = 2
+ SE_FPGA = 3
+
+ def __init__(self, sim_core, index):
+ self.sim_core = sim_core
+ self.name = name='BEE7_%03d' % (index)
+ # Add FPGAs
+ names = ['FPGA_NW', 'FPGA_NE', 'FPGA_SW', 'FPGA_SE']
+ self.fpgas = []
+ for i in range(self.NUM_FPGAS):
+ self.fpgas.append(Bee7Fpga(sim_core, name + '/' + names[i]))
+ # Build a fully connected network of FPGA
+ # 4 FPGAs x 3 Links x 2 directions = 12 connections
+ self.sim_core.connect_multi_bidir(
+ self.fpgas[self.NW_FPGA], Bee7Fpga.EW_IO_LANES, self.fpgas[self.NE_FPGA], Bee7Fpga.EW_IO_LANES)
+ self.sim_core.connect_multi_bidir(
+ self.fpgas[self.NW_FPGA], Bee7Fpga.NS_IO_LANES, self.fpgas[self.SW_FPGA], Bee7Fpga.NS_IO_LANES)
+ self.sim_core.connect_multi_bidir(
+ self.fpgas[self.NW_FPGA], Bee7Fpga.XX_IO_LANES, self.fpgas[self.SE_FPGA], Bee7Fpga.XX_IO_LANES)
+ self.sim_core.connect_multi_bidir(
+ self.fpgas[self.NE_FPGA], Bee7Fpga.XX_IO_LANES, self.fpgas[self.SW_FPGA], Bee7Fpga.XX_IO_LANES)
+ self.sim_core.connect_multi_bidir(
+ self.fpgas[self.NE_FPGA], Bee7Fpga.NS_IO_LANES, self.fpgas[self.SE_FPGA], Bee7Fpga.NS_IO_LANES)
+ self.sim_core.connect_multi_bidir(
+ self.fpgas[self.SW_FPGA], Bee7Fpga.EW_IO_LANES, self.fpgas[self.SE_FPGA], Bee7Fpga.EW_IO_LANES)
+
+ def inputs(self, i, bind=False):
+ IO_PER_FPGA = len(Bee7Fpga.EXT_IO_LANES)
+ return self.fpgas[int(i/IO_PER_FPGA)].inputs(Bee7Fpga.EXT_IO_LANES[i%IO_PER_FPGA], bind)
+
+ def connect(self, i, dest):
+ IO_PER_FPGA = len(Bee7Fpga.EXT_IO_LANES)
+ self.fpgas[int(i/IO_PER_FPGA)].connect(Bee7Fpga.EXT_IO_LANES[i%IO_PER_FPGA], dest)
+
+ @staticmethod
+ def io_lane(fpga, fpga_lane):
+ IO_PER_FPGA = len(Bee7Fpga.EXT_IO_LANES)
+ return (fpga_lane - Bee7Fpga.EXT_IO_LANES[0]) + (fpga * IO_PER_FPGA)
+
+class ManagementHostandSwitch(rfnocsim.SimComp):
+ """
+ Simulation model for a management host computer
+ - Sources channel coefficients
+ - Configures radio
+ """
+ def __init__(self, sim_core, index, num_coeffs, switch_ports, app_settings):
+ rfnocsim.SimComp.__init__(self, sim_core, name='MGMT_HOST_%03d'%(index), ctype=rfnocsim.comptype.other)
+ if app_settings['domain'] == 'frequency':
+ k = app_settings['fft_size']
+ else:
+ k = app_settings['fir_taps']
+
+ self.sources = dict()
+ self.sinks = dict()
+ for l in range(switch_ports):
+ self.sources[l] = rfnocsim.Producer(
+ sim_core, '%s/COEFF_%d'%(self.name,l), 4, ['coeff_%03d[%d]'%(index,l)], (10e9/8)/switch_ports, 0)
+ self.sinks[l] = rfnocsim.Consumer(sim_core, '%s/ACK%d'%(self.name,l))
+ self.sources[l].set_rate(k*num_coeffs*app_settings['coherence_rate'])
+
+ def inputs(self, i, bind=False):
+ return self.sinks[i].inputs(0, bind)
+
+ def connect(self, i, dest):
+ self.sources[i].connect(0, dest)
+
+ def get_utilization(self, what):
+ return 0.0
+
+ def get_util_attrs(self):
+ return []
diff --git a/fpga/usrp3/tools/utils/rfnoc-system-sim/rfnocsim.py b/fpga/usrp3/tools/utils/rfnoc-system-sim/rfnocsim.py
new file mode 100644
index 000000000..d841cc06b
--- /dev/null
+++ b/fpga/usrp3/tools/utils/rfnoc-system-sim/rfnocsim.py
@@ -0,0 +1,757 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Ettus Research
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import collections
+import copy
+import re
+import math
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.ticker as mticker
+from graphviz import Digraph
+
+#------------------------------------------------------------
+# Simulator Core Components
+#------------------------------------------------------------
+class comptype():
+ """
+ Simulation component type enumeration
+ """
+ producer = 'Producer'
+ consumer = 'Consumer'
+ channel = 'Channel'
+ function = 'Function'
+ hardware = 'Hardware'
+ other = 'Other'
+
+class SimulatorCore:
+ """
+ Core simulation engine:
+ This class owns all the simulation components and
+ manages time and other housekeeping operations.
+ """
+
+ def __init__(self, tick_rate):
+ self.__ticks = 0
+ self.__tick_rate = tick_rate
+ self.__tick_aware_comps = list()
+ self.__all_comps = dict()
+ self.__edge_render_db = list()
+
+ def register(self, comp, tick_aware):
+ if comp.name not in self.__all_comps:
+ self.__all_comps[comp.name] = comp
+ else:
+ raise RuntimeError('Duplicate component ' + comp.name)
+ if tick_aware:
+ self.__tick_aware_comps.append(comp)
+
+ def connect(self, src, srcport, dst, dstport, render_label=None, render_color=None):
+ src.connect(srcport, dst.inputs(dstport, bind=True))
+ if render_label:
+ self.__edge_render_db.append(
+ (src.name, dst.name, 1.0, render_label, render_color))
+
+ def connect_bidir(self, ep1, ep1port, ep2, ep2port, render_labels=None, render_colors=None):
+ if render_labels:
+ if not isinstance(render_labels, (list, tuple)):
+ render_labels = [render_labels, render_labels]
+ else:
+ render_labels = [None, None]
+ if render_colors:
+ if not isinstance(render_colors, (list, tuple)):
+ render_colors = [render_colors, render_colors]
+ else:
+ render_colors = [None, None]
+ self.connect(ep1, ep1port, ep2, ep2port, render_labels[0], render_colors[0])
+ self.connect(ep2, ep2port, ep1, ep1port, render_labels[1], render_colors[1])
+
+ def connect_multi(self, src, srcports, dst, dstports, render_label=None, render_color=None):
+ if len(srcports) != len(dstports):
+ raise RuntimeError(
+ 'Source and destination ports should be of the same length')
+ for i in range(len(srcports)):
+ src.connect(srcports[i], dst.inputs(dstports[i], bind=True))
+ if render_label:
+ self.__edge_render_db.append((src.name, dst.name, float(len(srcports)), render_label, render_color))
+
+ def connect_multi_bidir(self, ep1, ep1port, ep2, ep2port, render_labels=None, render_colors=None):
+ if render_labels:
+ if not isinstance(render_labels, (list, tuple)):
+ render_labels = [render_labels, render_labels]
+ else:
+ render_labels = [None, None]
+ if render_colors:
+ if not isinstance(render_colors, (list, tuple)):
+ render_colors = [render_colors, render_colors]
+ else:
+ render_colors = [None, None]
+ self.connect_multi(ep1, ep1port, ep2, ep2port, render_labels[0], render_colors[0])
+ self.connect_multi(ep2, ep2port, ep1, ep1port, render_labels[1], render_colors[1])
+
+ def list_components(self, comptype='', name_filt=''):
+ if not comptype:
+ return sorted([c for c in list(self.__all_comps.keys())
+ if (re.match(name_filt, self.__all_comps[c].name))])
+ else:
+ return sorted([c for c in list(self.__all_comps.keys())
+ if (self.__all_comps[c].type == comptype and
+ re.match(name_filt, self.__all_comps[c].name))])
+
+ def lookup(self, comp_name):
+ return self.__all_comps[comp_name]
+
+ def tick(self):
+ self.__ticks += 1
+ for c in self.__tick_aware_comps:
+ c.tick()
+
+ def run(self, time_s):
+ for i in range(int(time_s * self.__tick_rate)):
+ self.tick()
+
+ def get_ticks(self):
+ return self.__ticks
+
+ def get_tick_rate(self):
+ return self.__tick_rate
+
+ def network_to_dot(self):
+ dot = Digraph(comment='RFNoC Network Topology')
+ node_ids = dict()
+ next_node_id = 1
+ for edgeinfo in self.__edge_render_db:
+ for i in range(2):
+ node = edgeinfo[i]
+ if node not in node_ids:
+ node_id = next_node_id
+ node_ids[node] = node_id
+ dot.node(str(node_id), node)
+ next_node_id += 1
+ for edgeinfo in self.__edge_render_db:
+ dot.edge(
+ tail_name=str(node_ids[edgeinfo[0]]),
+ head_name=str(node_ids[edgeinfo[1]]),
+ label=edgeinfo[3],
+ weight=str(edgeinfo[2]), penwidth=str(edgeinfo[2]/2),
+ color=str(edgeinfo[4] if edgeinfo[4] else 'black'))
+ return dot
+
+class SimComp:
+ """
+ Base simulation component:
+ All components must inherit from SimComp.
+ """
+
+ def __init__(self, sim_core, name, ctype):
+ self.__sim_core = sim_core
+ self.name = name
+ self.type = ctype
+ self.__sim_core.register(self, (ctype == comptype.producer))
+
+ def get_ticks(self):
+ return self.__sim_core.get_ticks()
+
+ def get_tick_rate(self):
+ return self.__sim_core.get_tick_rate()
+
+ def SimCompError(self, msg):
+ raise RuntimeError(msg + ' [' + self.name + ']')
+
+#------------------------------------------------------------
+# Data stream components
+#------------------------------------------------------------
+class HwRsrcs():
+ """
+ Hardware Resources Container:
+ This object holds physical hardware resource information
+ that can be used to report utilization. Resource items are
+ generic and can be defined by the actual simulation.
+ """
+
+ def __init__(self):
+ self.__rsrcs = dict()
+
+ def get(self, what):
+ if what in self.__rsrcs:
+ return self.__rsrcs[what]
+ else:
+ return 0.0
+
+ def set(self, what, value):
+ self.__rsrcs[what] = float(value)
+
+ def add(self, what, value):
+ if what in self.__rsrcs:
+ self.__rsrcs[what] += float(value)
+ else:
+ self.__rsrcs[what] = float(value)
+
+ def merge(self, other_rsrcs):
+ for attr in other_rsrcs.get_attrs():
+ self.add(attr, other_rsrcs.get(attr))
+
+ def get_attrs(self):
+ return list(self.__rsrcs.keys())
+
+ def reset(self, what = None):
+ if what is not None:
+ if what in self.__rsrcs:
+ self.__rsrcs[what] = 0.0
+ else:
+ self.__rsrcs = dict()
+
+class DataStream:
+ """
+ Data Stream Object:
+ Holds information about a data stream that passes through various blocks.
+ The simulator simulates event on the actual stream so each stream Object
+ must have a unique payload (items) to disambiguate it from the rest.
+ """
+ HopInfo = collections.namedtuple('HopInfo', ['location', 'latency'])
+
+ class HopDb():
+ def __init__(self, hops):
+ self.__hops = hops
+
+ def get_src(self):
+ return self.__hops[0].location
+
+ def get_dst(self):
+ return self.__hops[-1].location
+
+ def get_hops(self):
+ hoparr = []
+ for h in self.__hops:
+ hoparr.append(h.location)
+ return hoparr
+
+ def get_latency(self, ticks, location = ''):
+ latency = ticks - self.__hops[0].latency #Hop0 always has the init timestamp
+ if (self.__hops[0].location != location):
+ for i in range(1,len(self.__hops)):
+ latency += self.__hops[i].latency
+ if (self.__hops[i].location == location):
+ break
+ return latency
+
+ def __init__(self, bpi, items, count, producer=None, parent=None):
+ self.bpi = bpi
+ self.items = []
+ self.items.extend(items)
+ self.count = count
+ self.__hops = list()
+ if producer and parent:
+ raise RuntimeError('Data stream cannot have both a producer and a parent stream')
+ elif producer:
+ self.__hops.append(self.HopInfo(location='Gen@'+producer.name, latency=producer.get_ticks()))
+ elif parent:
+ self.__hops.extend(parent.get_hops())
+ else:
+ raise RuntimeError('Data stream must have a producer or a parent stream')
+
+ def add_hop(self, location, latency):
+ self.__hops.append(self.HopInfo(location=location, latency=latency))
+
+ def get_hops(self):
+ return self.__hops
+
+ def get_bytes(self):
+ return self.bpi * len(self.items) * self.count
+
+ """
+ Type specific methods
+ """
+ @staticmethod
+ def submatrix_gen(matrix_id, coordinates):
+ coord_arr = []
+ for c in coordinates:
+ if isinstance(c, collections.abc.Iterable):
+ coord_arr.append('(' + (','.join(str(x) for x in c)) + ')')
+ else:
+ coord_arr.append('(' + str(c) + ')')
+ return matrix_id + '[' + ';'.join(coord_arr) + ']'
+
+ @staticmethod
+ def submatrix_parse(stream_id):
+ m = re.match(r'(.+)\[(.*)\]', stream_id)
+ matrix_id = m.group(1)
+ coords = []
+ for cstr in m.group(2).split(';'):
+ coords.append([int(x) for x in re.match(r'\((.+)\)', cstr).group(1).split(',')])
+ return (matrix_id, coords)
+
+#------------------------------------------------------------
+# Basic Network components
+#------------------------------------------------------------
+
+# Producer object.
+class Producer(SimComp):
+ """
+ Producer Block:
+ Generates data at a constant rate
+ """
+
+ def __init__(self, sim_core, name, bpi, items, max_samp_rate = float('inf'), latency = 0):
+ SimComp.__init__(self, sim_core, name, comptype.producer)
+ self.__bpi = bpi
+ self.__items = items
+ self.__bw = max_samp_rate * bpi
+ self.__latency = latency
+ self.__dests = list()
+ self.__data_count = 0
+ self.__byte_count = 0
+ self.__backpressure_ticks = 0
+ self.set_rate(self.get_tick_rate())
+
+ def inputs(self, i, bind=False):
+ raise self.SimCompError('This is a producer block. Cannot connect another block to it.')
+
+ def connect(self, i, dest):
+ self.__dests.append(dest)
+
+ def set_rate(self, samp_rate):
+ self.__data_count = samp_rate / self.get_tick_rate()
+
+ def tick(self):
+ if len(self.__dests) > 0:
+ ready = True
+ for dest in self.__dests:
+ ready = ready and dest.is_ready()
+ if ready:
+ data = DataStream(
+ bpi=self.__bpi, items=self.__items, count=self.__data_count, producer=self)
+ if self.__backpressure_ticks > 0:
+ data.add_hop('BP@'+self.name, self.__backpressure_ticks)
+ data.add_hop(self.name, self.__latency)
+ for dest in self.__dests:
+ dest.push(copy.deepcopy(data))
+ self.__byte_count += data.get_bytes()
+ self.__backpressure_ticks = 0
+ else:
+ self.__backpressure_ticks += 1
+
+ def get_bytes(self):
+ return self.__byte_count
+
+ def get_util_attrs(self):
+ return ['bandwidth']
+
+ def get_utilization(self, what):
+ if what in self.get_util_attrs():
+ return ((self.__byte_count / (self.get_ticks() / self.get_tick_rate())) /
+ self.__bw)
+ else:
+ return 0.0
+
+# Consumer object.
+class Consumer(SimComp):
+ """
+ Consumer Block:
+ Consumes data at a constant rate
+ """
+
+ def __init__(self, sim_core, name, bw = float("inf"), latency = 0):
+ SimComp.__init__(self, sim_core, name, comptype.consumer)
+ self.__byte_count = 0
+ self.__item_db = dict()
+ self.__bw = bw
+ self.__latency = latency
+ self.__bound = False
+
+ def inputs(self, i, bind=False):
+ if bind and self.__bound:
+ raise self.SimCompError('Input ' + str(i) + ' is already driven (bound).')
+ self.__bound = bind
+ return self
+
+ def connect(self, i, dest):
+ raise self.SimCompError('This is a consumer block. Cannot connect to another block.')
+
+ def is_ready(self):
+ return True #TODO: Readiness can depend on bw and byte_count
+
+ def push(self, data):
+ data.add_hop(self.name, self.__latency)
+ for item in data.items:
+ self.__item_db[item] = DataStream.HopDb(data.get_hops())
+ self.__byte_count += data.get_bytes()
+
+ def get_items(self):
+ return list(self.__item_db.keys())
+
+ def get_bytes(self):
+ return self.__byte_count
+
+ def get_hops(self, item):
+ return self.__item_db[item].get_hops()
+
+ def get_latency(self, item, hop=None):
+ if not hop:
+ hop = self.get_hops(item)[-1]
+ return self.__item_db[item].get_latency(self.get_ticks(), hop) / self.get_tick_rate()
+
+ def get_util_attrs(self):
+ return ['bandwidth']
+
+ def get_utilization(self, what):
+ if what in self.get_util_attrs():
+ return ((self.__byte_count / (self.get_ticks() / self.get_tick_rate())) /
+ self.__bw)
+ else:
+ return 0.0
+
+# Channel
+class Channel(SimComp):
+ """
+ A resource limited IO pipe:
+ From the data stream perspective, this is a passthrough
+ """
+
+ def __init__(self, sim_core, name, bw = float("inf"), latency = 0, lossy = True):
+ SimComp.__init__(self, sim_core, name, comptype.channel)
+ self.__bw = bw
+ self.__latency = latency
+ self.__lossy = lossy
+ self.__dests = list()
+ self.__byte_count = 0
+ self.__bound = False
+
+ def get_bytes(self):
+ return self.__byte_count
+
+ def inputs(self, i, bind=False):
+ if (i != 0):
+ raise self.SimCompError('An IO lane has only one input.')
+ if bind and self.__bound:
+ raise self.SimCompError('Input ' + str(i) + ' is already driven (bound).')
+ self.__bound = bind
+ return self
+
+ def connect(self, i, dest):
+ self.__dests.append(dest)
+
+ def is_connected(self):
+ return len(self.__dests) > 0
+
+ def is_bound(self):
+ return self.__bound
+
+ def is_ready(self):
+ # If nothing is hooked up to a lossy lane, it will drop data
+ if self.__lossy and not self.is_connected():
+ return True
+ ready = self.is_connected()
+ for dest in self.__dests:
+ ready = ready and dest.is_ready()
+ return ready
+
+ def push(self, data):
+ # If nothing is hooked up to a lossy lane, it will drop data
+ if self.__lossy and not self.is_connected():
+ return
+ data.add_hop(self.name, self.__latency)
+ for dest in self.__dests:
+ dest.push(copy.deepcopy(data))
+ self.__byte_count += data.get_bytes()
+
+ def get_util_attrs(self):
+ return ['bandwidth']
+
+ def get_utilization(self, what):
+ if what in self.get_util_attrs():
+ return ((self.__byte_count / (self.get_ticks() / self.get_tick_rate())) /
+ self.__bw)
+ else:
+ return 0.0
+
+# Function
+class Function(SimComp):
+ """
+ A Function Component:
+ A function block is something that does anything interesting with a data stream.
+ A function can have multiple input and output streams.
+ """
+
+ class Arg:
+ def __init__(self, num, base_func):
+ self.__num = num
+ self.__data = None
+ self.__base_func = base_func
+ self.__bound = False
+
+ def get_num(self):
+ return self.__num
+
+ def is_ready(self):
+ return self.__base_func.is_ready() and not self.__data
+
+ def push(self, data):
+ self.__data = data
+ self.__base_func.notify(self.__num)
+
+ def pop(self):
+ if self.__data:
+ data = self.__data
+ self.__data = None
+ return data
+ else:
+ raise RuntimeError('Nothing to pop.')
+
+ def bind(self, bind):
+ retval = self.__bound
+ self.__bound = bind
+ return retval
+
+ Latencies = collections.namedtuple('Latencies', ['func','inarg','outarg'])
+
+ def __init__(self, sim_core, name, num_in_args, num_out_args, ticks_per_exec = 1):
+ SimComp.__init__(self, sim_core, name, comptype.function)
+ self.__ticks_per_exec = ticks_per_exec
+ self.__last_exec_ticks = 0
+ self.__in_args = list()
+ for i in range(num_in_args):
+ self.__in_args.append(Function.Arg(i, self))
+ self.__dests = list()
+ for i in range(num_out_args):
+ self.__dests.append(None)
+ self.__in_args_pushed = dict()
+ # Resources required by this function to do its job in one tick
+ self.__rsrcs = HwRsrcs()
+ self.__latencies = self.Latencies(func=0, inarg=[0]*num_in_args, outarg=[0]*num_out_args)
+
+ def get_rsrcs(self):
+ return self.__rsrcs
+
+ def update_rsrcs(self, rsrcs):
+ self.__rsrcs = rsrcs
+
+ def update_latency(self, func, inarg=None, outarg=None):
+ self.__latencies = self.Latencies(
+ func=func,
+ inarg=inarg if inarg else [0]*len(self.__in_args),
+ outarg=outarg if outarg else [0]*len(self.__dests))
+
+ def inputs(self, i, bind=False):
+ if bind and self.__in_args[i].bind(True):
+ raise self.SimCompError('Input argument ' + str(i) + ' is already driven (bound).')
+ return self.__in_args[i]
+
+ def connect(self, i, dest):
+ self.__dests[i] = dest
+
+ def is_ready(self):
+ ready = len(self.__dests) > 0
+ for dest in self.__dests:
+ ready = ready and dest.is_ready()
+ exec_ready = (self.get_ticks() - self.__last_exec_ticks) >= self.__ticks_per_exec
+ return ready and exec_ready
+
+ def create_outdata_stream(self, bpi, items, count):
+ return DataStream(
+ bpi=bpi, items=items, count=count, parent=self.__max_latency_input)
+
+ def notify(self, arg_i):
+ self.__in_args_pushed[arg_i] = True
+ # Wait for all input args to come in
+ if (sorted(self.__in_args_pushed.keys()) == list(range(len(self.__in_args)))):
+ # Pop data out of each input arg
+ max_in_latency = 0
+ self.__max_latency_input = None
+ arg_data_in = list()
+ for arg in self.__in_args:
+ d = arg.pop()
+ arg_data_in.append(d)
+ lat = DataStream.HopDb(d.get_hops()).get_latency(self.get_ticks())
+ if lat > max_in_latency:
+ max_in_latency = lat
+ self.__max_latency_input = d
+ # Call the function
+ arg_data_out = self.do_func(arg_data_in)
+ if not isinstance(arg_data_out, collections.abc.Iterable):
+ arg_data_out = [arg_data_out]
+ # Update output args
+ for i in range(len(arg_data_out)):
+ arg_data_out[i].add_hop(self.name,
+ max(self.__latencies.inarg) + self.__latencies.func + self.__latencies.outarg[i])
+ self.__dests[i].push(arg_data_out[i])
+ # Cleanup
+ self.__last_exec_ticks = self.get_ticks()
+ self.__in_args_pushed = dict()
+
+ def get_util_attrs(self):
+ return []
+
+ def get_utilization(self, what):
+ return 0.0
+
+#------------------------------------------------------------
+# Plotting Functions
+#------------------------------------------------------------
+class Visualizer():
+ def __init__(self, sim_core):
+ self.__sim_core = sim_core
+ self.__figure = None
+ self.__fig_dims = None
+
+ def show_network(self, engine='fdp'):
+ dot = self.__sim_core.network_to_dot()
+ dot.format = 'png'
+ dot.engine = engine
+ dot.render('/tmp/rfnoc_sim.dot', view=True, cleanup=True)
+
+ def dump_consumed_streams(self, consumer_filt='.*'):
+ comps = self.__sim_core.list_components(comptype.consumer, consumer_filt)
+ print('=================================================================')
+ print('Streams Received by Consumers matching (%s) at Tick = %04d'%(consumer_filt,self.__sim_core.get_ticks()))
+ print('=================================================================')
+ for c in sorted(comps):
+ comp = self.__sim_core.lookup(c)
+ for s in sorted(comp.get_items()):
+ print(' - %s: (%s) Latency = %gs'%(s,c,comp.get_latency(s)))
+ print('=================================================================')
+
+ def dump_debug_audit_log(self, ctype, name_filt='.*'):
+ if ctype != comptype.channel:
+ raise NotImplementedError('Component type not yet supported: ' + ctype)
+
+ comps = self.__sim_core.list_components(ctype, name_filt)
+ print('=================================================================')
+ print('Debug Audit for all %s Components matching (%s)'%(ctype,name_filt))
+ print('=================================================================')
+ for c in sorted(comps):
+ comp = self.__sim_core.lookup(c)
+ status = 'Unknown'
+ if comp.is_bound() and comp.is_connected():
+ status = 'Good'
+ elif comp.is_bound() and not comp.is_connected():
+ status = 'WARNING (Driven but Unused)'
+ elif not comp.is_bound() and comp.is_connected():
+ status = 'WARNING (Used but Undriven)'
+ else:
+ status = 'Unused'
+ print(' - %s: Status = %s'%(c,status))
+ print('=================================================================')
+
+ def new_figure(self, grid_dims=[1,1], fignum=1, figsize=(16, 9), dpi=72):
+ self.__figure = plt.figure(num=fignum, figsize=figsize, dpi=dpi)
+ self.__fig_dims = grid_dims
+
+ def show_figure(self):
+ plt.show()
+ self.__figure = None
+
+ def plot_utilization(self, ctype, name_filt='.*', grid_pos=1):
+ colors = ['b','r','g','y']
+ comps = self.__sim_core.list_components(ctype, name_filt)
+ attrs = set()
+ for c in comps:
+ attrs |= set(self.__sim_core.lookup(c).get_util_attrs())
+ attrs = sorted(list(attrs))
+
+ if not self.__figure:
+ self.new_figure()
+ show = True
+ else:
+ show = False
+ self.__figure.subplots_adjust(bottom=0.25)
+ ax = self.__figure.add_subplot(*(self.__fig_dims + [grid_pos]))
+ title = 'Resource utilization for all %s\ncomponents matching \"%s\"' % \
+ (ctype, name_filt)
+ ax.set_title(title)
+ ax.set_ylabel('Resource Utilization (%)')
+ if comps:
+ ind = np.arange(len(comps))
+ width = 0.95/len(attrs)
+ rects = []
+ ymax = 100
+ for i in range(len(attrs)):
+ utilz = [self.__sim_core.lookup(c).get_utilization(attrs[i]) * 100 for c in comps]
+ rects.append(ax.bar(ind + width*i, utilz, width, color=colors[i%len(colors)]))
+ ymax = max(ymax, int(math.ceil(max(utilz) / 100.0)) * 100)
+ ax.set_ylim([0,ymax])
+ ax.set_yticks(list(range(0,ymax,10)))
+ ax.set_xticks(ind + 0.5)
+ ax.set_xticklabels(comps, rotation=90)
+ ax.legend(rects, attrs)
+ ax.grid(b=True, which='both', color='0.65',linestyle='--')
+ ax.plot([0, len(comps)], [100, 100], "k--", linewidth=3.0)
+ if show:
+ self.show_figure()
+
+ def plot_consumption_latency(self, stream_filt='.*', consumer_filt='.*', grid_pos=1):
+ streams = list()
+ for c in sorted(self.__sim_core.list_components(comptype.consumer, consumer_filt)):
+ for s in sorted(self.__sim_core.lookup(c).get_items()):
+ if (re.match(stream_filt, s)):
+ streams.append((c, s, c + '/' + s))
+
+ if not self.__figure:
+ self.new_figure()
+ show = True
+ else:
+ show = False
+ self.__figure.subplots_adjust(bottom=0.25)
+ ax = self.__figure.add_subplot(*(self.__fig_dims + [grid_pos]))
+ title = 'Latency of Maximal Path Terminating in\nStream(s) matching \"%s\"\n(Consumer Filter = \"%s\")' % \
+ (stream_filt, consumer_filt)
+ ax.set_title(title)
+ ax.set_ylabel('Maximal Source-to-Sink Latency (s)')
+ if streams:
+ ind = np.arange(len(streams))
+ latency = [self.__sim_core.lookup(c_s_d1[0]).get_latency(c_s_d1[1]) for c_s_d1 in streams]
+ rects = [ax.bar(ind, latency, 1.0, color='b')]
+ ax.set_xticks(ind + 0.5)
+ ax.set_xticklabels([c_s_d[2] for c_s_d in streams], rotation=90)
+ attrs = ['latency']
+ ax.legend(rects, attrs)
+ ax.yaxis.set_major_formatter(mticker.FormatStrFormatter('%.2e'))
+ ax.grid(b=True, which='both', color='0.65',linestyle='--')
+ if show:
+ self.show_figure()
+
+ def plot_path_latency(self, stream_id, consumer_filt = '.*', grid_pos=1):
+ path = []
+ latencies = []
+ for c in self.__sim_core.list_components(comptype.consumer, consumer_filt):
+ for s in self.__sim_core.lookup(c).get_items():
+ if (stream_id == s):
+ for h in self.__sim_core.lookup(c).get_hops(s):
+ path.append(h)
+ latencies.append(self.__sim_core.lookup(c).get_latency(s, h))
+ break
+ if not self.__figure:
+ self.new_figure()
+ show = True
+ else:
+ show = False
+ self.__figure.subplots_adjust(bottom=0.25)
+ ax = self.__figure.add_subplot(*(self.__fig_dims + [grid_pos]))
+ title = 'Accumulated Latency per Hop for Stream \"%s\"\n(Consumer Filter = \"%s\")' % \
+ (stream_id, consumer_filt)
+ ax.set_title(title)
+ ax.set_ylabel('Maximal Source-to-Sink Latency (s)')
+ if path:
+ ind = np.arange(len(path))
+ rects = [ax.plot(ind, latencies, '--rs')]
+ ax.set_xticks(ind)
+ ax.set_xticklabels(path, rotation=90)
+ ax.yaxis.set_major_formatter(mticker.FormatStrFormatter('%.2e'))
+ ax.grid(b=True, which='both', color='0.65',linestyle='--')
+ if show:
+ self.show_figure()
diff --git a/fpga/usrp3/tools/utils/rfnoc-system-sim/sim_colosseum.py b/fpga/usrp3/tools/utils/rfnoc-system-sim/sim_colosseum.py
new file mode 100755
index 000000000..81ef6cbf9
--- /dev/null
+++ b/fpga/usrp3/tools/utils/rfnoc-system-sim/sim_colosseum.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Ettus Research
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import rfnocsim
+import ni_hw_models as hw
+import colosseum_models
+import argparse
+import re
+
def main():
    """Simulate the Colosseum channel-emulator network.

    Builds the hardware model (128 USRPs, 16 BEE7 blades, 4 management
    hosts), connects it in the topology selected on the command line, runs
    a short simulation, validates correctness/feasibility, and plots
    utilization and latency metrics.
    """
    # Arguments
    parser = argparse.ArgumentParser(description='Simulate the Colosseum network')
    parser.add_argument('--topology', type=str, default='flb', choices=['torus','flb'], help='Topology')
    parser.add_argument('--domain', type=str, default='time', choices=['time','frequency'], help='Domain')
    parser.add_argument('--fir_taps', type=int, default=4, help='FIR Filter Taps (Time domain only)')
    parser.add_argument('--fir_dly_line', type=int, default=512, help='FIR Delay Line (Time domain only)')
    parser.add_argument('--fft_size', type=int, default=512, help='FFT Size (Frequency domain only)')
    parser.add_argument('--fft_overlap', type=int, default=256, help='FFT Overlap (Frequency domain only)')
    parser.add_argument('--samp_rate', type=float, default=100e6, help='Radio Channel Sample Rate')
    parser.add_argument('--coherence_rate', type=float, default=1000, help='Channel coefficient update rate')
    args = parser.parse_args()

    sim_core = rfnocsim.SimulatorCore(tick_rate=100e6)
    # Fixed deployment size: each USRP contributes two radio channels
    NUM_USRPS = 128
    NUM_HOSTS = 4
    NUM_BLADES = 16
    NUM_CHANS = NUM_USRPS * 2

    # Build an application settings structure (domain-specific keys only
    # exist for the selected domain)
    app_settings = dict()
    app_settings['domain'] = args.domain
    app_settings['samp_rate'] = args.samp_rate
    app_settings['coherence_rate'] = args.coherence_rate
    if args.domain == 'frequency':
        app_settings['fft_size'] = args.fft_size
        app_settings['fft_overlap'] = args.fft_overlap
    else:
        app_settings['fir_taps'] = args.fir_taps
        app_settings['fir_dly_line'] = args.fir_dly_line

    print('[INFO] Instantiating hardware resources...')
    # Create USRPs
    usrps = []
    for i in range(NUM_USRPS):
        usrps.append(hw.UsrpX310(sim_core, index=i, app_settings=app_settings))
    # Create BEE7s
    bee7blades = []
    for i in range(NUM_BLADES):
        bee7blades.append(hw.Bee7Blade(sim_core, index=i))
    # Create Management Hosts. The NUM_CHANS^2 channel coefficients are
    # split evenly across hosts; use floor division so num_coeffs stays an
    # integer under Python 3 (true division would yield a float here).
    hosts = []
    for i in range(NUM_HOSTS):
        hosts.append(hw.ManagementHostandSwitch(sim_core, index=i,
            num_coeffs=pow(NUM_CHANS, 2) // NUM_HOSTS, switch_ports=16, app_settings=app_settings))

    # Build topology
    print('[INFO] Building topology...')
    if args.topology == 'torus':
        colosseum_models.Topology_2D_4x4_Torus.connect(sim_core, usrps, bee7blades, hosts, app_settings)
    elif args.topology == 'flb':
        colosseum_models.Topology_3D_4x4_FLB.connect(sim_core, usrps, bee7blades, hosts, app_settings)
    else:
        raise RuntimeError('Invalid topology: ' + args.topology)

    print('[INFO] Running simulation...')
    sim_core.run(16e-9)

    # Sanity checks
    print('[INFO] Validating correctness...')
    for u in sim_core.list_components(rfnocsim.comptype.hardware, 'USRP.*'):
        sim_core.lookup(u).validate(0)
    print('[INFO] Validating feasibility...')
    for u in sim_core.list_components('', '.*'):
        c = sim_core.lookup(u)
        for a in c.get_util_attrs():
            if c.get_utilization(a) > 1.0:
                print('[WARN] %s: %s overutilized by %.1f%%' % (u,a,(c.get_utilization(a)-1)*100))
    # All BEE7 FPGAs run the same image, so serial-link bandwidth on every
    # FPGA should match the reference ("master") FPGA; warn on mismatches.
    print('[INFO] Validating BEE7 FPGA image IO consistency...')
    master_fpga = 'BEE7_000/FPGA_NE'
    master_stats = dict()
    for u in sim_core.list_components('', master_fpga + '/.*SER_.*'):
        c = sim_core.lookup(u)
        m = re.match('(.+)/(SER_.*)', u)
        master_stats[m.group(2)] = c.get_utilization('bandwidth')
    for ln in master_stats:
        for u in sim_core.list_components('', '.*/' + ln):
            c = sim_core.lookup(u)
            m = re.match('(.+)/(SER_.*)', u)
            if (c.get_utilization('bandwidth') != master_stats[ln]):
                print('[WARN] Data flowing over ' + ln + ' is probably different between ' + master_fpga + ' and ' + m.group(1))

    # Visualize various metrics
    vis = rfnocsim.Visualizer(sim_core)
    vis.show_network()
    vis.new_figure([1,2])
    vis.plot_utilization(rfnocsim.comptype.hardware, 'BEE7.*', 1)
    vis.plot_utilization(rfnocsim.comptype.producer, 'USRP.*', 2)
    vis.show_figure()
    vis.new_figure([1,2])
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_000.*FPGA_NW.*EXT.*', 1)
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_006.*FPGA_SE.*EXT.*', 2)
    vis.show_figure()
    vis.new_figure([1,3])
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_010.*FPGA_NW.*SER_EW_.*', 1)
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_010.*FPGA_NW.*SER_NS_.*', 2)
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_010.*FPGA_NW.*SER_XX_.*', 3)
    vis.show_figure()
    vis.new_figure([1,4])
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_000.*FPGA_NW.*EXT.*', 1)
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_001.*FPGA_NW.*EXT.*', 2)
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_002.*FPGA_NW.*EXT.*', 3)
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_003.*FPGA_NW.*EXT.*', 4)
    vis.show_figure()
    vis.new_figure([1,4])
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_010.*FPGA_NW.*EXT.*', 1)
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_010.*FPGA_NE.*EXT.*', 2)
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_010.*FPGA_SW.*EXT.*', 3)
    vis.plot_utilization(rfnocsim.comptype.channel, 'BEE7_010.*FPGA_SE.*EXT.*', 4)
    vis.show_figure()
    vis.new_figure([1,2])
    vis.plot_consumption_latency('.*','.*USRP_.*', 1)
    vis.plot_path_latency('tx[(0)]', '.*', 2)
    vis.show_figure()
    vis.plot_utilization(rfnocsim.comptype.producer, '.*MGMT_HOST.*')
+
+if __name__ == '__main__':
+ main()
diff --git a/fpga/usrp3/tools/utils/run_testbenches.py b/fpga/usrp3/tools/utils/run_testbenches.py
new file mode 100755
index 000000000..bcfb7e5c6
--- /dev/null
+++ b/fpga/usrp3/tools/utils/run_testbenches.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python3
+#
+# Copyright 2018 Ettus Research, a National Instruments Company
+#
+# SPDX-License-Identifier: LGPL-3.0-or-later
+#
+
+import argparse
+import os
+import sys
+import subprocess
+import logging
+import re
+import io
+import time
+import datetime
+from queue import Queue
+from threading import Thread
+
#-------------------------------------------------------
# Utilities
#-------------------------------------------------------

# Directory containing this script, and the usrp3 base directory two levels
# up from it (<base>/tools/utils -> <base>)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
BASE_DIR = os.path.split(os.path.split(SCRIPT_DIR)[0])[0]

# Module logger: INFO-and-above through a StreamHandler (stderr by default)
# formatted as "[name] - LEVEL - message"
_LOG = logging.getLogger(os.path.basename(__file__))
_LOG.setLevel(logging.INFO)
_STDOUT = logging.StreamHandler()
_LOG.addHandler(_STDOUT)
_FORMATTER = logging.Formatter('[%(name)s] - %(levelname)s - %(message)s')
_STDOUT.setFormatter(_FORMATTER)

# Internal status codes; positive values are application return codes
# passed through from the simulator process
RETCODE_SUCCESS = 0
RETCODE_PARSE_ERR = -1
RETCODE_EXEC_ERR = -2
RETCODE_COMPILE_ERR = -3
RETCODE_UNKNOWN_ERR = -4
+
def retcode_to_str(code):
    """Convert an internal status code to a short human-readable string.

    Positive codes are application return codes and are rendered as
    'AppError(<code>)'; known internal codes map to fixed names.
    """
    code = int(code)
    if code > RETCODE_SUCCESS:
        return 'AppError({code})'.format(code=code)
    names = {
        RETCODE_SUCCESS: 'OK',
        RETCODE_PARSE_ERR: 'ParseError',
        RETCODE_EXEC_ERR: 'ExecError',
        RETCODE_COMPILE_ERR: 'CompileError',
        RETCODE_UNKNOWN_ERR: 'UnknownError',
    }
    # Unknown negative codes raise KeyError, matching the original lookup
    return names[code]
+
def log_with_header(what, minlen = 0, ch = '#'):
    """Log `what` framed above and below by a banner of `ch` characters.

    The text is padded on both sides so the banner is at least `minlen`
    characters wide (always at least one space of padding per side).
    """
    padlen = max(int((minlen - len(what))/2), 1)
    padded = (' ' * padlen) + what + (' ' * padlen)
    banner = ch * len(padded)
    for line in (banner, padded, banner):
        _LOG.info(line)
+
+#-------------------------------------------------------
+# Simulation Functions
+#-------------------------------------------------------
+
def read_excludes_file(excludes_fname):
    """Read the testbench-excludes file and return its entries as a list.

    Any line containing a '#' anywhere is treated as a comment (this is the
    documented format of the excludes file); blank lines are skipped and
    surrounding whitespace is stripped from each entry.

    Args:
        excludes_fname: path to the excludes file, or None/'' for no file
    Returns:
        List of testbench names to exclude (empty if no file was given).
    """
    if not excludes_fname:
        return []
    # Fix: use a context manager so the file handle is closed deterministically
    # (the original opened the file inside a comprehension and leaked it)
    with open(excludes_fname) as ex_file:
        return [l.strip() for l in ex_file if (l.strip() and '#' not in l)]
+
def find_sims_on_fs(basedir, excludes):
    """Locate all testbench directories beneath `basedir`.

    A testbench is any directory containing a Makefile that includes
    viv_sim_preamble.mak. Directories whose basedir-relative name appears
    in `excludes` are skipped.

    Returns:
        Dict mapping relative testbench name -> absolute directory path.
    """
    preamble_re = re.compile('.*include.*viv_sim_preamble.mak.*')
    sims = {}
    for root, _, files in os.walk(basedir):
        if 'Makefile' not in files:
            continue
        name = os.path.relpath(root, basedir)
        with open(os.path.join(root, 'Makefile'), 'r') as mfile:
            is_testbench = any(preamble_re.match(line) for line in mfile)
        if is_testbench and name not in excludes:
            sims[name] = root
    return sims
+
def gather_target_sims(basedir, targets, excludes):
    """Resolve target regexes into simulations to run.

    Matches each regex in `targets` (a string or list of strings) against
    the testbenches discovered on the filesystem, removing duplicates.

    Returns:
        Alphabetically sorted list of (name, path) tuples.
    """
    fs_sims = find_sims_on_fs(basedir, excludes)
    if not isinstance(targets, list):
        targets = [targets]
    matched = {name
               for name in fs_sims
               for target in targets
               if re.match(target, name) is not None}
    return [(name, fs_sims[name]) for name in sorted(matched)]
+
def parse_output(simout):
    """Parse raw simulator output (bytes) into a result dictionary.

    Args:
        simout: complete stdout/stderr capture of a simulation run (bytes)
    Returns:
        Dict with at least 'retcode', 'stdout', 'passed', 'start_time' and
        'wall_time'; on a successfully parsed run also 'module',
        'sim_time_ns', 'tc_expected', 'tc_run' and 'tc_passed'.
    """
    # Gather results (basic metrics)
    results = {'retcode':RETCODE_SUCCESS, 'stdout':simout, 'passed':False}
    # Look for the following in the log:
    # - A start timestamp (indicates that Vivado started)
    # - The testbench infrastructure start header (indicates that the TB started)
    # - A stop timestamp (indicates that the TB stopped)
    tb_started = False
    compile_started = False
    results['start_time'] = '<unknown>'
    results['wall_time'] = '<unknown>'
    for line in simout.split(b'\n'):
        tsm = re.match(rb'TESTBENCH STARTED: (.+)', line)
        if tsm is not None:
            tb_started = True
        csm = re.match(rb'source .*viv_sim_project.tcl', line)
        if csm is not None:
            compile_started = True
        vsm = re.match(rb'# Start of session at: (.+)', line)
        if vsm is not None:
            results['start_time'] = str(vsm.group(1), 'ascii')
        tfm = re.match(rb'launch_simulation:.*; elapsed = (.+) \..*', line)
        if tfm is not None:
            results['wall_time'] = str(tfm.group(1), 'ascii')
    # Parse testbench results
    # We have two possible formats to parse because we have two simulation
    # test executors.
    # Format 0: reports "Tests Expected" plus run/passed counts
    tb_match_fmt0 = ([
        b'.*TESTBENCH FINISHED: (.+)\n',
        b' - Time elapsed: (.+) ns.*\n',
        b' - Tests Expected: (.+)\n',
        b' - Tests Run: (.+)\n',
        b' - Tests Passed: (.+)\n',
        b'Result: (PASSED|FAILED).*',
    ])
    m_fmt0 = re.match(b''.join(tb_match_fmt0), simout, re.DOTALL)
    # Format 1: reports run/passed/failed counts (no "Expected" line)
    tb_match_fmt1 = ([
        b'.*TESTBENCH FINISHED: (.*)\n',
        b' - Time elapsed: (.+) ns.*\n',
        b' - Tests Run: (.+)\n',
        b' - Tests Passed: (.+)\n',
        b' - Tests Failed: (.+)\n',
        b'Result: (PASSED|FAILED).*',
    ])
    m_fmt1 = re.match(b''.join(tb_match_fmt1), simout, re.DOTALL)

    # Remove escape characters (colors) from Vivado output
    ansi_escape = re.compile(r'(?:\x1B[\(@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
    plain_simout = ansi_escape.sub('', simout.decode("utf-8"))

    # Check for $error() and $fatal() output, which may be missed by the
    # testbench or may occur in a subsequent instance, after a pass.
    tb_match_error = ([
        '\n',
        '(Error|Fatal): .*\n',
        'Time: .+\n',
    ])
    m_error = re.search(''.join(tb_match_error), plain_simout)

    # Figure out the returncode
    retcode = RETCODE_UNKNOWN_ERR
    if m_fmt0 is not None or m_fmt1 is not None:
        retcode = RETCODE_SUCCESS
        if m_fmt0 is not None:
            # A run only counts as passed if the TB said PASSED and no
            # $error()/$fatal() output was found anywhere in the log
            results['passed'] = (m_fmt0.group(6) == b'PASSED' and m_error is None)
            results['module'] = m_fmt0.group(1)
            results['sim_time_ns'] = int(m_fmt0.group(2))
            results['tc_expected'] = int(m_fmt0.group(3))
            results['tc_run'] = int(m_fmt0.group(4))
            results['tc_passed'] = int(m_fmt0.group(5))
        else:
            results['passed'] = (m_fmt1.group(6) == b'PASSED' and m_error is None)
            results['module'] = m_fmt1.group(1)
            results['sim_time_ns'] = int(m_fmt1.group(2))
            # Format 1 has no "Expected" count: the run count (group 3) is
            # used for both expected and run
            results['tc_expected'] = int(m_fmt1.group(3))
            results['tc_run'] = int(m_fmt1.group(3))
            results['tc_passed'] = int(m_fmt1.group(4))
    elif tb_started:
        # TB launched but never printed a finish banner
        retcode = RETCODE_PARSE_ERR
    elif compile_started:
        # Vivado started compiling but the TB never ran
        retcode = RETCODE_COMPILE_ERR
    else:
        retcode = RETCODE_EXEC_ERR
    results['retcode'] = retcode
    return results
+
def run_sim(path, simulator, basedir, setupenv):
    """ Run the simulation at the specified path
        The simulator can be specified as the target
        A environment script can be run optionally

    Args:
        path: testbench directory, relative to basedir
        simulator: make target to invoke (e.g. 'xsim' or 'vsim')
        basedir: usrp3 base directory
        setupenv: optional environment setup script to source first, or None
    Returns:
        Result dict from parse_output, or a minimal error dict with
        'retcode', 'passed' and 'stdout' keys on failure.
    """
    try:
        # Optionally run an environment setup script
        if setupenv is None:
            setupenv = ''
            # Check if environment was setup
            if 'VIVADO_PATH' not in os.environ:
                return {'retcode': RETCODE_EXEC_ERR, 'passed':False, 'stdout':bytes('Simulation environment was not initialized\n', 'utf-8')}
        else:
            # Source the user-provided script before invoking make
            setupenv = '. ' + os.path.realpath(setupenv) + ';'
        # Run the simulation; stderr is folded into stdout so parse_output
        # sees the complete log
        return parse_output(
            subprocess.check_output(
                'cd {workingdir}; /bin/bash -c "{setupenv} make {simulator} 2>&1"'.format(
                    workingdir=os.path.join(basedir, path), setupenv=setupenv, simulator=simulator), shell=True))
    except subprocess.CalledProcessError as e:
        # make returned non-zero: propagate its (absolute) exit code
        return {'retcode': int(abs(e.returncode)), 'passed':False, 'stdout':e.output}
    except Exception as e:
        _LOG.error('Target ' + path + ' failed to run:\n' + str(e))
        return {'retcode': RETCODE_EXEC_ERR, 'passed':False, 'stdout':bytes(str(e), 'utf-8')}
    except:
        # Catch-all for non-Exception interrupts so the worker thread survives
        _LOG.error('Target ' + path + ' failed to run')
        return {'retcode': RETCODE_UNKNOWN_ERR, 'passed':False, 'stdout':bytes('Unknown Exception', 'utf-8')}
+
def run_sim_queue(run_queue, out_queue, simulator, basedir, setupenv):
    """ Thread worker for a simulation runner
        Pull a job from the run queue, run the sim, then place
        output in out_queue

    Args:
        run_queue: Queue of (name, path) jobs to execute
        out_queue: Queue receiving (name, result-dict) tuples
        simulator: make target to invoke for each job
        basedir: usrp3 base directory
        setupenv: optional environment setup script, or None
    """
    while not run_queue.empty():
        (name, path) = run_queue.get()
        try:
            _LOG.info('Starting: %s', name)
            result = run_sim(path, simulator, basedir, setupenv)
            out_queue.put((name, result))
            _LOG.info('FINISHED: %s (%s, %s)', name, retcode_to_str(result['retcode']), 'PASS' if result['passed'] else 'FAIL!')
        except KeyboardInterrupt:
            _LOG.warning('Target ' + name + ' received SIGINT. Aborting...')
            out_queue.put((name, {'retcode': RETCODE_EXEC_ERR, 'passed':False, 'stdout':bytes('Aborted by user', 'utf-8')}))
        except Exception as e:
            # Always emit a result so the consumer's completion count is correct
            _LOG.error('Target ' + name + ' failed to run:\n' + str(e))
            out_queue.put((name, {'retcode': RETCODE_UNKNOWN_ERR, 'passed':False, 'stdout':bytes(str(e), 'utf-8')}))
        finally:
            run_queue.task_done()
+
+#-------------------------------------------------------
+# Script Actions
+#-------------------------------------------------------
+
def do_list(args):
    """Print the name of every simulation matching the target filters.

    Returns 0 so the exit status reflects success.
    """
    excludes = read_excludes_file(args.excludes)
    for name, _ in gather_target_sims(args.basedir, args.target, excludes):
        print(name)
    return 0
+
def do_run(args):
    """ Build a simulation queue based on the specified
        args and process it

    Spawns up to args.jobs worker threads, waits for all queued testbenches
    to finish (printing a progress line), dumps each TB's captured output,
    then logs a summary table.

    Returns:
        Number of failed testbenches (0 means everything passed).
    """
    run_queue = Queue(maxsize=0)
    out_queue = Queue(maxsize=0)
    _LOG.info('Queueing the following targets to simulate:')
    excludes = read_excludes_file(args.excludes)
    name_maxlen = 0
    for (name, path) in gather_target_sims(args.basedir, args.target, excludes):
        run_queue.put((name, path))
        name_maxlen = max(name_maxlen, len(name))
        _LOG.info('* ' + name)
    # Spawn tasks to run builds
    num_sims = run_queue.qsize()
    num_jobs = min(num_sims, int(args.jobs))
    _LOG.info('Started ' + str(num_jobs) + ' job(s) to process queue...')
    results = {}
    for i in range(num_jobs):
        worker = Thread(target=run_sim_queue, args=(run_queue, out_queue, args.simulator, args.basedir, args.setupenv))
        # Non-daemon so in-flight simulations are not killed on interpreter exit
        worker.setDaemon(False)
        worker.start()
    # Wait for build queue to become empty
    start = datetime.datetime.now()
    try:
        # Poll the output queue once a second with a progress line
        while out_queue.qsize() < num_sims:
            tdiff = str(datetime.datetime.now() - start).split('.', 2)[0]
            print("\r>>> [%s] (%d/%d simulations completed) <<<" % (tdiff, out_queue.qsize(), num_sims), end='\r', flush=True)
            time.sleep(1.0)
        sys.stdout.write("\n")
    except (KeyboardInterrupt):
        _LOG.warning('Received SIGINT. Aborting... (waiting for pending jobs to finish)')
        # Flush run queue so workers stop picking up new jobs
        while not run_queue.empty():
            (name, path) = run_queue.get()
        raise SystemExit(1)

    # Collect results and dump each testbench's captured output
    results = {}
    result_all = 0
    while not out_queue.empty():
        (name, result) = out_queue.get()
        results[name] = result
        log_with_header(name)
        sys.stdout.buffer.write(result['stdout'])
        if not result['passed']:
            result_all += 1
        sys.stdout.write('\n\n\n')
        sys.stdout.flush()
        time.sleep(1.0)

    hdr_len = name_maxlen + 62 # 62 is the report line length
    log_with_header('RESULTS', hdr_len)
    for name in sorted(results):
        r = results[name]
        # 'module' is only present when parse_output fully parsed the log
        if 'module' in r:
            _LOG.info('* %s : %s (Expected=%02d, Run=%02d, Passed=%02d, Elapsed=%s)',
                name.ljust(name_maxlen), ('Passed' if r['passed'] else 'FAILED'), r['tc_expected'], r['tc_run'], r['tc_passed'], r['wall_time'])
        else:
            _LOG.info('* %s : %s (Status = %s)', name.ljust(name_maxlen), ('Passed' if r['passed'] else 'FAILED'),
                retcode_to_str(r['retcode']))
    _LOG.info('='*hdr_len)
    _LOG.info('SUMMARY: %d out of %d tests passed. Time elapsed was %s'%(num_sims - result_all, num_sims, str(datetime.datetime.now() - start).split('.', 2)[0]))
    _LOG.info('#'*hdr_len)
    return result_all
+
+
def do_cleanup(args):
    """ Run make cleanall for all simulations

    If no --setupenv script is given, the Vivado environment must already
    be initialized (VIVADO_PATH set); otherwise the script is sourced
    before each make invocation.

    Returns:
        0 on completion.
    """
    setupenv = args.setupenv
    if setupenv is None:
        setupenv = ''
        # Check if environment was setup. Only require VIVADO_PATH when no
        # setup script was given, and never realpath() the empty string
        # (which would produce '. <cwd>;' — sourcing a directory). This
        # mirrors the logic in run_sim.
        if 'VIVADO_PATH' not in os.environ:
            raise RuntimeError('Simulation environment was not initialized')
    else:
        setupenv = '. ' + os.path.realpath(setupenv) + ';'
    excludes = read_excludes_file(args.excludes)
    for (name, path) in gather_target_sims(args.basedir, args.target, excludes):
        _LOG.info('Cleaning up %s', name)
        os.chdir(os.path.join(args.basedir, path))
        subprocess.Popen('{setupenv} make cleanall'.format(setupenv=setupenv), shell=True).wait()
    return 0
+
def do_report(args):
    """Write a CSV report summarizing the last run of each matching TB.

    For each testbench the most recent <simulator>.log is parsed (if it
    exists); testbenches without a log are reported as NOT_RUN.

    Returns 0 on completion.
    """
    keys = ['module', 'status', 'retcode', 'start_time', 'wall_time',
            'sim_time_ns', 'tc_expected', 'tc_run', 'tc_passed']
    with open(args.report, 'w') as repfile:
        repfile.write(','.join(k.upper() for k in keys) + '\n')
        excludes = read_excludes_file(args.excludes)
        for (name, path) in gather_target_sims(args.basedir, args.target, excludes):
            # Defaults for a testbench that has never been run
            row = {'module': str(name), 'status': 'NOT_RUN', 'retcode': '<unknown>',
                   'start_time': '<unknown>', 'wall_time': '<unknown>',
                   'sim_time_ns': 0, 'tc_expected': 0, 'tc_run': 0, 'tc_passed': 0}
            logpath = os.path.join(path, args.simulator + '.log')
            if os.path.isfile(logpath):
                with open(logpath, 'rb') as logfile:
                    parsed = parse_output(logfile.read())
                if parsed['retcode'] != RETCODE_SUCCESS:
                    row['retcode'] = retcode_to_str(parsed['retcode'])
                    row['status'] = 'ERROR'
                    row['start_time'] = parsed['start_time']
                else:
                    row = parsed
                    row['module'] = name
                    row['status'] = 'PASSED' if parsed['passed'] else 'FAILED'
                    row['retcode'] = retcode_to_str(parsed['retcode'])
            repfile.write(','.join(str(row[k]) for k in keys) + '\n')
    _LOG.info('Testbench report written to ' + args.report)
    return 0
+
+# Parse command line options
+def get_options():
+ parser = argparse.ArgumentParser(description='Batch testbench execution script')
+ parser.add_argument('-d', '--basedir', default=BASE_DIR, help='Base directory for the usrp3 codebase')
+ parser.add_argument('-s', '--simulator', choices=['xsim', 'vsim'], default='xsim', help='Simulator name')
+ parser.add_argument('-e', '--setupenv', default=None, help='Optional environment setup script to run for each TB')
+ parser.add_argument('-r', '--report', default='testbench_report.csv', help='Name of the output report file')
+ parser.add_argument('-x', '--excludes', default=None, help='Name of the excludes file. It contains all targets to exlude.')
+ parser.add_argument('-j', '--jobs', default=1, help='Number of parallel simulation jobs to run')
+ parser.add_argument('action', choices=['run', 'cleanup', 'list', 'report'], default='list', help='What to do?')
+ parser.add_argument('target', nargs='*', default='.*', help='Space separated simulation target regexes')
+ return parser.parse_args()
+
def main():
    """Script entry point: dispatch the requested action to its handler."""
    args = get_options()
    dispatch = {
        'list': do_list,
        'run': do_run,
        'cleanup': do_cleanup,
        'report': do_report,
    }
    return dispatch[args.action](args)
+
+if __name__ == '__main__':
+ exit(main())
diff --git a/fpga/usrp3/tools/utils/testbenches.excludes b/fpga/usrp3/tools/utils/testbenches.excludes
new file mode 100644
index 000000000..7ac5b134f
--- /dev/null
+++ b/fpga/usrp3/tools/utils/testbenches.excludes
@@ -0,0 +1,15 @@
+# This file contains all testbenches to exclude from the filter
+# list discovered by run_testbenches.py
+# NOTE: Lines containing "#" are treated as a comment
+
+lib/rfnoc/noc_block_eq_tb
+lib/rfnoc/noc_block_ofdm_tb
+lib/rfnoc/noc_block_schmidl_cox_tb
+top/e31x/sim/dram_test
+top/n3xx/sim/arm_to_sfp_loopback
+top/n3xx/sim/aurora_loopback
+top/n3xx/sim/one_gig_eth_loopback
+top/n3xx/sim/ten_gig_eth_loopback
+top/x300/sim/x300_pcie_int
+top/n3xx/dboards/eiscat/radio/noc_block_ddc_eiscat_tb
+top/n3xx/dboards/eiscat/radio/noc_block_radio_core_eiscat_tb