configs: Fix Python 3 iterator and exec compatibility issues

Python 2.7 returned lists from operations such as map, range, and
dict.keys(); Python 3 returns iterators or views instead. To make the
configs Python 3 compliant, add explicit conversions from iterators to
lists where needed, replace xrange with range, and replace execfile,
which Python 3 removed, with an explicit exec of the compiled file
contents.
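
For illustration, a minimal sketch of the three patterns applied in
this patch (the surrounding names are placeholders, not taken from any
one config file):

    # dict.keys() and map() return views/iterators in Python 3, so
    # materialize a list before sorting or indexing the result.
    benchs = list(Benchmarks.keys())   # Python 2: Benchmarks.keys()
    benchs.sort()

    # xrange is gone in Python 3; range is lazy there, so it is a
    # drop-in replacement for loop bounds.
    for i in range(num_cpus):          # Python 2: xrange(num_cpus)
        pass

    # execfile was removed in Python 3; read, compile, and exec the
    # file contents instead.
    exec(compile(open(path).read(), path, 'exec'))  # Py2: execfile(path)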

This change doesn't fix import paths, since doing so might require
restructuring the configs slightly.

Change-Id: Idcea8482b286779fc98b4e144ca8f54069c08024
Signed-off-by: Andreas Sandberg <andreas.sandberg@arm.com>
Reviewed-on: https://gem5-review.googlesource.com/c/16002
Reviewed-by: Gabe Black <gabeblack@google.com>
diff --git a/configs/common/BPConfig.py b/configs/common/BPConfig.py
index 5e5b92f..c4e40e7 100644
--- a/configs/common/BPConfig.py
+++ b/configs/common/BPConfig.py
@@ -79,7 +79,7 @@
 
 def bp_names():
     """Return a list of valid Branch Predictor names."""
-    return _bp_classes.keys()
+    return list(_bp_classes.keys())
 
 # Add all BPs in the object hierarchy.
 for name, cls in inspect.getmembers(m5.objects, is_bp_class):
diff --git a/configs/common/Benchmarks.py b/configs/common/Benchmarks.py
index b7d10b5..f7d1b4d 100644
--- a/configs/common/Benchmarks.py
+++ b/configs/common/Benchmarks.py
@@ -141,6 +141,6 @@
                             None, 'android-ics')]
 }
 
-benchs = Benchmarks.keys()
+benchs = list(Benchmarks.keys())
 benchs.sort()
 DefinedBenchmarks = ", ".join(benchs)
diff --git a/configs/common/CacheConfig.py b/configs/common/CacheConfig.py
index 3fa3676..368356f 100644
--- a/configs/common/CacheConfig.py
+++ b/configs/common/CacheConfig.py
@@ -97,7 +97,7 @@
     if options.memchecker:
         system.memchecker = MemChecker()
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         if options.caches:
             icache = icache_class(size=options.l1i_size,
                                   assoc=options.l1i_assoc)
diff --git a/configs/common/CpuConfig.py b/configs/common/CpuConfig.py
index 1524b16..80e3766 100644
--- a/configs/common/CpuConfig.py
+++ b/configs/common/CpuConfig.py
@@ -99,7 +99,7 @@
 
 def cpu_names():
     """Return a list of valid CPU names."""
-    return _cpu_classes.keys()
+    return list(_cpu_classes.keys())
 
 def config_etrace(cpu_cls, cpu_list, options):
     if issubclass(cpu_cls, m5.objects.DerivO3CPU):
diff --git a/configs/common/FSConfig.py b/configs/common/FSConfig.py
index fc21519..13c29ef 100644
--- a/configs/common/FSConfig.py
+++ b/configs/common/FSConfig.py
@@ -548,7 +548,7 @@
     # Set up the Intel MP table
     base_entries = []
     ext_entries = []
-    for i in xrange(numCPUs):
+    for i in range(numCPUs):
         bp = X86IntelMPProcessor(
                 local_apic_id = i,
                 local_apic_version = 0x14,
diff --git a/configs/common/GPUTLBConfig.py b/configs/common/GPUTLBConfig.py
index 3e47f1d..80aad0b 100644
--- a/configs/common/GPUTLBConfig.py
+++ b/configs/common/GPUTLBConfig.py
@@ -69,7 +69,7 @@
 def create_TLB_Coalescer(options, my_level, my_index, TLB_name, Coalescer_name):
     # arguments: options, TLB level, number of private structures for this Level,
     # TLB name and  Coalescer name
-    for i in xrange(my_index):
+    for i in range(my_index):
         TLB_name.append(eval(TLB_constructor(my_level)))
         Coalescer_name.append(eval(Coalescer_constructor(my_level)))
 
@@ -109,7 +109,7 @@
     # Create the hiearchy
     # Call the appropriate constructors and add objects to the system
 
-    for i in xrange(len(TLB_hierarchy)):
+    for i in range(len(TLB_hierarchy)):
         hierarchy_level = TLB_hierarchy[i]
         level = i+1
         for TLB_type in hierarchy_level:
@@ -143,7 +143,7 @@
     # Each TLB is connected with its Coalescer through a single port.
     # There is a one-to-one mapping of TLBs to Coalescers at a given level
     # This won't be modified no matter what the hierarchy looks like.
-    for i in xrange(len(TLB_hierarchy)):
+    for i in range(len(TLB_hierarchy)):
         hierarchy_level = TLB_hierarchy[i]
         level = i+1
         for TLB_type in hierarchy_level:
@@ -159,7 +159,7 @@
         name = TLB_type['name']
         num_TLBs = TLB_type['width']
         if name == 'l1':     # L1 D-TLBs
-            tlb_per_cu = num_TLBs / n_cu
+            tlb_per_cu = num_TLBs // n_cu
             for cu_idx in range(n_cu):
                 if tlb_per_cu:
                     for tlb in range(tlb_per_cu):
diff --git a/configs/common/HMC.py b/configs/common/HMC.py
index 10d8a71..61e521d 100644
--- a/configs/common/HMC.py
+++ b/configs/common/HMC.py
@@ -337,16 +337,16 @@
                      num_lanes=opt.num_lanes_per_link,
                      link_speed=opt.serial_link_speed,
                      delay=opt.total_ctrl_latency) for i in
-          xrange(opt.num_serial_links)]
+          range(opt.num_serial_links)]
     system.hmc_host.seriallink = sl
 
     # enable global monitor
     if opt.enable_global_monitor:
         system.hmc_host.lmonitor = [CommMonitor() for i in
-                                    xrange(opt.num_serial_links)]
+                                    range(opt.num_serial_links)]
 
     # set the clock frequency for serial link
-    for i in xrange(opt.num_serial_links):
+    for i in range(opt.num_serial_links):
         clk = opt.link_controller_frequency
         vd = VoltageDomain(voltage='1V')
         scd = SrcClockDomain(clock=clk, voltage_domain=vd)
@@ -357,7 +357,7 @@
     hh = system.hmc_host
     if opt.arch == "distributed":
         mb = system.membus
-        for i in xrange(opt.num_links_controllers):
+        for i in range(opt.num_links_controllers):
             if opt.enable_global_monitor:
                 mb.master = hh.lmonitor[i].slave
                 hh.lmonitor[i].master = hh.seriallink[i].slave
@@ -375,7 +375,7 @@
             mb.master = hh.seriallink[1].slave
 
     if opt.arch == "same":
-        for i in xrange(opt.num_links_controllers):
+        for i in range(opt.num_links_controllers):
             if opt.enable_global_monitor:
                 hh.lmonitor[i].master = hh.seriallink[i].slave
 
@@ -395,7 +395,7 @@
     system.mem_ranges = addr_ranges_vaults
 
     if opt.enable_link_monitor:
-        lm = [CommMonitor() for i in xrange(opt.num_links_controllers)]
+        lm = [CommMonitor() for i in range(opt.num_links_controllers)]
         system.hmc_dev.lmonitor = lm
 
     # 4 HMC Crossbars located in its logic-base (LoB)
@@ -403,17 +403,17 @@
                           frontend_latency=opt.xbar_frontend_latency,
                           forward_latency=opt.xbar_forward_latency,
                           response_latency=opt.xbar_response_latency) for i in
-          xrange(opt.number_mem_crossbar)]
+          range(opt.number_mem_crossbar)]
     system.hmc_dev.xbar = xb
 
-    for i in xrange(opt.number_mem_crossbar):
+    for i in range(opt.number_mem_crossbar):
         clk = opt.xbar_frequency
         vd = VoltageDomain(voltage='1V')
         scd = SrcClockDomain(clock=clk, voltage_domain=vd)
         system.hmc_dev.xbar[i].clk_domain = scd
 
     # Attach 4 serial link to 4 crossbar/s
-    for i in xrange(opt.num_serial_links):
+    for i in range(opt.num_serial_links):
         if opt.enable_link_monitor:
             system.hmc_host.seriallink[i].master = \
                 system.hmc_dev.lmonitor[i].slave
@@ -429,7 +429,7 @@
         # create a list of buffers
         system.hmc_dev.buffers = [Bridge(req_size=opt.xbar_buffer_size_req,
                                          resp_size=opt.xbar_buffer_size_resp)
-                                  for i in xrange(numx*(opt.mem_chunk-1))]
+                                  for i in range(numx*(opt.mem_chunk-1))]
 
         # Buffer iterator
         it = iter(range(len(system.hmc_dev.buffers)))
diff --git a/configs/common/MemConfig.py b/configs/common/MemConfig.py
index 3603580..b6e6663 100644
--- a/configs/common/MemConfig.py
+++ b/configs/common/MemConfig.py
@@ -86,7 +86,7 @@
 
 def mem_names():
     """Return a list of valid memory names."""
-    return _mem_classes.keys()
+    return list(_mem_classes.keys())
 
 # Add all memory controllers in the object hierarchy.
 for name, cls in inspect.getmembers(m5.objects, is_mem_class):
@@ -215,7 +215,7 @@
     # array of controllers and set their parameters to match their
     # address mapping in the case of a DRAM
     for r in system.mem_ranges:
-        for i in xrange(nbr_mem_ctrls):
+        for i in range(nbr_mem_ctrls):
             mem_ctrl = create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits,
                                        intlv_size)
             # Set the number of ranks based on the command-line
@@ -233,7 +233,7 @@
     subsystem.mem_ctrls = mem_ctrls
 
     # Connect the controllers to the membus
-    for i in xrange(len(subsystem.mem_ctrls)):
+    for i in range(len(subsystem.mem_ctrls)):
         if opt_mem_type == "HMC_2500_1x32":
-            subsystem.mem_ctrls[i].port = xbar[i/4].master
+            subsystem.mem_ctrls[i].port = xbar[i//4].master
             # Set memory device size. There is an independent controller for
diff --git a/configs/common/Options.py b/configs/common/Options.py
index 7963013..7b231c7 100644
--- a/configs/common/Options.py
+++ b/configs/common/Options.py
@@ -339,8 +339,9 @@
     # System options
     parser.add_option("--kernel", action="store", type="string")
     parser.add_option("--os-type", action="store", type="choice",
-            choices=os_types[buildEnv['TARGET_ISA']], default="linux",
-            help="Specifies type of OS to boot")
+                      choices=os_types[str(buildEnv['TARGET_ISA'])],
+                      default="linux",
+                      help="Specifies type of OS to boot")
     parser.add_option("--script", action="store", type="string")
     parser.add_option("--frame-capture", action="store_true",
             help="Stores changed frame buffers from the VNC server to compressed "\
diff --git a/configs/common/PlatformConfig.py b/configs/common/PlatformConfig.py
index 306b732..ae55d1a 100644
--- a/configs/common/PlatformConfig.py
+++ b/configs/common/PlatformConfig.py
@@ -103,7 +103,7 @@
 
 def platform_names():
     """Return a list of valid Platform names."""
-    return _platform_classes.keys() + _platform_aliases.keys()
+    return list(_platform_classes.keys()) + list(_platform_aliases.keys())
 
 # Add all Platforms in the object hierarchy.
 for name, cls in inspect.getmembers(m5.objects, is_platform_class):
diff --git a/configs/common/Simulation.py b/configs/common/Simulation.py
index 19bd962..5b1ab01 100644
--- a/configs/common/Simulation.py
+++ b/configs/common/Simulation.py
@@ -453,18 +453,18 @@
     switch_cpus = None
 
     if options.prog_interval:
-        for i in xrange(np):
+        for i in range(np):
             testsys.cpu[i].progress_interval = options.prog_interval
 
     if options.maxinsts:
-        for i in xrange(np):
+        for i in range(np):
             testsys.cpu[i].max_insts_any_thread = options.maxinsts
 
     if cpu_class:
         switch_cpus = [cpu_class(switched_out=True, cpu_id=(i))
-                       for i in xrange(np)]
+                       for i in range(np)]
 
-        for i in xrange(np):
+        for i in range(np):
             if options.fast_forward:
                 testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
             switch_cpus[i].system = testsys
@@ -489,7 +489,7 @@
             CpuConfig.config_etrace(cpu_class, switch_cpus, options)
 
         testsys.switch_cpus = switch_cpus
-        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
+        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in range(np)]
 
     if options.repeat_switch:
         switch_class = getCPUClass(options.cpu_type)[0]
@@ -502,9 +502,9 @@
             sys.exit(1)
 
         repeat_switch_cpus = [switch_class(switched_out=True, \
-                                               cpu_id=(i)) for i in xrange(np)]
+                                               cpu_id=(i)) for i in range(np)]
 
-        for i in xrange(np):
+        for i in range(np):
             repeat_switch_cpus[i].system = testsys
             repeat_switch_cpus[i].workload = testsys.cpu[i].workload
             repeat_switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
@@ -520,18 +520,18 @@
 
         if cpu_class:
             repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i])
-                                      for i in xrange(np)]
+                                      for i in range(np)]
         else:
             repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i])
-                                      for i in xrange(np)]
+                                      for i in range(np)]
 
     if options.standard_switch:
         switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i))
-                       for i in xrange(np)]
+                       for i in range(np)]
         switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i))
-                        for i in xrange(np)]
+                        for i in range(np)]
 
-        for i in xrange(np):
+        for i in range(np):
             switch_cpus[i].system =  testsys
             switch_cpus_1[i].system =  testsys
             switch_cpus[i].workload = testsys.cpu[i].workload
@@ -572,8 +572,12 @@
 
         testsys.switch_cpus = switch_cpus
         testsys.switch_cpus_1 = switch_cpus_1
-        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
-        switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)]
+        switch_cpu_list = [
+            (testsys.cpu[i], switch_cpus[i]) for i in range(np)
+        ]
+        switch_cpu_list1 = [
+            (switch_cpus[i], switch_cpus_1[i]) for i in range(np)
+        ]
 
     # set the checkpoint in the cpu before m5.instantiate is called
     if options.take_checkpoints != None and \
@@ -581,7 +585,7 @@
         offset = int(options.take_checkpoints)
         # Set an instruction break point
         if options.simpoint:
-            for i in xrange(np):
+            for i in range(np):
                 if testsys.cpu[i].workload[0].simpoint == 0:
                     fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
                 checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset
@@ -592,7 +596,7 @@
             options.take_checkpoints = offset
             # Set all test cpus with the right number of instructions
             # for the upcoming simulation
-            for i in xrange(np):
+            for i in range(np):
                 testsys.cpu[i].max_insts_any_thread = offset
 
     if options.take_simpoint_checkpoints != None:
diff --git a/configs/common/SysPaths.py b/configs/common/SysPaths.py
index 9a234cc..17d5fb8 100644
--- a/configs/common/SysPaths.py
+++ b/configs/common/SysPaths.py
@@ -26,6 +26,8 @@
 #
 # Authors: Ali Saidi
 
+
+from six import string_types
 import os, sys
 
 config_path = os.path.dirname(os.path.abspath(__file__))
@@ -35,7 +37,7 @@
     _sys_paths = None
 
     def __init__(self, subdirs, sys_paths=None):
-        if isinstance(subdirs, basestring):
+        if isinstance(subdirs, string_types):
             subdirs = [subdirs]
         self._subdir = os.path.join(*subdirs)
         if sys_paths:
@@ -55,16 +57,16 @@
-            paths = filter(os.path.isdir, paths)
+            paths = list(filter(os.path.isdir, paths))
 
             if not paths:
-                raise IOError, "Can't find a path to system files."
+                raise IOError("Can't find a path to system files.")
 
-            self._sys_paths = paths
+            self._sys_paths = list(paths)
 
         filepath = os.path.join(self._subdir, filename)
         paths = (os.path.join(p, filepath) for p in self._sys_paths)
         try:
             return next(p for p in paths if os.path.exists(p))
         except StopIteration:
-            raise IOError, "Can't find file '%s' on path." % filename
+            raise IOError("Can't find file '%s' on path." % filename)
 
 disk = PathSearchFunc('disks')
 binary = PathSearchFunc('binaries')
diff --git a/configs/common/cores/arm/HPI.py b/configs/common/cores/arm/HPI.py
index 2efb7df..d105790 100644
--- a/configs/common/cores/arm/HPI.py
+++ b/configs/common/cores/arm/HPI.py
@@ -177,7 +177,7 @@
 
         defns = []
         # Then apply them to the produced new env
-        for i in xrange(0, len(bindings)):
+        for i in range(0, len(bindings)):
             name, binding_expr = bindings[i]
             defns.append(binding_expr(new_env))
 
diff --git a/configs/common/cpu2000.py b/configs/common/cpu2000.py
index da87507..8143e45 100644
--- a/configs/common/cpu2000.py
+++ b/configs/common/cpu2000.py
@@ -93,13 +93,13 @@
         try:
             func = getattr(self.__class__, input_set)
         except AttributeError:
-            raise AttributeError, \
-                  'The benchmark %s does not have the %s input set' % \
-                  (self.name, input_set)
+            raise AttributeError(
+                'The benchmark %s does not have the %s input set' % \
+                (self.name, input_set))
 
         executable = joinpath(spec_dist, 'binaries', isa, os, self.binary)
         if not isfile(executable):
-            raise AttributeError, '%s not found' % executable
+            raise AttributeError('%s not found' % executable)
         self.executable = executable
 
         # root of tree for input & output data files
@@ -113,7 +113,7 @@
         self.input_set = input_set
 
         if not isdir(inputs_dir):
-            raise AttributeError, '%s not found' % inputs_dir
+            raise AttributeError('%s not found' % inputs_dir)
 
         self.inputs_dir = [ inputs_dir ]
         if isdir(all_dir):
@@ -670,7 +670,7 @@
         elif (isa == 'sparc' or isa == 'sparc32'):
             self.endian = 'bendian'
         else:
-            raise AttributeError, "unknown ISA %s" % isa
+            raise AttributeError("unknown ISA %s" % isa)
 
         super(vortex, self).__init__(isa, os, input_set)
 
diff --git a/configs/dist/sw.py b/configs/dist/sw.py
index e7f31c0..8dca62f 100644
--- a/configs/dist/sw.py
+++ b/configs/dist/sw.py
@@ -57,7 +57,7 @@
                                       sync_repeat = options.dist_sync_repeat,
                                       is_switch = True,
                                       num_nodes = options.dist_size)
-                       for i in xrange(options.dist_size)]
+                       for i in range(options.dist_size)]
 
     for (i, link) in enumerate(switch.portlink):
         link.int0 = switch.interface[i]
diff --git a/configs/dram/lat_mem_rd.py b/configs/dram/lat_mem_rd.py
index dc80bd2..a1aa77d 100644
--- a/configs/dram/lat_mem_rd.py
+++ b/configs/dram/lat_mem_rd.py
@@ -188,7 +188,7 @@
     protolib.encodeMessage(proto_out, header)
 
     # create a list of every single address to touch
-    addrs = range(0, max_addr, burst_size)
+    addrs = list(range(0, max_addr, burst_size))
 
     import random
     random.shuffle(addrs)
diff --git a/configs/dram/low_power_sweep.py b/configs/dram/low_power_sweep.py
index 2aa6490..e9714a6 100644
--- a/configs/dram/low_power_sweep.py
+++ b/configs/dram/low_power_sweep.py
@@ -166,11 +166,11 @@
 
 # We sweep itt max using the multipliers specified by the user.
 itt_max_str = args.itt_list.strip().split()
-itt_max_multiples = map(lambda x : int(x), itt_max_str)
+itt_max_multiples = [ int(x) for x in itt_max_str ]
 if len(itt_max_multiples) == 0:
     fatal("String for itt-max-list detected empty\n")
 
-itt_max_values = map(lambda m : pd_entry_time * m, itt_max_multiples)
+itt_max_values = [ pd_entry_time * m for m in itt_max_multiples ]
 
 # Generate request addresses in the entire range, assume we start at 0
 max_addr = mem_range.end
diff --git a/configs/example/apu_se.py b/configs/example/apu_se.py
index bba0d0f..146863d 100644
--- a/configs/example/apu_se.py
+++ b/configs/example/apu_se.py
@@ -225,7 +225,7 @@
 
 # List of compute units; one GPU can have multiple compute units
 compute_units = []
-for i in xrange(n_cu):
+for i in range(n_cu):
     compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane,
                                      num_SIMDs = options.simds_per_cu,
                                      wfSize = options.wf_size,
@@ -255,8 +255,8 @@
                                              options.outOfOrderDataDelivery))
     wavefronts = []
     vrfs = []
-    for j in xrange(options.simds_per_cu):
-        for k in xrange(shader.n_wf):
+    for j in range(options.simds_per_cu):
+        for k in range(shader.n_wf):
             wavefronts.append(Wavefront(simdId = j, wf_slot_id = k,
                                         wfSize = options.wf_size))
         vrfs.append(VectorRegisterFile(simd_id=j,
@@ -311,7 +311,7 @@
     future_cpu_list = []
 
     # Initial CPUs to be used during fast-forwarding.
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         cpu = CpuClass(cpu_id = i,
                        clk_domain = SrcClockDomain(
                            clock = options.CPUClock,
@@ -328,7 +328,7 @@
     MainCpuClass = CpuClass
 
 # CPs to be used throughout the simulation.
-for i in xrange(options.num_cp):
+for i in range(options.num_cp):
     cp = MainCpuClass(cpu_id = options.num_cpus + i,
                       clk_domain = SrcClockDomain(
                           clock = options.CPUClock,
@@ -337,7 +337,7 @@
     cp_list.append(cp)
 
 # Main CPUs (to be used after fast-forwarding if fast-forwarding is specified).
-for i in xrange(options.num_cpus):
+for i in range(options.num_cpus):
     cpu = MainCpuClass(cpu_id = i,
                        clk_domain = SrcClockDomain(
                            clock = options.CPUClock,
@@ -400,7 +400,7 @@
     cp.workload = host_cpu.workload
 
 if fast_forward:
-    for i in xrange(len(future_cpu_list)):
+    for i in range(len(future_cpu_list)):
         future_cpu_list[i].workload = cpu_list[i].workload
         future_cpu_list[i].createThreads()
 
@@ -408,7 +408,7 @@
 # List of CPUs that must be switched when moving between KVM and simulation
 if fast_forward:
     switch_cpu_list = \
-        [(cpu_list[i], future_cpu_list[i]) for i in xrange(options.num_cpus)]
+        [(cpu_list[i], future_cpu_list[i]) for i in range(options.num_cpus)]
 
 # Full list of processing cores in the system. Note that
 # dispatcher is also added to cpu_list although it is
@@ -431,7 +431,7 @@
     have_kvm_support = 'BaseKvmCPU' in globals()
     if have_kvm_support and buildEnv['TARGET_ISA'] == "x86":
         system.vm = KvmVM()
-        for i in xrange(len(host_cpu.workload)):
+        for i in range(len(host_cpu.workload)):
             host_cpu.workload[i].useArchPT = True
             host_cpu.workload[i].kvmInSE = True
     else:
@@ -479,15 +479,15 @@
 gpu_port_idx = gpu_port_idx - options.num_cp * 2
 
 wavefront_size = options.wf_size
-for i in xrange(n_cu):
+for i in range(n_cu):
     # The pipeline issues wavefront_size number of uncoalesced requests
     # in one GPU issue cycle. Hence wavefront_size mem ports.
-    for j in xrange(wavefront_size):
+    for j in range(wavefront_size):
         system.cpu[shader_idx].CUs[i].memory_port[j] = \
                   system.ruby._cpu_ports[gpu_port_idx].slave[j]
     gpu_port_idx += 1
 
-for i in xrange(n_cu):
+for i in range(n_cu):
     if i > 0 and not i % options.cu_per_sqc:
         print("incrementing idx on ", i)
         gpu_port_idx += 1
@@ -496,7 +496,7 @@
 gpu_port_idx = gpu_port_idx + 1
 
 # attach CP ports to Ruby
-for i in xrange(options.num_cp):
+for i in range(options.num_cp):
     system.cpu[cp_idx].createInterruptController()
     system.cpu[cp_idx].dcache_port = \
                 system.ruby._cpu_ports[gpu_port_idx + i * 2].slave
diff --git a/configs/example/fs.py b/configs/example/fs.py
index 6be9ba2..70275a0 100644
--- a/configs/example/fs.py
+++ b/configs/example/fs.py
@@ -138,7 +138,7 @@
 
     # For now, assign all the CPUs to the same clock domain
     test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
-                    for i in xrange(np)]
+                    for i in range(np)]
 
     if CpuConfig.is_kvm_cpu(TestCPUClass) or CpuConfig.is_kvm_cpu(FutureClass):
         test_sys.kvm_vm = KvmVM()
@@ -194,7 +194,7 @@
             if np > 1:
                 fatal("SimPoint generation not supported with more than one CPUs")
 
-        for i in xrange(np):
+        for i in range(np):
             if options.simpoint_profile:
                 test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
             if options.checker:
@@ -277,7 +277,7 @@
     # memory bus
     drive_sys.mem_ctrls = [DriveMemClass(range = r)
                            for r in drive_sys.mem_ranges]
-    for i in xrange(len(drive_sys.mem_ctrls)):
+    for i in range(len(drive_sys.mem_ctrls)):
         drive_sys.mem_ctrls[i].port = drive_sys.membus.master
 
     drive_sys.init_param = options.init_param
diff --git a/configs/example/garnet_synth_traffic.py b/configs/example/garnet_synth_traffic.py
index 92fb3a0..f5b7690 100644
--- a/configs/example/garnet_synth_traffic.py
+++ b/configs/example/garnet_synth_traffic.py
@@ -87,7 +87,8 @@
 #
 Ruby.define_options(parser)
 
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile(open(os.path.join(config_root, "common", "Options.py")).read(),
+             os.path.join(config_root, "common", "Options.py"), 'exec'))
 
 (options, args) = parser.parse_args()
 
@@ -112,7 +113,7 @@
                      inj_vnet=options.inj_vnet,
                      precision=options.precision,
                      num_dest=options.num_dirs) \
-         for i in xrange(options.num_cpus) ]
+         for i in range(options.num_cpus) ]
 
 # create the desired simulated system
 system = System(cpu = cpus, mem_ranges = [AddrRange(options.mem_size)])
diff --git a/configs/example/hmctest.py b/configs/example/hmctest.py
index c370d0a..091ed8b 100644
--- a/configs/example/hmctest.py
+++ b/configs/example/hmctest.py
@@ -57,17 +57,17 @@
     system.clk_domain = SrcClockDomain(clock=clk, voltage_domain=vd)
     # add traffic generators to the system
     system.tgen = [TrafficGen(config_file=options.tgen_cfg_file) for i in
-                   xrange(options.num_tgen)]
+                   range(options.num_tgen)]
     # Config memory system with given HMC arch
     MemConfig.config_mem(options, system)
     # Connect the traffic generatiors
     if options.arch == "distributed":
-        for i in xrange(options.num_tgen):
+        for i in range(options.num_tgen):
             system.tgen[i].port = system.membus.slave
         # connect the system port even if it is not used in this example
         system.system_port = system.membus.slave
     if options.arch == "mixed":
-        for i in xrange(int(options.num_tgen/2)):
+        for i in range(int(options.num_tgen/2)):
             system.tgen[i].port = system.membus.slave
         hh = system.hmc_host
         if options.enable_global_monitor:
@@ -82,7 +82,7 @@
         system.system_port = system.membus.slave
     if options.arch == "same":
         hh = system.hmc_host
-        for i in xrange(options.num_links_controllers):
+        for i in range(options.num_links_controllers):
             if options.enable_global_monitor:
                 system.tgen[i].port = hh.lmonitor[i].slave
             else:
diff --git a/configs/example/memcheck.py b/configs/example/memcheck.py
index c2eed19..1dae86f 100644
--- a/configs/example/memcheck.py
+++ b/configs/example/memcheck.py
@@ -246,9 +246,9 @@
      # The levels are indexing backwards through the list
      ntesters = testerspec[len(cachespec) - level]
 
-     testers = [proto_tester() for i in xrange(ntesters)]
+     testers = [proto_tester() for i in range(ntesters)]
      checkers = [MemCheckerMonitor(memchecker = system.memchecker) \
-                      for i in xrange(ntesters)]
+                      for i in range(ntesters)]
      if ntesters:
           subsys.tester = testers
           subsys.checkers = checkers
@@ -264,8 +264,8 @@
           # Create and connect the caches, both the ones fanning out
           # to create the tree, and the ones used to connect testers
           # on this level
-          tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
-          tester_caches = [proto_l1() for i in xrange(ntesters)]
+          tree_caches = [prototypes[0]() for i in range(ncaches[0])]
+          tester_caches = [proto_l1() for i in range(ntesters)]
 
           subsys.cache = tester_caches + tree_caches
           for cache in tree_caches:
diff --git a/configs/example/memtest.py b/configs/example/memtest.py
index d293164..81c826a 100644
--- a/configs/example/memtest.py
+++ b/configs/example/memtest.py
@@ -257,7 +257,7 @@
      limit = (len(cachespec) - level + 1) * 100000000
      testers = [proto_tester(interval = 10 * (level * level + 1),
                              progress_check = limit) \
-                     for i in xrange(ntesters)]
+                     for i in range(ntesters)]
      if ntesters:
           subsys.tester = testers
 
@@ -272,8 +272,8 @@
           # Create and connect the caches, both the ones fanning out
           # to create the tree, and the ones used to connect testers
           # on this level
-          tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
-          tester_caches = [proto_l1() for i in xrange(ntesters)]
+          tree_caches = [prototypes[0]() for i in range(ncaches[0])]
+          tester_caches = [proto_l1() for i in range(ntesters)]
 
           subsys.cache = tester_caches + tree_caches
           for cache in tree_caches:
diff --git a/configs/example/read_config.py b/configs/example/read_config.py
index 3c17d4b..0d60ec4 100644
--- a/configs/example/read_config.py
+++ b/configs/example/read_config.py
@@ -280,7 +280,7 @@
             # Assume that unnamed ports are unconnected
             peers = self.config.get_port_peers(object_name, port_name)
 
-            for index, peer in zip(xrange(0, len(peers)), peers):
+            for index, peer in zip(range(0, len(peers)), peers):
                 parsed_ports.append((
                     PortConnection(object_name, port.name, index),
                     PortConnection.from_string(peer)))
diff --git a/configs/example/ruby_gpu_random_test.py b/configs/example/ruby_gpu_random_test.py
index 162d3ff..1757177 100644
--- a/configs/example/ruby_gpu_random_test.py
+++ b/configs/example/ruby_gpu_random_test.py
@@ -76,7 +76,9 @@
 #
 Ruby.define_options(parser)
 
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+    open(os.path.join(config_root, "common", "Options.py")).read(), \
+    os.path.join(config_root, "common", "Options.py"), 'exec'))
 
 (options, args) = parser.parse_args()
 
@@ -97,7 +99,7 @@
 assert(options.num_compute_units >= 1)
 n_cu = options.num_compute_units
 
-options.num_sqc = int((n_cu + options.cu_per_sqc - 1) / options.cu_per_sqc)
+options.num_sqc = int((n_cu + options.cu_per_sqc - 1) // options.cu_per_sqc)
 
 if args:
      print("Error: script doesn't take any positional arguments")
diff --git a/configs/example/ruby_mem_test.py b/configs/example/ruby_mem_test.py
index 68ad1ca..880a150 100644
--- a/configs/example/ruby_mem_test.py
+++ b/configs/example/ruby_mem_test.py
@@ -65,7 +65,9 @@
 #
 Ruby.define_options(parser)
 
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+    open(os.path.join(config_root, "common", "Options.py")).read(), \
+    os.path.join(config_root, "common", "Options.py"), 'exec'))
 
 (options, args) = parser.parse_args()
 
@@ -101,7 +103,7 @@
                  percent_uncacheable = 0,
                  progress_interval = options.progress,
                  suppress_func_warnings = options.suppress_func_warnings) \
-         for i in xrange(options.num_cpus) ]
+         for i in range(options.num_cpus) ]
 
 system = System(cpu = cpus,
                 clk_domain = SrcClockDomain(clock = options.sys_clock),
@@ -114,7 +116,7 @@
                      progress_interval = options.progress,
                      suppress_func_warnings =
                                         not options.suppress_func_warnings) \
-             for i in xrange(options.num_dmas) ]
+             for i in range(options.num_dmas) ]
     system.dma_devices = dmas
 else:
     dmas = []
diff --git a/configs/example/ruby_random_test.py b/configs/example/ruby_random_test.py
index d6b53cf..15d474c 100644
--- a/configs/example/ruby_random_test.py
+++ b/configs/example/ruby_random_test.py
@@ -59,7 +59,9 @@
 #
 Ruby.define_options(parser)
 
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+    open(os.path.join(config_root, "common", "Options.py")).read(), \
+    os.path.join(config_root, "common", "Options.py"), 'exec'))
 
 (options, args) = parser.parse_args()
 
diff --git a/configs/example/se.py b/configs/example/se.py
index fa9e897..59af888 100644
--- a/configs/example/se.py
+++ b/configs/example/se.py
@@ -171,7 +171,7 @@
     fatal("You cannot use SMT with multiple CPUs!")
 
 np = options.num_cpus
-system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
+system = System(cpu = [CPUClass(cpu_id=i) for i in range(np)],
                 mem_mode = test_mem_mode,
                 mem_ranges = [AddrRange(options.mem_size)],
                 cache_line_size = options.cacheline_size)
@@ -220,7 +220,7 @@
     if np > 1:
         fatal("SimPoint generation not supported with more than one CPUs")
 
-for i in xrange(np):
+for i in range(np):
     if options.smt:
         system.cpu[i].workload = multiprocesses
     elif len(multiprocesses) == 1:
@@ -246,7 +246,7 @@
 
     system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                                         voltage_domain = system.voltage_domain)
-    for i in xrange(np):
+    for i in range(np):
         ruby_port = system.ruby._cpu_ports[i]
 
         # Create the interrupt controller and connect its ports to Ruby
diff --git a/configs/ruby/AMD_Base_Constructor.py b/configs/ruby/AMD_Base_Constructor.py
index 96f6575..a8a0e0d 100644
--- a/configs/ruby/AMD_Base_Constructor.py
+++ b/configs/ruby/AMD_Base_Constructor.py
@@ -115,7 +115,7 @@
     cpu_sequencers = []
     cpuCluster = None
     cpuCluster = Cluster(name="CPU Cluster", extBW = 8, intBW=8) # 16 GB/s
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
diff --git a/configs/ruby/GPU_RfO.py b/configs/ruby/GPU_RfO.py
index fea5e5a..afe9614 100644
--- a/configs/ruby/GPU_RfO.py
+++ b/configs/ruby/GPU_RfO.py
@@ -470,7 +470,7 @@
         block_size_bits = int(math.log(options.cacheline_size, 2))
         numa_bit = block_size_bits + dir_bits - 1
 
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
         dir_ranges = []
         for r in system.mem_ranges:
             addr_range = m5.objects.AddrRange(r.start, size = r.size(),
@@ -511,7 +511,7 @@
 
     # For an odd number of CPUs, still create the right number of controllers
     cpuCluster = Cluster(extBW = 512, intBW = 512)  # 1 TB/s
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
@@ -545,7 +545,7 @@
 
     gpuCluster = Cluster(extBW = 512, intBW = 512)  # 1 TB/s
 
-    for i in xrange(options.num_compute_units):
+    for i in range(options.num_compute_units):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              number_of_TBEs = 2560) # max outstanding requests
@@ -578,7 +578,7 @@
 
         gpuCluster.add(tcp_cntrl)
 
-    for i in xrange(options.num_sqc):
+    for i in range(options.num_sqc):
 
         sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
         sqc_cntrl.create(options, ruby_system, system)
@@ -610,7 +610,7 @@
         # SQC also in GPU cluster
         gpuCluster.add(sqc_cntrl)
 
-    for i in xrange(options.num_cp):
+    for i in range(options.num_cp):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              number_of_TBEs = 2560) # max outstanding requests
@@ -673,7 +673,7 @@
         # SQC also in GPU cluster
         gpuCluster.add(sqc_cntrl)
 
-    for i in xrange(options.num_tccs):
+    for i in range(options.num_tccs):
 
         tcc_cntrl = TCCCntrl(TCC_select_num_bits = TCC_bits,
                              number_of_TBEs = options.num_compute_units * 2560)
diff --git a/configs/ruby/GPU_VIPER.py b/configs/ruby/GPU_VIPER.py
index 8d12230..94dcbef 100644
--- a/configs/ruby/GPU_VIPER.py
+++ b/configs/ruby/GPU_VIPER.py
@@ -429,7 +429,7 @@
         mainCluster = Cluster(intBW=crossbar_bw)
     else:
         mainCluster = Cluster(intBW=8) # 16 GB/s
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
 
         dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
         dir_cntrl.create(options, ruby_system, system)
@@ -467,7 +467,7 @@
         cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
     else:
         cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
@@ -504,7 +504,7 @@
       gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
     else:
       gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
-    for i in xrange(options.num_compute_units):
+    for i in range(options.num_compute_units):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              issue_latency = 1,
@@ -543,7 +543,7 @@
 
         gpuCluster.add(tcp_cntrl)
 
-    for i in xrange(options.num_sqc):
+    for i in range(options.num_sqc):
 
         sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
         sqc_cntrl.create(options, ruby_system, system)
@@ -569,7 +569,7 @@
         # SQC also in GPU cluster
         gpuCluster.add(sqc_cntrl)
 
-    for i in xrange(options.num_cp):
+    for i in range(options.num_cp):
 
         tcp_ID = options.num_compute_units + i
         sqc_ID = options.num_sqc + i
@@ -623,7 +623,7 @@
         # SQC also in GPU cluster
         gpuCluster.add(sqc_cntrl)
 
-    for i in xrange(options.num_tccs):
+    for i in range(options.num_tccs):
 
         tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
         tcc_cntrl.create(options, ruby_system, system)
diff --git a/configs/ruby/GPU_VIPER_Baseline.py b/configs/ruby/GPU_VIPER_Baseline.py
index 960cbbd..5c713ce 100644
--- a/configs/ruby/GPU_VIPER_Baseline.py
+++ b/configs/ruby/GPU_VIPER_Baseline.py
@@ -407,7 +407,7 @@
     # Clusters
     crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
     mainCluster = Cluster(intBW = crossbar_bw)
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
 
         dir_cntrl = DirCntrl(noTCCdir=True,TCC_select_num_bits = TCC_bits)
         dir_cntrl.create(options, ruby_system, system)
@@ -440,7 +440,7 @@
         mainCluster.add(dir_cntrl)
 
     cpuCluster = Cluster(extBW = crossbar_bw, intBW=crossbar_bw)
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
@@ -473,7 +473,7 @@
         cpuCluster.add(cp_cntrl)
 
     gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
-    for i in xrange(options.num_compute_units):
+    for i in range(options.num_compute_units):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              issue_latency = 1,
@@ -510,7 +510,7 @@
 
         gpuCluster.add(tcp_cntrl)
 
-    for i in xrange(options.num_sqc):
+    for i in range(options.num_sqc):
 
         sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
         sqc_cntrl.create(options, ruby_system, system)
@@ -539,7 +539,7 @@
     # Because of wire buffers, num_tccs must equal num_tccdirs
     numa_bit = 6
 
-    for i in xrange(options.num_tccs):
+    for i in range(options.num_tccs):
 
         tcc_cntrl = TCCCntrl()
         tcc_cntrl.create(options, ruby_system, system)
diff --git a/configs/ruby/GPU_VIPER_Region.py b/configs/ruby/GPU_VIPER_Region.py
index 90e8b77..8b317fb 100644
--- a/configs/ruby/GPU_VIPER_Region.py
+++ b/configs/ruby/GPU_VIPER_Region.py
@@ -469,7 +469,7 @@
     # For an odd number of CPUs, still create the right number of controllers
     crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
     cpuCluster = Cluster(extBW = (crossbar_bw), intBW=crossbar_bw)
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
@@ -535,7 +535,7 @@
         cpuCluster.add(rb_cntrl)
 
     gpuCluster = Cluster(extBW = (crossbar_bw), intBW = crossbar_bw)
-    for i in xrange(options.num_compute_units):
+    for i in range(options.num_compute_units):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              issue_latency = 1,
@@ -571,7 +571,7 @@
 
         gpuCluster.add(tcp_cntrl)
 
-    for i in xrange(options.num_sqc):
+    for i in range(options.num_sqc):
 
         sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
         sqc_cntrl.create(options, ruby_system, system)
@@ -599,7 +599,7 @@
 
     numa_bit = 6
 
-    for i in xrange(options.num_tccs):
+    for i in range(options.num_tccs):
 
         tcc_cntrl = TCCCntrl()
         tcc_cntrl.create(options, ruby_system, system)
diff --git a/configs/ruby/Garnet_standalone.py b/configs/ruby/Garnet_standalone.py
index a70780b..c38bdba 100644
--- a/configs/ruby/Garnet_standalone.py
+++ b/configs/ruby/Garnet_standalone.py
@@ -66,7 +66,7 @@
     # controller constructors are called before the network constructor
     #
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         # Only one cache exists for this protocol, so by default use the L1D
diff --git a/configs/ruby/MESI_Three_Level.py b/configs/ruby/MESI_Three_Level.py
index f38b7cf..95ac342 100644
--- a/configs/ruby/MESI_Three_Level.py
+++ b/configs/ruby/MESI_Three_Level.py
@@ -83,8 +83,8 @@
     # Must create the individual controllers before the network to ensure the
     # controller constructors are called before the network constructor
     #
-    for i in xrange(options.num_clusters):
-        for j in xrange(num_cpus_per_cluster):
+    for i in range(options.num_clusters):
+        for j in range(num_cpus_per_cluster):
             #
             # First create the Ruby objects associated with this cpu
             #
@@ -164,7 +164,7 @@
             l1_cntrl.responseFromL2.slave = ruby_system.network.master
 
 
-        for j in xrange(num_l2caches_per_cluster):
+        for j in range(num_l2caches_per_cluster):
             l2_cache = L2Cache(size = options.l2_size,
                                assoc = options.l2_assoc,
                                start_index_bit = l2_index_start)
diff --git a/configs/ruby/MESI_Two_Level.py b/configs/ruby/MESI_Two_Level.py
index 52976e6..27ef9c8 100644
--- a/configs/ruby/MESI_Two_Level.py
+++ b/configs/ruby/MESI_Two_Level.py
@@ -67,7 +67,7 @@
     l2_bits = int(math.log(options.num_l2caches, 2))
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         #
@@ -135,7 +135,7 @@
 
     l2_index_start = block_size_bits + l2_bits
 
-    for i in xrange(options.num_l2caches):
+    for i in range(options.num_l2caches):
         #
         # First create the Ruby objects associated with this cpu
         #
diff --git a/configs/ruby/MI_example.py b/configs/ruby/MI_example.py
index 222d084..e3395bd 100644
--- a/configs/ruby/MI_example.py
+++ b/configs/ruby/MI_example.py
@@ -64,7 +64,7 @@
     #
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         # Only one cache exists for this protocol, so by default use the L1D
diff --git a/configs/ruby/MOESI_AMD_Base.py b/configs/ruby/MOESI_AMD_Base.py
index ad16543..aeec378 100644
--- a/configs/ruby/MOESI_AMD_Base.py
+++ b/configs/ruby/MOESI_AMD_Base.py
@@ -248,7 +248,7 @@
         block_size_bits = int(math.log(options.cacheline_size, 2))
         numa_bit = block_size_bits + dir_bits - 1
 
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
         dir_ranges = []
         for r in system.mem_ranges:
             addr_range = m5.objects.AddrRange(r.start, size = r.size(),
@@ -294,7 +294,7 @@
 
     # For an odd number of CPUs, still create the right number of controllers
     cpuCluster = Cluster(extBW = 512, intBW = 512)  # 1 TB/s
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
diff --git a/configs/ruby/MOESI_CMP_directory.py b/configs/ruby/MOESI_CMP_directory.py
index 3fef48b..40cb7ce 100644
--- a/configs/ruby/MOESI_CMP_directory.py
+++ b/configs/ruby/MOESI_CMP_directory.py
@@ -67,7 +67,7 @@
     l2_bits = int(math.log(options.num_l2caches, 2))
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         #
@@ -126,7 +126,7 @@
 
     l2_index_start = block_size_bits + l2_bits
 
-    for i in xrange(options.num_l2caches):
+    for i in range(options.num_l2caches):
         #
         # First create the Ruby objects associated with this cpu
         #
diff --git a/configs/ruby/MOESI_CMP_token.py b/configs/ruby/MOESI_CMP_token.py
index 94a518b..817d6f9 100644
--- a/configs/ruby/MOESI_CMP_token.py
+++ b/configs/ruby/MOESI_CMP_token.py
@@ -80,7 +80,7 @@
     l2_bits = int(math.log(options.num_l2caches, 2))
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         #
@@ -149,7 +149,7 @@
 
     l2_index_start = block_size_bits + l2_bits
 
-    for i in xrange(options.num_l2caches):
+    for i in range(options.num_l2caches):
         #
         # First create the Ruby objects associated with this cpu
         #
diff --git a/configs/ruby/MOESI_hammer.py b/configs/ruby/MOESI_hammer.py
index 7c31ca2..7630886 100644
--- a/configs/ruby/MOESI_hammer.py
+++ b/configs/ruby/MOESI_hammer.py
@@ -74,7 +74,7 @@
     #
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         #
diff --git a/configs/ruby/Ruby.py b/configs/ruby/Ruby.py
index 2ddf608..03e836e 100644
--- a/configs/ruby/Ruby.py
+++ b/configs/ruby/Ruby.py
@@ -214,7 +214,7 @@
 
 def create_directories(options, bootmem, ruby_system, system):
     dir_cntrl_nodes = []
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
         dir_cntrl = Directory_Controller()
         dir_cntrl.version = i
         dir_cntrl.directory = RubyDirectoryMemory()
diff --git a/configs/splash2/cluster.py b/configs/splash2/cluster.py
index f819bd1..753fb0f 100644
--- a/configs/splash2/cluster.py
+++ b/configs/splash2/cluster.py
@@ -167,41 +167,41 @@
 all_l1s = []
 all_l1buses = []
 if options.timing:
-    clusters = [ Cluster() for i in xrange(options.numclusters)]
-    for j in xrange(options.numclusters):
+    clusters = [ Cluster() for i in range(options.numclusters)]
+    for j in range(options.numclusters):
         clusters[j].id = j
     for cluster in clusters:
         cluster.clusterbus = L2XBar(clock=busFrequency)
         all_l1buses += [cluster.clusterbus]
         cluster.cpus = [TimingSimpleCPU(cpu_id = i + cluster.id,
                                         clock=options.frequency)
-                        for i in xrange(cpusPerCluster)]
+                        for i in range(cpusPerCluster)]
         all_cpus += cluster.cpus
         cluster.l1 = L1(size=options.l1size, assoc = 4)
         all_l1s += [cluster.l1]
 elif options.detailed:
-    clusters = [ Cluster() for i in xrange(options.numclusters)]
-    for j in xrange(options.numclusters):
+    clusters = [ Cluster() for i in range(options.numclusters)]
+    for j in range(options.numclusters):
         clusters[j].id = j
     for cluster in clusters:
         cluster.clusterbus = L2XBar(clock=busFrequency)
         all_l1buses += [cluster.clusterbus]
         cluster.cpus = [DerivO3CPU(cpu_id = i + cluster.id,
                                    clock=options.frequency)
-                        for i in xrange(cpusPerCluster)]
+                        for i in range(cpusPerCluster)]
         all_cpus += cluster.cpus
         cluster.l1 = L1(size=options.l1size, assoc = 4)
         all_l1s += [cluster.l1]
 else:
-    clusters = [ Cluster() for i in xrange(options.numclusters)]
-    for j in xrange(options.numclusters):
+    clusters = [ Cluster() for i in range(options.numclusters)]
+    for j in range(options.numclusters):
         clusters[j].id = j
     for cluster in clusters:
         cluster.clusterbus = L2XBar(clock=busFrequency)
         all_l1buses += [cluster.clusterbus]
         cluster.cpus = [AtomicSimpleCPU(cpu_id = i + cluster.id,
                                         clock=options.frequency)
-                        for i in xrange(cpusPerCluster)]
+                        for i in range(cpusPerCluster)]
         all_cpus += cluster.cpus
         cluster.l1 = L1(size=options.l1size, assoc = 4)
         all_l1s += [cluster.l1]
diff --git a/configs/splash2/run.py b/configs/splash2/run.py
index b17eb54..f97616a 100644
--- a/configs/splash2/run.py
+++ b/configs/splash2/run.py
@@ -182,15 +182,15 @@
 if options.timing:
     cpus = [TimingSimpleCPU(cpu_id = i,
                             clock=options.frequency)
-            for i in xrange(options.numcpus)]
+            for i in range(options.numcpus)]
 elif options.detailed:
     cpus = [DerivO3CPU(cpu_id = i,
                        clock=options.frequency)
-            for i in xrange(options.numcpus)]
+            for i in range(options.numcpus)]
 else:
     cpus = [AtomicSimpleCPU(cpu_id = i,
                             clock=options.frequency)
-            for i in xrange(options.numcpus)]
+            for i in range(options.numcpus)]
 
 # ----------------------
 # Create a system, and add system wide objects
diff --git a/configs/topologies/MeshDirCorners_XY.py b/configs/topologies/MeshDirCorners_XY.py
index 46f3c6f..2381624 100644
--- a/configs/topologies/MeshDirCorners_XY.py
+++ b/configs/topologies/MeshDirCorners_XY.py
@@ -126,8 +126,8 @@
         int_links = []
 
         # East output to West input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_out = col + (row * num_columns)
                     west_in = (col + 1) + (row * num_columns)
@@ -141,8 +141,8 @@
                     link_count += 1
 
         # West output to East input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_in = col + (row * num_columns)
                     west_out = (col + 1) + (row * num_columns)
@@ -156,8 +156,8 @@
                     link_count += 1
 
         # North output to South input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_out = col + (row * num_columns)
                     south_in = col + ((row + 1) * num_columns)
@@ -171,8 +171,8 @@
                     link_count += 1
 
         # South output to North input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_in = col + (row * num_columns)
                     south_out = col + ((row + 1) * num_columns)
diff --git a/configs/topologies/Mesh_XY.py b/configs/topologies/Mesh_XY.py
index 652ac16..200d346 100644
--- a/configs/topologies/Mesh_XY.py
+++ b/configs/topologies/Mesh_XY.py
@@ -78,7 +78,7 @@
         # distributed across the network.
         network_nodes = []
         remainder_nodes = []
-        for node_index in xrange(len(nodes)):
+        for node_index in range(len(nodes)):
             if node_index < (len(nodes) - remainder):
                 network_nodes.append(nodes[node_index])
             else:
@@ -110,8 +110,8 @@
         int_links = []
 
         # East output to West input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_out = col + (row * num_columns)
                     west_in = (col + 1) + (row * num_columns)
@@ -125,8 +125,8 @@
                     link_count += 1
 
         # West output to East input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_in = col + (row * num_columns)
                     west_out = (col + 1) + (row * num_columns)
@@ -140,8 +140,8 @@
                     link_count += 1
 
         # North output to South input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_out = col + (row * num_columns)
                     south_in = col + ((row + 1) * num_columns)
@@ -155,8 +155,8 @@
                     link_count += 1
 
         # South output to North input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_in = col + (row * num_columns)
                     south_out = col + ((row + 1) * num_columns)
diff --git a/configs/topologies/Mesh_westfirst.py b/configs/topologies/Mesh_westfirst.py
index 6139f67..f327820 100644
--- a/configs/topologies/Mesh_westfirst.py
+++ b/configs/topologies/Mesh_westfirst.py
@@ -82,7 +82,7 @@
         # distributed across the network.
         network_nodes = []
         remainder_nodes = []
-        for node_index in xrange(len(nodes)):
+        for node_index in range(len(nodes)):
             if node_index < (len(nodes) - remainder):
                 network_nodes.append(nodes[node_index])
             else:
@@ -114,8 +114,8 @@
         int_links = []
 
         # East output to West input links (weight = 2)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_out = col + (row * num_columns)
                     west_in = (col + 1) + (row * num_columns)
@@ -127,8 +127,8 @@
                     link_count += 1
 
         # West output to East input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_in = col + (row * num_columns)
                     west_out = (col + 1) + (row * num_columns)
@@ -141,8 +141,8 @@
 
 
         # North output to South input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_out = col + (row * num_columns)
                     south_in = col + ((row + 1) * num_columns)
@@ -154,8 +154,8 @@
                     link_count += 1
 
         # South output to North input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_in = col + (row * num_columns)
                     south_out = col + ((row + 1) * num_columns)
diff --git a/configs/topologies/Pt2Pt.py b/configs/topologies/Pt2Pt.py
index 6cbf5ad..81d61d7 100644
--- a/configs/topologies/Pt2Pt.py
+++ b/configs/topologies/Pt2Pt.py
@@ -63,8 +63,8 @@
 
         link_count = len(nodes)
         int_links = []
-        for i in xrange(len(nodes)):
-            for j in xrange(len(nodes)):
+        for i in range(len(nodes)):
+            for j in range(len(nodes)):
                 if (i != j):
                     link_count += 1
                     int_links.append(IntLink(link_id=link_count,