C++QED  v2 Milestone 10
a framework for simulating open quantum dynamics
testdriver.py
1 # Copyright Raimar Sandner 2012-2014. Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.txt)
2 
3 ## @package testdriver
4 # This is the Python testdriver for the \ref testsuite.
5 #
6 # It is intended to be used with the CMake CTest utility.
7 # When called with the parameter `--testclass=<TESTCLASS>`, it calls the `run`
8 # method of the specified runner class. Success of a test is indicated by the
9 # return value 0.
10 
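# An illustrative invocation (test, script and file names are invented for
# this example; in practice CTest supplies them):
#
#   python testdriver.py --test=run_decay --testclass=Runner \
#       --script=path/to/some_script --configuration=release \
#       path/to/testdriver.conf
#
# The positional arguments are the configuration file(s), --test selects the
# section in them, and --testclass selects which of the classes below is
# instantiated and run.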
11 import logging
12 from optparse import OptionParser
13 import ConfigParser
14 import sys
15 import os
16 import errno
17 import subprocess
18 import numpy as np
19 import shutil
20 import ast
21 import scipy.interpolate
22 from scipy.integrate import quadrature
23 from scipy import exp
24 
25 try:
26  import matplotlib
27  matplotlib.use('Agg')
28  import matplotlib.pyplot as plt
29  from matplotlib.backends.backend_pdf import PdfPages
30  from matplotlib.font_manager import FontProperties
31 
32  plot=True
33 except ImportError:
34  plot=False
35 
36 logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s")
37 
38 ## @name Helper functions
39 # @{
40 
41 ## Create a directory with parent directories.
42 # @param path The path to create.
43 #
44 # From this [stackoverflow question](http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python)
45 def mkdir_p(path):
46  try:
47  os.makedirs(path)
48  except OSError, exc:
49  if exc.errno == errno.EEXIST:
50  pass
51  else: raise
52 
53 ## Remove a file without error if it doesn't exist.
54 # @param filename The file to delete.
55 #
56 # From this [stackoverflow question](http://stackoverflow.com/a/10840586)
57 def rm_f(filename):
58  try:
59  os.remove(filename)
60  except OSError as e: # this would be "except OSError, e:" before Python 2.6
61  if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
62  raise # re-raise exception if a different error occurred
63 
64 ## Loads a trajectory file.
65 # \param fname File name to load from.
66 # \return array Numpy array.
67 def load_sv(fname, format=None):
68  if format is None: return np.genfromtxt(fname)
69 
70  floatingReString=r'([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?)'
71  complexReString =r'\(\s*'+floatingReString+'\s*,\s*'+floatingReString+'\s*\)'
72 
73  return np.fromregex(fname,format.replace(r'+',r'\s*').replace('f',floatingReString).replace('c',complexReString),np.float)
74 
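# As an illustration of the format mechanism (file name invented): a file whose
# lines look like "0.1 0.5 (0.25,-0.1)" could be read with
#
#   data = load_sv('trajectory.dat', format='f+f+c')
#
# '+' stands for whitespace, 'f' for a float column and 'c' for a complex
# column written as "(re,im)"; each complex column is split into its real and
# imaginary parts, so this example yields four numpy columns.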
75 ## @}
76 
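# Postprocessing hooks, usable through the 'postprocess_local' configuration
# key of the comparer classes below: they reshuffle the raw output columns of
# a script into a common layout (e.g. PTLA_postprocess maps column 2 to
# (1+x)/2 and (1-x)/2 and halves columns 3 and 4).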
77 def PTLA_postprocess(input):
78  result=np.zeros((input.shape[0],6))
79  result[:,[0,1]]=input[:,[0,1]]
80  result[:,2]=(1+input[:,2])/2
81  result[:,3]=(1-input[:,2])/2
82  result[:,4]=input[:,3]/2
83  result[:,5]=input[:,4]/2
84  return result
85 
86 def PLM_Evolved_postprocess(input):
87  result=np.zeros((input.shape[0],5))
88  result[:,[0,1]]=input[:,[0,1]]
89  result[:,2]=input[:,2]**2+input[:,3]**2
90  result[:,3]=input[:,2]
91  result[:,4]=input[:,3]
92  return result
93 
94 ## @defgroup TestclassHelpers Helpers
95 # @ingroup Testclasses
96 # \brief Helper base classes to test classes.
97 # These classes cannot be used as test classes directly, but serve as bases for other test classes
98 # and define some \ref TestclassKeys "configuration file keys" and \ref TestclassOptions "command line options".
99 
100 class OptionsManager(object):
101  """!
102  @ingroup TestclassHelpers
103  \brief Stores command line options and configuration file keys.
104 
105  Each OptionsManager instance has its own section in the configuration file, named after
106  the current test name (OptionsManager::test). If the current section has the key
107  `import=othersection`, all keys from `othersection` that are not already present are imported
108  (this works recursively). Keys whose names end in `_local` are never imported.
109 
110  \ref OptionsManager_options "Command line" options this class understands.
111  """
112 
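# As an illustration of the import mechanism (section and key names are
# invented for this example), a configuration file might contain:
#
#   [base_test]
#   opts=--deltaA -10
#   epsilon_local=0.1
#
#   [my_test]
#   import=base_test
#   opts1=--cutoff 20
#
# Section 'my_test' then also sees 'opts' from 'base_test', whereas
# 'epsilon_local' is not imported because its name ends in '_local'.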
113  ## @addtogroup TestclassOptions
114  #
115  # @anchor OptionsManager_options
116  # ## OptionsManager command line options
117  # * `--test=<testname>`: The name of the test. This defines the section in the configuration file
118  # and also ends up in output files etc.
119 
120  def __init__(self, options, cp):
121  """!
122  @param options optparse.Values: object holding all the command line options.
123  @param cp ConfigParser: ConfigParser instance holding all configuration file keys.
124  """
125 
126  ## optparse.Values: command line options
127  self.options = options
128  ## ConfigParser: configuration file keys
129  self.cp = cp
130  ## The name of the current test
131  self.test = options.test
132  if not self.test: sys.exit('--test missing')
133 
134  def _import_section(self,section=None):
135  if section is None: section = self.test
136  if self.cp.has_option(section,'import'):
137  import_section=self.cp.get(section,'import')
138  self._import_section(section=import_section) # import recursively
139  for item in self.cp.items(import_section):
140  if not self.cp.has_option(section,item[0]) and not item[0].endswith('_local'):
141  self.cp.set(section, *item)
142  self.cp.remove_option(section,'import')
143 
144  def get_option(self, name, default=None, required=False, section=None):
145  """!
146  Get configuration file keys in a safe way.
147  \param name Name of the key.
148  \param default Default value to return if key does not exist.
149  \param required Fail if True and key does not exist.
150  \param section The section name to look in, defaults to OptionsManager::test if None.
151  \return The value to the key.
152 
153  This method looks up the key `name` in the section given by OptionsManager::test (or in `section`, if specified).
154  """
155  if section is None: section=self.test
156  self._import_section(section=section)
157  if self.cp.has_option(section,name):
158  return self.cp.get(section,name)
159  else:
160  if not required: return default
161  else: sys.exit("Error: required option \"{0}\" not found in section {1}.".format(name,section))
162 
163 class OutputManager(OptionsManager):
164  """!
165  @ingroup TestclassHelpers
166  \brief Manages output files for different run modes.
167 
168  \ref OutputManager_keys "Configuration file keys" this class understands.
169  """
170 
171  ## @addtogroup SetupKeys
172  #
173  # * `outputdir`: All output files end up here.
174  # * `expecteddir`: Where to look for pre-run simulations to compare test runs to.
175 
176 
177  ## @addtogroup TestclassKeys
178  #
179  # @anchor OutputManager_keys
180  # ## OutputManager configuration file keys
181  # * `runmodes`: comma-separated list of runmodes (single, master, ensemble)
182 
183 
184  def __init__(self, *args, **kwargs):
185  """!
186  Arguments are passed through to OptionsManager.
187  """
188  OptionsManager.__init__(self, *args, **kwargs)
189  ## All output files end up here.
190  self.outputdir = self.cp.get('Setup','outputdir')
191  ## Where to look for pre-run simulations to compare test runs to.
192  self.expecteddir = self.cp.get('Setup','expecteddir')
193  mkdir_p(self.outputdir)
194 
195  def runmodes(self,section=None):
196  """!
197  Return runmodes.
198  \param section (optional) String: Where to look up the runmodes; defaults to the current test section if not specified.
199  \return A list of runmodes in this section.
200  """
201  if section is None: section=self.test
202  return self.get_option('runmodes', section=section, default='generic').split(',')
203 
204  def _filter_runmodes(self, section):
205  filter_runmodes=self.get_option('runmodes_'+self.test+'_local',section=section)
206  if not filter_runmodes is None: filter_runmodes=filter_runmodes.split(',')
207  for mode in self.runmodes(section=section):
208  if not filter_runmodes is None and not mode in filter_runmodes: continue
209  yield(mode)
210 
211  def output(self, runmode, section=None, statefile=False):
212  """!
213  The name of the output file for a given runmode.
214  \param runmode String: The runmode for which the filename should be generated.
215  \param section (optional) String: Output file name for which section, current test section if left empty.
216  \param statefile (optional) Boolean: By default generate the file name for a trajectory file. If set to true
217  generate the file name for a state file.
218  \return Full path including OutputManager::outputdir.
219  """
220  if section is None: section=self.test
221  if runmode == "generic":
222  output = os.path.join(self.outputdir, section)
223  else:
224  output = os.path.join(self.outputdir, section+'_'+runmode)
225  if statefile: output+=".state"
226  return output
227 
228  def clean(self, runmode):
229  """!
230  Delete the trajectory file and state file for a given runmode.
231  \param runmode String: The runmode for which output files should be deleted.
232  """
233  rm_f(self.output(runmode))
234  rm_f(self.output(runmode,statefile=True))
235 
236 
237 # The test classes
238 
239 class Runner(OutputManager):
240  """!
241  @ingroup Testclasses
242  Runs a script repeatedly for all declared runmodes and succeeds if the scripts do.
243 
244  \ref Runner_keys "Configuration file keys" this class understands.
245  """
246  def run(self, clean=True, extra_opts=None, interpreter=None, *args, **kwargs):
247  """!
248  The method to run the test.
249  \param clean (optional) `Boolean`: Whether to remove old output before running the test.
250  \param extra_opts (optional) `List`: Additional command line options appended to the script call.
251  \param interpreter (optional) `str`: Interpreter to run the command through, e.g. `python`.
252  \param args passed through to `subprocess.call`
253  \param kwargs passed through to `subprocess.call`
254 
255  This method terminates the test driver with a return value equal to that of the script call
256  if one of the scripts fails.
257  """
258  for runmode in self.runmodes():
259  if clean: self.clean(runmode)
260  command = self._build_commandline(runmode,extra_opts,interpreter)
261  logging.debug(subprocess.list2cmdline(command))
262  ret = subprocess.call(command, *args, **kwargs)
263  if not ret==0: sys.exit(ret)
264 
265  ## @addtogroup TestclassKeys
266  #
267  # @anchor Runner_keys
268  # ## Runner configuration file keys
269  # * `opts*`: The command line options used for running the script, multiple keys matching `opts*` can be given
270  # * `single*`, `master*`, `ensemble*`: Additional options for the specific runmodes. Multiple keys
271  # matching `<runmode>*` can be given.
272  #
273  # Example usage:
274  #
275  # # The options used for running the scripts, multiple keys can be given if they match opts*
276  # opts=--etat 8 --sdf 3
277  # opts1=--dc 0 --Dt 0.1 --NDt 10
278  #
279  # # runmode specific options
280  # single=...
281  # single1=...
282  # ensemble=...
283  # master=...
284 
285  def _extend_opts(self, options, section, option_prefix):
286  for option in sorted([ item[0] for item in self.cp.items(section) if item[0].startswith(option_prefix)]):
287  options.extend(self.cp.get(section,option).split())
288 
289  def _build_commandline(self, runmode, extra_opts=None, interpreter=None):
290  result = [interpreter] if not interpreter is None else []
291  result.append(self.options.script)
292  if extra_opts: result+=extra_opts
293 
294  ## @addtogroup SetupKeys
295  #
296  # * `opts`: Script command line options added to all scripts
297 
298  self._extend_opts(result, 'Setup','opts')
299  self._extend_opts(result, self.test,'opts')
300  self._extend_opts(result, self.test,runmode)
301 
302  if not runmode=="generic": result.extend(('--evol',runmode))
303  result.extend(('--o',self.output(runmode)))
304  return result
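# To illustrate what _build_commandline produces: with the example keys above
# and runmode 'single', the generated call is roughly
#
#   <script> <Setup opts> --etat 8 --sdf 3 --dc 0 --Dt 0.1 --NDt 10 \
#       <single opts> --evol single --o <outputdir>/<test>_single
#
# i.e. global [Setup] options first, then the test's opts* keys, then the
# runmode-specific keys, then the evolution mode and the output file.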
305 
306 class PythonRunner(Runner):
307  """!
308  @ingroup Testclasses
309  Runs a cpypyqed script repeatedly for all declared runmodes and succeeds if the scripts do.
310 
311  \ref PythonRunner_options "Command line options" this class understands.
312  """
313 
314  ## @addtogroup TestclassOptions
315  #
316  # @anchor PythonRunner_options
317  # ## PythonRunner command line options
318  # * `--cpypyqed_builddir=<dir>`: Directory for on-demand compilation
319  # * `--cpypyqed_config=<config-file>`: Configuration file for on-demand compilation
320 
321  def run(self, clean=True, extra_opts=None, *args, **kwargs):
322  """!
323  The method to run the test.
324  \param clean (optional) `Boolean`: Whether to remove old output before running the test.
325  \param extra_opts (optional) `List`: Additional command line options appended to the script call.
326  \param args passed through to Runner.run()
327  \param kwargs passed through to Runner.run()
328 
329  This method terminates the test driver with a return value equal to that of the script call
330  if one of the scripts fails.
331  """
332  cpypyqed_builddir = self.options.cpypyqed_builddir
333  cpypyqed_config = self.options.cpypyqed_config
334  env = os.environ.copy()
335  if cpypyqed_builddir:
336  env['CPYPYQED_BUILDDIR']=cpypyqed_builddir
337  if clean: shutil.rmtree(os.path.join(cpypyqed_builddir,'cppqedmodules'),ignore_errors=True)
338  if cpypyqed_config: env['CPYPYQED_CONFIG']=cpypyqed_config
339  env['PYTHONPATH']=self.cp.get('Setup','modulepath')
340  if extra_opts is None: extra_opts = []
341  if self.options.configuration.lower()=="debug": extra_opts += ['--debug']
342  Runner.run(self,clean=clean,extra_opts=extra_opts,interpreter=sys.executable,env=env,*args,**kwargs)
343 
344 class Verifier(OutputManager):
345  """!
346  @ingroup Testclasses
347  Compares the output of a test run 'this' against an expected output or against the output of some other test run 'other'.
348 
349  \ref Verifier_keys "Configuration file keys" this class understands.
350  """
351 
352  ## @addtogroup TestclassKeys
353  #
354  # @anchor Verifier_keys
355  # ## Verifier configuration file keys
356  # The Verifier compares some test 'this' to another test 'other'.
357  # * `this`: Test name of 'this', by default the current test if missing
358  # * `other`: Test name of 'other'; by default the results from the directory of expected results
359  # (OutputManager::expecteddir)
360  # * `verify`: Verify that both trajectories are exactly equal (default if this key is missing or
361  # `verify=full`), or verify that the last outcome of the simulation is equal, e.g. timesteps may differ
362  # (`verify=outcome`)
363  #
364  # If `this=some_test` is specified, it is probably also a good idea to `import=some_test` to keep
365  # the runmodes in sync. Currently the directory of expected results is `Testing/expected`; it is kept
366  # under version control so that changes in the output of the scripts are noticed.
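# An illustrative configuration (test names invented):
#
#   [verify_decay]
#   import=run_decay
#   this=run_decay
#   verify=outcome
#
# compares the final state of the 'run_decay' trajectories against the
# pre-run results in the directory of expected results, since no 'other'
# key is given.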
367 
368 
369  def __init__(self,*args,**kwargs):
370  """!
371  \param args passed through to OutputManager
372  \param kwargs passed through to OutputManager
373  """
374  OutputManager.__init__(self,*args,**kwargs)
375  self.thisSection = self.get_option('this',default=self.test)
376  self.otherSection = self.get_option('other')
377 
378  def run(self):
379  """!
380  Run the test.
381  """
382  mode=self.get_option('verify')
383  if mode is None or mode=='full':
384  self._verify_full()
385  elif mode=='outcome':
386  self._verify_outcome()
387  def _verify_full(self):
388  for runmode in self.runmodes(section=self.thisSection):
389  self._verify_ev(self._thisOutput(runmode),self._otherOutput(runmode))
390  self._verify_state(self._thisOutput(runmode,statefile=True),self._otherOutput(runmode,statefile=True))
391  def _thisOutput(self,runmode,statefile=False):
392  return self.output(runmode,section=self.thisSection,statefile=statefile)
393  def _otherOutput(self,runmode,statefile=False):
394  if self.otherSection is None:
395  return os.path.join(self.expecteddir,os.path.basename(self._thisOutput(runmode,statefile)))
396  else:
397  return self.output(runmode,section=self.otherSection,statefile=statefile)
398  def _differ(self,this,other):
399  sys.exit("Error: {0} and {1} differ.".format(this,other))
400  def _equiv(self,this,other):
401  logging.debug("{0} and {1} are equivalent.".format(this,other))
402  def _verify_ev(self,this,other):
403  if not np.allclose(load_sv(this),load_sv(other)): self._differ(this,other)
404  else: self._equiv(this,other)
405  def _verify_state(self,this,other):
406  _,r_state,r_time = io.read(this)
407  _,e_state,e_time = io.read(other)
408  if not (np.allclose(r_state,e_state) and np.allclose(r_time,e_time)): self._differ(this,other)
409  else: self._equiv(this,other)
410  def _verify_outcome(self,this,other):
411  _,r_state,r_time=io.read(this)
412  _,e_state,e_time=io.read(other)
413  if not (np.allclose(r_state[-1],e_state[-1]) and np.allclose(r_time[-1],e_time[-1])):
414  self._differ(this,other)
415  else: self._equiv(this,other)
416 
418  """!
419  @ingroup Testclasses
420  Combines the functionality of Runner and Verifier to a single test.
421  """
422 
423  def run(self):
424  """!
425  Run the test.
426  """
427  Runner.run(self)
428  Verifier.run(self)
429 
430 class GenericContinuer(OptionsManager):
431  """!
432  @ingroup TestclassHelpers
433  This class hosts continued_run(), which will run and then continue a script.
434  """
435 
436  ## @addtogroup TestclassKeys
437  #
438  # @anchor GenericContinuer_keys
439  # ## GenericContinuer configuration file keys
440  # * `firstrun`: script options for the first run
441  # * `secondrun`: script options for the second run
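# For illustration (option values invented): with
#
#   firstrun=--T 1
#   secondrun=--T 2
#
# the script is first run up to t=1, then called again without cleaning the
# output, so that it picks up the existing state file and continues to t=2.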
442 
443  def continued_run(self, runfn, *args, **kwargs):
444  """!
445  Run, then continue a script.
446  \param runfn Function: The run function to call.
447  \param args passed through to `runfn`
448  \param kwargs passed through to `runfn`
449  """
450  runfn(self, extra_opts=self.get_option('firstrun',default='').split(), *args, **kwargs)
451  runfn(self, clean=False, extra_opts=self.get_option('secondrun',default='').split(), *args, **kwargs)
452 
453 class Continuer(Runner, GenericContinuer):
454  """!
455  @ingroup Testclasses
456  GenericContinuer version of Runner.
457 
458  \ref GenericContinuer_keys "Configuration file keys" this class understands.
459  """
460 
461  ## @addtogroup TestclassKeys
462  #
463  # @anchor Continuer
464  # ## Continuer configuration file keys
465  # See \ref GenericContinuer_keys "GenericContinuer keys".
466 
467  def run(self, *args, **kwargs):
468  """!
469  Delegates to GenericContinuer::continued_run().
470  """
471  GenericContinuer.continued_run(self, Runner.run, *args, **kwargs)
472 
473 class PythonContinuer(PythonRunner, GenericContinuer):
474  """!
475  @ingroup Testclasses
476  GenericContinuer version of PythonRunner.
477 
478  \ref GenericContinuer_keys "Configuration file keys" this class understands.
479  """
480 
481  ## @addtogroup TestclassKeys
482  #
483  # @anchor PythonContinuer
484  # ## PythonContinuer configuration file keys
485  # See \ref GenericContinuer_keys "GenericContinuer keys".
486 
487  def run(self, *args, **kwargs):
488  """!
489  Delegates to GenericContinuer::continued_run().
490  """
491  GenericContinuer.continued_run(self, PythonRunner.run, *args, **kwargs)
492 
493 class CompileTarget(OptionsManager):
494  """!
495  @ingroup Testclasses
496  \brief This test tries to compile a %CMake target.
497 
498  If the `error` configuration key is not given,
499  the test succeeds if the target can be compiled; otherwise the test succeeds if the
500  compilation fails and the string given by `error` is found in the build output.
501 
502  \ref CompileTarget_options "Command line options" this class understands.
503  """
504 
505  ## @addtogroup SetupKeys
506  #
507  # * `cmake`: Path of the cmake executable
508  # * `builddir`: Top-level build directory
509  # * `
510 
511  ## @addtogroup TestclassOptions
512  #
513  # @anchor CompileTarget_options
514  # ## CompileTarget command line options
515  # * `--script`: The name of the target to compile.
516 
517  ## @addtogroup TestclassKeys
518  #
519  # @anchor CompileTarget_keys
520  # ## CompileTarget configuration file keys
521  # * `error`: Turn on "failure mode". The error message which is expected in the output.
522  # * `dependencies`: Space separated list of dependencies to compile first. These are
523  # always required to succeed, independent of the presence of `error`.
524 
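# An illustrative "failure mode" section (target and message invented):
#
#   [compilefail_example]
#   error=static assertion failed
#   dependencies=some_helper_target
#
# invoked with --testclass=CompileTarget --script=failing_target: the
# dependency must build, the target itself must fail to build, and the
# given string must appear in the build output.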
525  def run(self):
526  """!
527  Runs the test.
528  """
529  error=self.get_option('error')
530  cmake=self.cp.get('Setup','cmake')
531  builddir=self.cp.get('Setup','builddir')
532  command=[cmake,'--build',builddir,'--target']
533  dependencies=self.get_option('dependencies',default="").split()
534  for dep in dependencies:
535  logging.debug(subprocess.list2cmdline(command+[dep]))
536  p = subprocess.Popen(command+[dep], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
537  (std,err) = p.communicate()
538  if not p.returncode==0:
539  sys.exit("Compilation of dependency {0} for {1} failed.".format(dep,self.options.script))
540  logging.debug(subprocess.list2cmdline(command+[self.options.script]))
541  p = subprocess.Popen(command+[self.options.script], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
542  (std,err) = p.communicate()
543  returncode = p.returncode
544  if error is None:
545  if returncode != 0:
546  sys.exit("Compilation of {0} failed.".format(self.options.script))
547  else:
548  if returncode == 0:
549  sys.exit("Compilation was successful, but failure was expected.")
550  if (not error in std) and (not error in err):
551  logging.debug(std)
552  logging.debug(err)
553  sys.exit("Compilation failed as expected, but \"{0}\" was not found in the error message.".format(error))
554 
556  """!
557  \brief A helper class for plotting functions to a pdf file.
558 
559  If the global variable `plot` is False, all functions are a no-op.
560  """
561 
562  def _plot(self):
563  return plot and not self.get_option('pdf') is None
564  def start_pdf(self):
565  """!
566  \brief Initialize a new pdf file.
567 
568  The file name is read from the configuration key `pdf`.
569  """
570  if not self._plot(): return
571  self.pdf = PdfPages(os.path.join(self.outputdir,self.get_option('pdf')))
572  def close_pdf(self):
573  """!
574  \brief Saves the pdf file to disc after all plots are finished.
575  """
576  if not self._plot(): return
577  for n in plt.get_fignums():
578  plt.figure(num=n)
579  self._place_legend()
580  self.finish_plot()
581  self.pdf.close()
582  def finish_plot(self):
583  """!
584  \brief Adds the current plot to the pdf file.
585  """
586  if not self._plot(): return
587  self.pdf.savefig()
588  plt.close()
589  def figureLegendRight(self,ylabel,title,n):
590  """!
591  \brief Creates a new plot with figure legend right of the plot.
592  \param ylabel The label of the y axis.
593  \param title The title of the plot
594  \param n The value number.
595  """
596  if not self._plot(): return
597  if n in plt.get_fignums():
598  plt.figure(num=n)
599  return
600  f = plt.figure(num=n,figsize=(11.6,8.2))
601  f.add_axes([0.09, 0.1, 0.6, 0.75])
602  plt.title(title)
603  plt.ylabel(ylabel)
604  plt.xlabel('t')
605  def _place_legend(self):
606  if not self._plot(): return
607  fontP = FontProperties()
608  fontP.set_size('small')
609  leg=plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.,prop = fontP)
610  llines=leg.get_lines()
611  plt.setp(llines, linewidth=1.5)
612  def plot(self,time,data,**kwargs):
613  """!
614  \brief Wraps matplotlib's plot function.
615  \param time An array of time values.
616  \param data An array of data values.
617  \param **kwargs These are passed to `matplotlib.plot`.
618  """
619  if not self._plot(): return
620  plt.plot(time,data,**kwargs)
621 
622 
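# The thermal density matrix for mean photon number nTh has the diagonal
# elements rho_nn = nTh^n/(1+nTh)^(n+1); the function returned here measures
# the Frobenius distance of the final state of a trajectory from this matrix.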
623 def final_temperature(nTh):
624  def fn(states):
625  state=states[-1]
626  n=np.arange(state.shape[0],dtype=float)
627  expected_rho=np.diag(nTh**n/(1.+nTh)**(n+1))
628  return np.sqrt(np.sum(np.abs(state-expected_rho)**2))
629  return fn
630 
631 class StateComparer(OutputManager):
632  """!
633  @ingroup Testclasses
634  Tests final states of several trajectories by applying a given function.
635 
636  \ref StateComparer_keys "Configuration file keys" this class understands.
637  """
638 
639  ## @addtogroup TestclassKeys
640  #
641  # @anchor StateComparer_keys
642  # ## StateComparer configuration file keys
643  # * `trajectories`: List of comma-separated trajectories which should be tested.
644  # * `function`: A meta-function which should return the actual test function. The actual test function
645  # should accept the state array and return some epsilon value (the measure of the test).
646  # * `parameters`: Tuple of function parameters passed to the meta function.
647  #
648  # The following configuration keys are read from the 'target'-sections.
649  # * `runmodes_<test>`: For the compare test <test>, only use these runmodes.
650  # * `epsilon_<runmode>_<test>`: Acceptable deviation for the given runmode and comparison test.
651 
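# An illustrative configuration (section names invented):
#
#   [temperature]
#   trajectories=decay_run
#   function=final_temperature
#   parameters=(4,)
#
#   [decay_run]
#   runmodes=single,master
#   epsilon_single_temperature=0.1
#   epsilon_master_temperature=0.01
#
# final_temperature(4) is applied to the final state of each listed run and
# the resulting value must stay below the runmode-specific epsilon.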
652  def run(self):
653  trajectories=self.get_option('trajectories',required=True).split(',')
654  function=globals()[self.get_option('function',required=True)]
655  parameters=ast.literal_eval(self.get_option('parameters'))
656  if parameters is None: parameters=[]
657  failure=False
658  for traj in trajectories:
659  for runmode in self._filter_runmodes(section=traj):
660  statefile=self.output(runmode=runmode,section=traj,statefile=True)
661  _,states,_=io.read(statefile)
662  logging.debug("Evaluating {0}.".format(os.path.basename(statefile)))
663  eps=float(self.get_option('epsilon_'+runmode+'_'+self.test,section=traj,required=True))
664  value=function(*parameters)(states)
665  logging.debug("Value: {0}, epsilon: {1}".format(value,eps))
666  if not value<eps:
667  failure=True
668  logging.debug("====== FAILED ======")
669  if failure: sys.exit(-1)
670 
671 
673  """!
674  @ingroup Testclasses
675  Compares several trajectories to a reference trajectory by using function interpolation.
676 
677  \ref TrajectoryComparer_keys "Configuration file keys" this class understands.
678  """
679 
680  ## @addtogroup TestclassKeys
681  #
682  # @anchor TrajectoryComparer_keys
683  # ## TrajectoryComparer configuration file keys
684  # * `pdf`: Save plots to this pdf file.
685  # * `reference`: Section of reference trajectory
686  # * `trajectories`: List of comma-separated trajectories which should be compared to the reference.
687  #
688  # The following configuration keys are read from the 'target'-sections.
689  # * `runmodes_<test>`: For the compare test <test>, only use these runmodes.
690  # * `columns_<test>`: Use these columns of the output files for the comparison.
691  # * `epsilon_<runmode>_<test>`: List of acceptable deviations for the given runmode and comparison test.
692  # * `postprocess_local`: Name of a global function which expects the data array as input and postprocesses the data.
693  # * `format_local`: specifies which columns are floats (`f`) and which are complex numbers (`c`). Example:
694  # "f+f+c+c" will result in 6 columns; the two complex-number columns are split into real and imaginary parts.
695  # * `start_<test>`: The first row of the data lines to consider for the comparison test `<test>`.
696  # * `length_<test>`: How many lines of data to consider for the comparison test `<test>`.
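# An illustrative configuration (section names invented):
#
#   [compare_decay]
#   reference=decay_master
#   trajectories=decay_single,decay_ensemble
#   pdf=compare_decay.pdf
#
# Each listed trajectory is interpolated and compared, column by column,
# against the interpolated reference trajectory (taken from the first runmode
# of [decay_master]); the target sections additionally provide the columns_*
# and epsilon_* keys listed above, and all plots end up in compare_decay.pdf
# in the output directory.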
697 
698 
699  def run(self):
700  """!
701  Runs the test.
702  """
703  trajectories=self.get_option('trajectories',required=True).split(',')
704  failure=False
705  self.start_pdf()
706  reference_plotted=dict()
707  for traj in trajectories:
708  for runmode in self._filter_runmodes(section=traj):
709  for n in range(len(self._get_columns(traj,runmode))):
710  self.figureLegendRight(ylabel='value '+str(n+1), title=self.test, n=n)
711 
712  data,timeArray,data_label=self._get_data(section=traj,runmode=runmode,n=n)
713  reference,reference_label=self._get_reference(section=traj,runmode=runmode,n=n)
714  if not reference_plotted.has_key((reference_label,n)):
715  self.plot(timeArray,reference(timeArray),label=reference_label)
716  reference_plotted[(reference_label,n)]=True
717  self.plot(timeArray,data(timeArray),label=data_label)
718  logging.debug("Evaluating {0}, value number {1}.".format(data_label,n+1))
719  eps=self._get_eps(runmode, traj, n)
720  if not self._regression(reference,data,timeArray,eps):
721  logging.debug("====== FAILED ======")
722  failure=True
723  self.close_pdf()
724  if failure:
725  sys.exit(-1)
726 
727  def _get_eps(self, runmode, section, n):
728  return float(self.get_option('epsilon_'+runmode+'_'+self.test,section=section,required=True).split(',')[n])
729 
730  def _get_columns(self,section,runmode):
731  return map(int,self.get_option('columns_'+self.test,section=section,required=True).split(','))
732 
733  def _get_reference(self,section,runmode,n):
734  reference=self.get_option('reference',required=True)
735  reference_runmode=self.runmodes(section=reference)[0]
736  result=self._get_data(section=reference,runmode=reference_runmode,n=n)
737  return result[0],result[2]
738 
739  def _get_data(self,section,runmode,n):
740  fname=self.get_option('postprocess_local',section=section)
741  format=self.get_option('format_local',section=section)
742  length=self.get_option('length_'+self.test,section=section)
743  start=self.get_option('start_'+self.test,section=section)
744  postprocess=globals()[fname] if not fname is None else lambda x: x
745  result=postprocess(load_sv(self.output(runmode=runmode,section=section),format=format))
746  if not start is None: result=result[int(start):]
747  if not length is None: result=result[:int(length)]
748  timeArray = result[:,0]
749  data = result[:,self._get_columns(section,runmode)[n]]
750  return self._interpolate(timeArray,data),timeArray,os.path.basename(self.output(runmode,section))
751 
752  def _interpolate(self,timeArray,array):
753  return scipy.interpolate.interp1d(timeArray,array)
754 
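# The deviation measure used below is the integral of (f1(t)-f2(t))**2 over
# the common time interval, evaluated by Gaussian quadrature; a comparison
# passes if this integral stays below the configured epsilon.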
755  def _regression(self, f1, f2, timeArray, eps) :
756  t0=timeArray[ 0]
757  t1=timeArray[-1]
758  res=quadrature(lambda t : (f1(t)-f2(t))**2,t0,t1,maxiter=100)[0]
759  logging.debug("Quadrature: {0}, epsilon: {1}".format(res,eps))
760  return res<eps
761 
762 def exponential(a,l):
763  def fn(t):
764  return a*exp(-l*t)
765  return fn,"{0}*exp(-{1}*t)".format(a,l)
766 
767 def FreeParticleX(x0,p0):
768  def fn(t):
769  return x0+2*p0*t
770  return fn, "{0}+2*{1}*t".format(x0,p0)
771 
772 def FreeParticleVarX(dx0,dp0):
773  def fn(t):
774  return (dx0+4.*dp0*t**2)**.5
775  return fn, "({0}+4*{1}*t^2)^0.5".format(dx0,dp0)
776 
777 class FunctionComparer(TrajectoryComparer):
778  """!
779  @ingroup Testclasses
780  Compares several trajectories to a reference function by using function interpolation.
781 
782  \ref FunctionComparer_keys "Configuration file keys" this class understands.
783  """
784 
785  ## @addtogroup TestclassKeys
786  #
787  # @anchor FunctionComparer_keys
788  # ## FunctionComparer configuration file keys
789  # * `reference_function`: Name of a global function, which should return a tuple of a unary function and a label used in plots.
790  #
791  # The following configuration keys are read from the 'target'-sections.
792  # * `parameters_<test>`: List of tuples or a single tuple passed to the reference function.
793  # Example: `[(1,5,3),(2,2,1)]` or `(1,5,3)`. If this is a list, each entry corresponds to a column of the data file,
794  # otherwise the same parameters are used for all columns.
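# An illustrative configuration (section names invented): comparing column 2
# of a trajectory against the analytic decay 1*exp(-2*t) could look like
#
#   [decay_check]
#   reference_function=exponential
#   trajectories=decay_run
#
#   [decay_run]
#   runmodes=single
#   columns_decay_check=2
#   parameters_decay_check=(1,2)
#   epsilon_single_decay_check=1e-3
#
# exponential(1,2) then supplies the reference function and its plot label.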
795  def _get_reference(self, section, runmode, n):
796  reference = globals()[self.get_option('reference_function', required=True)]
797  parameters=self.get_option('parameters_'+self.test, section=section)
798  parameters=() if parameters is None else ast.literal_eval(parameters)
799  if type(parameters)==list:parameters=parameters[n]
800  return reference(*parameters)
801 
802 def main():
803  """!
804  \brief Main function of the Python test driver.
805 
806  Command line options are defined here. It is responsible for loading the right `cpypyqed` module
807  (release or debug) as well as instantiating and running the test class.
808  """
809  op = OptionParser()
810  cp = ConfigParser.SafeConfigParser()
811 
812  op.add_option("--test", help="the name of the test, and the name of the section in the config file")
813  op.add_option("--testclass", help="the name of the testclass to use, must implement run()")
814  op.add_option("--script", help="the script to run or the target to compile")
815  op.add_option("--configuration", help="debug or release")
816  op.add_option("--cpypyqed_builddir", help="directory for on-demand module compilation")
817  op.add_option("--cpypyqed_config", help="configure file for on-demand module compilation")
818 
819  (options,args) = op.parse_args()
820 
821  if len(args)==0: op.error("Need configuration file(s) as argument(s).")
822  cp.read(args)
823  sys.path.insert(0,cp.get('Setup','modulepath'))
824  # we can only load the io module after we know where to look for the cpypyqed package
825  global io
826  if options.configuration.lower()=="release":
827  import cpypyqed.io as io
828  elif options.configuration.lower()=="debug":
829  import cpypyqed.io_d as io
830  logging.info("Taking cpypyqed from {0}".format(io.__file__))
831 
832  if options.testclass:
833  constructor = globals()[options.testclass]
834  myTestclass = constructor(options,cp)
835  myTestclass.run()
836 
837 if __name__ == '__main__':
838  main()