Back to index

radiance  4R0+20100331
lcompare.py
Go to the documentation of this file.
00001 ''' Text comparison functions for Radiance unit testing.
00002 
00003 This allows differences in whitespace, which is why the text
00004 corpora are split into tokens first.
00005 Tokens are then converted into an appropriate data type, so
00006 that floating point items will still be considered correct
00007 even if they are slightly different, eg. as a consequence of
00008 binary rounding errors.
00009 '''
00010 
00011 import string
00012 import types
00013 
class error(Exception):
    '''Raised when a comparison finds intolerable differences.'''
    pass
00015 
00016 # internal functions
00017 def _icompare(itest, iref):
00018        '''compare ints (not public)'''
00019        if type(itest) == str:
00020               iftest = int(itest)
00021        else: iftest = itest
00022        if iftest == iref: return 1
00023        return 0
00024 
00025 def _fcompare(ftest, fref):
00026        '''compare floats (not public)'''
00027        FUZZ = 0.0000001 # XXX heuristically determined
00028        if type(ftest) == str:
00029               fftest = float(ftest)
00030        else: fftest = ftest
00031        if (fftest < (fref + FUZZ)) and (fftest > (fref - FUZZ)):
00032               return 1
00033        return 0
00034 
00035 def _typify_token(t):
00036        '''return the token as int resp. float if possible (not public)'''
00037        try: return int(t)
00038        except ValueError: pass
00039        try: return float(t)
00040        except ValueError: pass
00041        return t
00042 
00043 
00044 # public comparison functions
00045 
def lcompare(ltest, lref):
    '''Compare a list of tokens.

    ltest -- tokens under test (strings, or already-typed values).
    lref  -- reference tokens, already converted to the correct type
             (see _typify_token()).
    Raises error if there are intolerable differences; returns None
    otherwise.  String tokens must match exactly; int and float tokens
    are compared via _icompare()/_fcompare() so that small float
    rounding differences are tolerated.
    '''
    if len(ltest) != len(lref):
        raise error('List comparision failed: Different number of tokens'
                ' (%d, %d)' % (len(ltest), len(lref)))
    # The reference token's type decides which comparison applies.
    for ttest, tref in zip(ltest, lref):
        if isinstance(tref, str):
            if tref != ttest:
                raise error('Token comparison failed: "%s" != "%s"' % (ttest, tref))
        elif isinstance(tref, int) and not _icompare(ttest, tref):
            raise error('Token comparison failed: %s != %s' % (ttest, tref))
        elif isinstance(tref, float) and not _fcompare(ttest, tref):
            raise error('Token comparison failed: %s != %s' % (ttest, tref))
00063 
def llcompare(lltest, llref, ignore_empty=0, recurse=None):
    '''Compare a list of lists of tokens recursively.

    lltest -- lines under test, each a list of tokens (or a nested
              list of lists for sublists).
    llref  -- reference lines; tokens should already be of the correct
              type (see _typify_token()).
    ignore_empty -- if true, empty lines are excluded from the comparison.
    recurse -- used internally: the path of indices leading to the
               current sublist, reported in error messages.
    Raises error if there are intolerable differences.
    '''
    # NOTE: default is None, not [] -- a mutable default list would be
    # shared (and mutated) across calls.
    if recurse is None:
        recurse = []
    if ignore_empty:
        # List comprehensions keep len() working on Python 3 as well,
        # where filter() returns an iterator.
        lltest = [l for l in lltest if l]
        llref = [l for l in llref if l]
    if len(lltest) != len(llref):
        raise error('Comparision failed: Different number of lines (%d,%d)' % (
            len(lltest), len(llref)))
    for i, (ltest, lref) in enumerate(zip(lltest, llref)):
        if lref and isinstance(lref[0], (list, tuple)):
            # Nested block: descend, extending the index path.  Do NOT
            # return here -- the remaining lines still need checking.
            llcompare(ltest, lref,
                    ignore_empty=ignore_empty, recurse=recurse + [i])
            continue
        try:
            lcompare(ltest, lref)
        except error as e:
            if recurse:
                raise error('%s (line %s)' % (str(e), recurse + [i + 1]))
            else:
                raise error('%s (line %d)' % (str(e), i + 1))
00088        
def split_headers(s):
    '''Split Radiance file headers.

    s -- header text, e.g. the output of getinfo.
    Returns a list of lists of tokens suitable for llcompare().
    Lines of the form "key=value" are tokenized on both sides of the
    '=' separately, with a literal '=' token kept in between.
    '''
    # str methods replace the legacy string-module functions, which
    # were removed in Python 3.
    nll = []
    for line in s.split('\n'):
        line = line.strip()
        parts = line.split('=', 1)
        if len(parts) == 2:
            left = [_typify_token(t) for t in parts[0].split()]
            right = [_typify_token(t) for t in parts[1].split()]
            nll.append(left + ['='] + right)
        else:
            nll.append([_typify_token(t) for t in line.split()])
    return nll
00103 
def split_rad(s):
    '''Split the contents of a scene description string.

    s -- the complete scene description text.
    Returns a list of lists of tokens suitable for llcompare(),
    one inner list per input line.
    '''
    # str methods replace the legacy string-module functions, which
    # were removed in Python 3.
    return [[_typify_token(t) for t in line.strip().split()]
            for line in s.split('\n')]
00112 
def split_radfile(fn):
    '''Split the contents of a file containing a scene description.

    fn -- the name (path) of the file to read.
    Returns a list of lists of tokens suitable for llcompare().
    '''
    # 'with' guarantees the file is closed even if reading raises.
    with open(fn, 'r') as f:
        return split_rad(f.read())
00120