
python-biopython  1.60
MarkovModel.py
"""
This is an implementation of a state-emitting MarkovModel.  I am using
terminology similar to Manning and Schütze.

Functions:
train_bw        Train a Markov model using the Baum-Welch algorithm.
train_visible   Train a visible Markov model using MLE.
find_states     Find a state sequence that explains some observations.

load            Load a MarkovModel.
save            Save a MarkovModel.

Classes:
MarkovModel     Holds the description of a Markov model.
"""

import numpy

try:
    logaddexp = numpy.logaddexp
except AttributeError:
    # Numpy versions older than 1.3 do not contain logaddexp.
    # Once we require Numpy version 1.3 or later, we should revisit this
    # module to see if we can simplify some of the other functions in
    # this module.
    import warnings
    warnings.warn("For optimal speed, please update to Numpy version 1.3 or later (current version is %s)" % numpy.__version__)
    def logaddexp(logx, logy):
        if logy - logx > 100:
            return logy
        elif logx - logy > 100:
            return logx
        minxy = min(logx, logy)
        return minxy + numpy.log(numpy.exp(logx-minxy) + numpy.exp(logy-minxy))
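    # A small worked example of what logaddexp computes:
    #   logaddexp(numpy.log(0.25), numpy.log(0.25)) == numpy.log(0.5)
    # i.e. two probabilities are added without ever leaving log space,
    # which is what lets the rest of this module avoid underflow when the
    # probabilities involved become very small.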


def itemindex(values):
    d = {}
    entries = enumerate(values[::-1])
    n = len(values)-1
    for index, key in entries:
        d[key] = n-index
    return d
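# For illustration (the values here are invented): itemindex(['A', 'C', 'G',
# 'T']) returns {'A': 0, 'C': 1, 'G': 2, 'T': 3}.  Because the list is walked
# in reverse, a value that occurs more than once keeps the index of its first
# occurrence.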

numpy.random.seed()

VERY_SMALL_NUMBER = 1E-300
LOG0 = numpy.log(VERY_SMALL_NUMBER)

class MarkovModel(object):
    def __init__(self, states, alphabet,
                 p_initial=None, p_transition=None, p_emission=None):
        self.states = states
        self.alphabet = alphabet
        self.p_initial = p_initial
        self.p_transition = p_transition
        self.p_emission = p_emission

    def __str__(self):
        import StringIO
        handle = StringIO.StringIO()
        save(self, handle)
        handle.seek(0)
        return handle.read()

def _readline_and_check_start(handle, start):
    line = handle.readline()
    if not line.startswith(start):
        raise ValueError("I expected %r but got %r" % (start, line))
    return line

def load(handle):
    """load(handle) -> MarkovModel()"""
    # Load the states.
    line = _readline_and_check_start(handle, "STATES:")
    states = line.split()[1:]

    # Load the alphabet.
    line = _readline_and_check_start(handle, "ALPHABET:")
    alphabet = line.split()[1:]

    mm = MarkovModel(states, alphabet)
    N, M = len(states), len(alphabet)

    # Load the initial probabilities.
    mm.p_initial = numpy.zeros(N)
    line = _readline_and_check_start(handle, "INITIAL:")
    for i in range(len(states)):
        line = _readline_and_check_start(handle, "  %s:" % states[i])
        mm.p_initial[i] = float(line.split()[-1])

    # Load the transition probabilities.
    mm.p_transition = numpy.zeros((N, N))
    line = _readline_and_check_start(handle, "TRANSITION:")
    for i in range(len(states)):
        line = _readline_and_check_start(handle, "  %s:" % states[i])
        mm.p_transition[i,:] = map(float, line.split()[1:])

    # Load the emission probabilities.
    mm.p_emission = numpy.zeros((N, M))
    line = _readline_and_check_start(handle, "EMISSION:")
    for i in range(len(states)):
        line = _readline_and_check_start(handle, "  %s:" % states[i])
        mm.p_emission[i,:] = map(float, line.split()[1:])

    return mm

def save(mm, handle):
    """save(mm, handle)"""
    # This will fail if there are spaces in the states or alphabet.
    w = handle.write
    w("STATES: %s\n" % ' '.join(mm.states))
    w("ALPHABET: %s\n" % ' '.join(mm.alphabet))
    w("INITIAL:\n")
    for i in range(len(mm.p_initial)):
        w("  %s: %g\n" % (mm.states[i], mm.p_initial[i]))
    w("TRANSITION:\n")
    for i in range(len(mm.p_transition)):
        x = map(str, mm.p_transition[i])
        w("  %s: %s\n" % (mm.states[i], ' '.join(x)))
    w("EMISSION:\n")
    for i in range(len(mm.p_emission)):
        x = map(str, mm.p_emission[i])
        w("  %s: %s\n" % (mm.states[i], ' '.join(x)))
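# As a rough sketch of the plain-text format that save() writes and load()
# reads back (the state names S1/S2 and all numbers are invented here for
# illustration):
#
#   STATES: S1 S2
#   ALPHABET: A B
#   INITIAL:
#     S1: 0.5
#     S2: 0.5
#   TRANSITION:
#     S1: 0.9 0.1
#     S2: 0.2 0.8
#   EMISSION:
#     S1: 0.7 0.3
#     S2: 0.4 0.6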

# XXX allow them to specify starting points
def train_bw(states, alphabet, training_data,
             pseudo_initial=None, pseudo_transition=None, pseudo_emission=None,
             update_fn=None,
             ):
    """train_bw(states, alphabet, training_data[, pseudo_initial]
    [, pseudo_transition][, pseudo_emission][, update_fn]) -> MarkovModel

    Train a MarkovModel using the Baum-Welch algorithm.  states is a list
    of strings that describe the names of each state.  alphabet is a
    list of objects that indicate the allowed outputs.  training_data
    is a list of observations.  Each observation is a list of objects
    from the alphabet.

    pseudo_initial, pseudo_transition, and pseudo_emission are
    optional parameters that you can use to assign pseudo-counts to
    different matrices.  They should be matrices of the appropriate
    size that contain numbers to add to each parameter matrix, before
    normalization.

    update_fn is an optional callback that takes parameters
    (iteration, log_likelihood).  It is called once per iteration.

    """
    N, M = len(states), len(alphabet)
    if not training_data:
        raise ValueError("No training data given.")
    if pseudo_initial is not None:
        pseudo_initial = numpy.asarray(pseudo_initial)
        if pseudo_initial.shape != (N,):
            raise ValueError("pseudo_initial not shape len(states)")
    if pseudo_transition is not None:
        pseudo_transition = numpy.asarray(pseudo_transition)
        if pseudo_transition.shape != (N,N):
            raise ValueError("pseudo_transition not shape " + \
                             "len(states) X len(states)")
    if pseudo_emission is not None:
        pseudo_emission = numpy.asarray(pseudo_emission)
        if pseudo_emission.shape != (N,M):
            raise ValueError("pseudo_emission not shape " + \
                             "len(states) X len(alphabet)")

    # Training data is given as a list of members of the alphabet.
    # Replace those with indexes into the alphabet list for easier
    # computation.
    training_outputs = []
    indexes = itemindex(alphabet)
    for outputs in training_data:
        training_outputs.append([indexes[x] for x in outputs])

    # Do some sanity checking on the outputs.
    lengths = map(len, training_outputs)
    if min(lengths) == 0:
        raise ValueError("I got training data with outputs of length 0")

    # Do the training with Baum-Welch.
    x = _baum_welch(N, M, training_outputs,
                    pseudo_initial=pseudo_initial,
                    pseudo_transition=pseudo_transition,
                    pseudo_emission=pseudo_emission,
                    update_fn=update_fn)
    p_initial, p_transition, p_emission = x
    return MarkovModel(states, alphabet, p_initial, p_transition, p_emission)
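
# A minimal usage sketch for train_bw (the states, alphabet, and observation
# sequences below are invented for illustration):
#
#   states = ["rainy", "sunny"]
#   alphabet = ["walk", "shop", "clean"]
#   observations = [["walk", "shop", "walk"], ["clean", "shop"]]
#   mm = train_bw(states, alphabet, observations)
#   print mm    # prints the model in the same format that save() writes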

MAX_ITERATIONS = 1000
def _baum_welch(N, M, training_outputs,
                p_initial=None, p_transition=None, p_emission=None,
                pseudo_initial=None, pseudo_transition=None,
                pseudo_emission=None, update_fn=None):
    # Returns (p_initial, p_transition, p_emission)
    if p_initial is None:
        p_initial = _random_norm(N)
    else:
        p_initial = _copy_and_check(p_initial, (N,))

    if p_transition is None:
        p_transition = _random_norm((N,N))
    else:
        p_transition = _copy_and_check(p_transition, (N,N))
    if p_emission is None:
        p_emission = _random_norm((N,M))
    else:
        p_emission = _copy_and_check(p_emission, (N,M))

    # Do all the calculations in log space to avoid underflows.
    lp_initial, lp_transition, lp_emission = map(
        numpy.log, (p_initial, p_transition, p_emission))
    if pseudo_initial is not None:
        lpseudo_initial = numpy.log(pseudo_initial)
    else:
        lpseudo_initial = None
    if pseudo_transition is not None:
        lpseudo_transition = numpy.log(pseudo_transition)
    else:
        lpseudo_transition = None
    if pseudo_emission is not None:
        lpseudo_emission = numpy.log(pseudo_emission)
    else:
        lpseudo_emission = None

    # Iterate through each sequence of output, updating the parameters
    # to the HMM.  Stop when the log likelihoods of the sequences
    # stop changing.
    prev_llik = None
    for i in range(MAX_ITERATIONS):
        llik = LOG0
        for outputs in training_outputs:
            x = _baum_welch_one(
                N, M, outputs,
                lp_initial, lp_transition, lp_emission,
                lpseudo_initial, lpseudo_transition, lpseudo_emission,)
            llik += x
        if update_fn is not None:
            update_fn(i, llik)
        if prev_llik is not None and numpy.fabs(prev_llik-llik) < 0.1:
            break
        prev_llik = llik
    else:
        raise RuntimeError("HMM did not converge in %d iterations" \
                           % MAX_ITERATIONS)

    # Return everything back in normal space.
    return map(numpy.exp, (lp_initial, lp_transition, lp_emission))

def _baum_welch_one(N, M, outputs,
                    lp_initial, lp_transition, lp_emission,
                    lpseudo_initial, lpseudo_transition, lpseudo_emission):
    # Do one iteration of Baum-Welch based on a sequence of output.
    # NOTE: This will change the values of lp_initial, lp_transition,
    # and lp_emission in place.
    T = len(outputs)
    fmat = _forward(N, T, lp_initial, lp_transition, lp_emission, outputs)
    bmat = _backward(N, T, lp_transition, lp_emission, outputs)

    # Calculate the probability of traversing each arc for any given
    # transition.
    lp_arc = numpy.zeros((N, N, T))
    for t in range(T):
        k = outputs[t]
        lp_traverse = numpy.zeros((N, N)) # P going over one arc.
        for i in range(N):
            for j in range(N):
                # P(getting to this arc)
                # P(making this transition)
                # P(emitting this character)
                # P(going to the end)
                lp = fmat[i][t] + \
                     lp_transition[i][j] + \
                     lp_emission[i][k] + \
                     bmat[j][t+1]
                lp_traverse[i][j] = lp
        # Normalize the probability for this time step.
        lp_arc[:,:,t] = lp_traverse - _logsum(lp_traverse)

    # Sum of all the transitions out of state i at time t.
    lp_arcout_t = numpy.zeros((N, T))
    for t in range(T):
        for i in range(N):
            lp_arcout_t[i][t] = _logsum(lp_arc[i,:,t])

    # Sum of all the transitions out of state i.
    lp_arcout = numpy.zeros(N)
    for i in range(N):
        lp_arcout[i] = _logsum(lp_arcout_t[i,:])

    # UPDATE P_INITIAL.  Assign into the existing array (rather than
    # rebinding the local name) so that the caller's lp_initial really
    # is updated in place, as the NOTE above promises.
    lp_initial[:] = lp_arcout_t[:,0]
    if lpseudo_initial is not None:
        lp_initial[:] = _logvecadd(lp_initial, lpseudo_initial)
        lp_initial[:] = lp_initial - _logsum(lp_initial)

    # UPDATE P_TRANSITION.  p_transition[i][j] is the sum of all the
    # transitions from i to j, normalized by the sum of the
    # transitions out of i.
    for i in range(N):
        for j in range(N):
            lp_transition[i][j] = _logsum(lp_arc[i,j,:]) - lp_arcout[i]
        if lpseudo_transition is not None:
            lp_transition[i] = _logvecadd(lp_transition[i], lpseudo_transition[i])
            lp_transition[i] = lp_transition[i] - _logsum(lp_transition[i])

    # UPDATE P_EMISSION.  lp_emission[i][k] is the sum of all the
    # transitions out of i when k is observed, divided by the sum of
    # the transitions out of i.
    for i in range(N):
        ksum = numpy.zeros(M)+LOG0    # ksum[k] is the sum of all i with k.
        for t in range(T):
            k = outputs[t]
            for j in range(N):
                ksum[k] = logaddexp(ksum[k], lp_arc[i,j,t])
        ksum = ksum - _logsum(ksum)      # Normalize
        if lpseudo_emission is not None:
            ksum = _logvecadd(ksum, lpseudo_emission[i])
            ksum = ksum - _logsum(ksum)  # Renormalize
        lp_emission[i,:] = ksum

    # Calculate the log likelihood of the output based on the forward
    # matrix.  Since the parameters of the HMM have changed, the log
    # likelihoods are going to be a step behind, and we might be doing
    # one extra iteration of training.  The alternative is to rerun
    # the _forward algorithm and calculate from the clean one, but
    # that may be more expensive than overshooting the training by one
    # step.
    return _logsum(fmat[:,T])

def _forward(N, T, lp_initial, lp_transition, lp_emission, outputs):
    # Implement the forward algorithm.  This actually calculates a
    # Nx(T+1) matrix, where the last column is the total probability
    # of the output.

    matrix = numpy.zeros((N, T+1))

    # Initialize the first column to be the initial values.
    matrix[:,0] = lp_initial
    for t in range(1, T+1):
        k = outputs[t-1]
        for j in range(N):
            # The probability of the state is the sum of the
            # transitions from all the states from time t-1.
            lprob = LOG0
            for i in range(N):
                lp = matrix[i][t-1] + \
                     lp_transition[i][j] + \
                     lp_emission[i][k]
                lprob = logaddexp(lprob, lp)
            matrix[j][t] = lprob
    return matrix

def _backward(N, T, lp_transition, lp_emission, outputs):
    matrix = numpy.zeros((N, T+1))
    for t in range(T-1, -1, -1):
        k = outputs[t]
        for i in range(N):
            # The probability of the state is the sum of the
            # transitions from all the states from time t+1.
            lprob = LOG0
            for j in range(N):
                lp = matrix[j][t+1] + \
                     lp_transition[i][j] + \
                     lp_emission[i][k]
                lprob = logaddexp(lprob, lp)
            matrix[i][t] = lprob
    return matrix

def train_visible(states, alphabet, training_data,
                  pseudo_initial=None, pseudo_transition=None,
                  pseudo_emission=None):
    """train_visible(states, alphabet, training_data[, pseudo_initial]
    [, pseudo_transition][, pseudo_emission]) -> MarkovModel

    Train a visible MarkovModel using maximum likelihood estimates
    for each of the parameters.  states is a list of strings that
    describe the names of each state.  alphabet is a list of objects
    that indicate the allowed outputs.  training_data is a list of
    (outputs, observed states) pairs, where outputs is a list of
    emissions from the alphabet and observed states is a list of
    states from states.

    pseudo_initial, pseudo_transition, and pseudo_emission are
    optional parameters that you can use to assign pseudo-counts to
    different matrices.  They should be matrices of the appropriate
    size that contain numbers to add to each parameter matrix.

    """
    N, M = len(states), len(alphabet)
    if pseudo_initial is not None:
        pseudo_initial = numpy.asarray(pseudo_initial)
        if pseudo_initial.shape != (N,):
            raise ValueError("pseudo_initial not shape len(states)")
    if pseudo_transition is not None:
        pseudo_transition = numpy.asarray(pseudo_transition)
        if pseudo_transition.shape != (N,N):
            raise ValueError("pseudo_transition not shape " + \
                             "len(states) X len(states)")
    if pseudo_emission is not None:
        pseudo_emission = numpy.asarray(pseudo_emission)
        if pseudo_emission.shape != (N,M):
            raise ValueError("pseudo_emission not shape " + \
                             "len(states) X len(alphabet)")

    # Training data is given as a list of members of the alphabet.
    # Replace those with indexes into the alphabet list for easier
    # computation.
    training_states, training_outputs = [], []
    states_indexes = itemindex(states)
    outputs_indexes = itemindex(alphabet)
    for toutputs, tstates in training_data:
        if len(tstates) != len(toutputs):
            raise ValueError("states and outputs not aligned")
        training_states.append([states_indexes[x] for x in tstates])
        training_outputs.append([outputs_indexes[x] for x in toutputs])

    x = _mle(N, M, training_outputs, training_states,
             pseudo_initial, pseudo_transition, pseudo_emission)
    p_initial, p_transition, p_emission = x

    return MarkovModel(states, alphabet, p_initial, p_transition, p_emission)
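
# A minimal usage sketch for train_visible; each training example pairs an
# output sequence with the state sequence that generated it (the dice names
# and rolls below are invented for illustration):
#
#   states = ["F", "L"]                            # fair die, loaded die
#   alphabet = ["1", "2", "3", "4", "5", "6"]
#   data = [(["1", "6", "6"], ["F", "L", "L"]),
#           (["2", "3", "6"], ["F", "F", "L"])]
#   mm = train_visible(states, alphabet, data)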

def _mle(N, M, training_outputs, training_states, pseudo_initial,
         pseudo_transition, pseudo_emission):
    # p_initial is the probability that a sequence of states starts
    # off with a particular one.
    p_initial = numpy.zeros(N)
    if pseudo_initial is not None:
        p_initial = p_initial + pseudo_initial
    for states in training_states:
        p_initial[states[0]] += 1
    p_initial = _normalize(p_initial)

    # p_transition is the probability that a state leads to the next
    # one.  C(i,j)/C(i) where i and j are states.
    p_transition = numpy.zeros((N,N))
    if pseudo_transition is not None:
        p_transition = p_transition + pseudo_transition
    for states in training_states:
        for n in range(len(states)-1):
            i, j = states[n], states[n+1]
            p_transition[i, j] += 1
    for i in range(len(p_transition)):
        p_transition[i,:] = p_transition[i,:] / sum(p_transition[i,:])

    # p_emission is the probability of an output given a state.
    # C(s,o)/C(s) where o is an output and s is a state.  Start from a
    # matrix of ones so that no emission gets probability zero, and add
    # any pseudo-counts on top before counting the observations.
    p_emission = numpy.ones((N,M))
    if pseudo_emission is not None:
        p_emission = p_emission + pseudo_emission
    for outputs, states in zip(training_outputs, training_states):
        for o, s in zip(outputs, states):
            p_emission[s, o] += 1
    for i in range(len(p_emission)):
        p_emission[i,:] = p_emission[i,:] / sum(p_emission[i,:])

    return p_initial, p_transition, p_emission
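
# A small worked example of the counting above (sequence invented for
# illustration): with N = 2 states and the single observed state sequence
# [0, 0, 1], p_initial gets a count only for state 0 and normalizes to
# [1.0, 0.0], while the transition counts are C(0,0) = 1 and C(0,1) = 1,
# so row 0 of p_transition normalizes to [0.5, 0.5].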

def _argmaxes(vector, allowance=None):
    # Return the index of the largest element as a one-element list.  The
    # allowance parameter is currently unused, so ties are not reported.
    return [numpy.argmax(vector)]

def find_states(markov_model, output):
    """find_states(markov_model, output) -> list of (states, score)"""
    mm = markov_model
    N = len(mm.states)

    # _viterbi does calculations in log space.  Add a tiny bit to the
    # matrices so that the logs will not break.
    x = mm.p_initial + VERY_SMALL_NUMBER
    y = mm.p_transition + VERY_SMALL_NUMBER
    z = mm.p_emission + VERY_SMALL_NUMBER
    lp_initial, lp_transition, lp_emission = map(numpy.log, (x, y, z))
    # Change output into a list of indexes into the alphabet.
    indexes = itemindex(mm.alphabet)
    output = [indexes[x] for x in output]

    # Run the Viterbi algorithm.
    results = _viterbi(N, lp_initial, lp_transition, lp_emission, output)

    for i in range(len(results)):
        states, score = results[i]
        results[i] = [mm.states[x] for x in states], numpy.exp(score)
    return results
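
# Continuing the usage sketch above: once a model has been trained, decoding
# an observation sequence returns the most likely state path together with
# its probability (the sequence is invented for illustration):
#
#   for states, score in find_states(mm, ["1", "6", "6"]):
#       print states, score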

def _viterbi(N, lp_initial, lp_transition, lp_emission, output):
    # The Viterbi algorithm finds the most likely set of states for a
    # given output.  Returns a list of states.

    T = len(output)
    # Store the backtrace in a NxT matrix.
    backtrace = []    # list of indexes of states in previous timestep.
    for i in range(N):
        backtrace.append([None] * T)

    # Store the best scores.
    scores = numpy.zeros((N, T))
    scores[:,0] = lp_initial + lp_emission[:,output[0]]
    for t in range(1, T):
        k = output[t]
        for j in range(N):
            # Find the most likely place it came from.
            i_scores = scores[:,t-1] + \
                       lp_transition[:,j] + \
                       lp_emission[j,k]
            indexes = _argmaxes(i_scores)
            scores[j,t] = i_scores[indexes[0]]
            backtrace[j][t] = indexes

    # Do the backtrace.  First, find a good place to start.  Then,
    # we'll follow the backtrace matrix to find the list of states.
    # In the event of ties, there may be multiple paths back through
    # the matrix, which implies a recursive solution.  We'll simulate
    # it by keeping our own stack.
    in_process = []    # list of (t, states, score)
    results = []       # return values.  list of (states, score)
    indexes = _argmaxes(scores[:,T-1])      # pick the first place
    for i in indexes:
        in_process.append((T-1, [i], scores[i][T-1]))
    while in_process:
        t, states, score = in_process.pop()
        if t == 0:
            results.append((states, score))
        else:
            indexes = backtrace[states[0]][t]
            for i in indexes:
                in_process.append((t-1, [i]+states, score))
    return results


def _normalize(matrix):
    # Make sure numbers add up to 1.0
    if len(matrix.shape) == 1:
        matrix = matrix / float(sum(matrix))
    elif len(matrix.shape) == 2:
        # Normalize by rows.
        for i in range(len(matrix)):
            matrix[i,:] = matrix[i,:] / sum(matrix[i,:])
    else:
        raise ValueError("I cannot handle matrices of that shape")
    return matrix
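# For example, _normalize(numpy.array([1.0, 3.0])) returns [0.25, 0.75];
# for a two-dimensional matrix each row is normalized to sum to 1
# independently.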

def _uniform_norm(shape):
    matrix = numpy.ones(shape)
    return _normalize(matrix)

def _random_norm(shape):
    matrix = numpy.random.random(shape)
    return _normalize(matrix)

def _copy_and_check(matrix, desired_shape):
    # Copy the matrix.
    matrix = numpy.array(matrix, copy=1)
    # Check the dimensions.
    if matrix.shape != desired_shape:
        raise ValueError("Incorrect dimension")
    # Make sure it's normalized.
    if len(matrix.shape) == 1:
        if numpy.fabs(sum(matrix)-1.0) > 0.01:
            raise ValueError("matrix not normalized to 1.0")
    elif len(matrix.shape) == 2:
        for i in range(len(matrix)):
            if numpy.fabs(sum(matrix[i])-1.0) > 0.01:
                raise ValueError("matrix %d not normalized to 1.0" % i)
    else:
        raise ValueError("I don't handle matrices > 2 dimensions")
    return matrix

def _logsum(matrix):
    if len(matrix.shape) > 1:
        vec = numpy.reshape(matrix, (numpy.product(matrix.shape),))
    else:
        vec = matrix
    sum = LOG0
    for num in vec:
        sum = logaddexp(sum, num)
    return sum

def _logvecadd(logvec1, logvec2):
    assert len(logvec1) == len(logvec2), "vectors aren't the same length"
    sumvec = numpy.zeros(len(logvec1))
    for i in range(len(logvec1)):
        sumvec[i] = logaddexp(logvec1[i], logvec2[i])
    return sumvec

def _exp_logsum(numbers):
    sum = _logsum(numbers)
    return numpy.exp(sum)