
python3.2  3.2.2
test.test_zlib.CompressObjectTestCase Class Reference


Public Member Functions

def test_pair
def test_compressoptions
def test_compressincremental
def test_decompinc
def test_decompincflush
def test_decompimax
def test_decompressmaxlen
def test_decompressmaxlenflush
def test_maxlenmisc
def test_clear_unconsumed_tail
def test_flushes
def test_odd_flush
def test_empty_flush
def test_decompress_incomplete_stream
def test_compresscopy
def test_badcompresscopy
def test_decompresscopy
def test_baddecompresscopy
def test_big_compress_buffer
def test_big_decompress_buffer
def test_length_overflow
def check_big_compress_buffer
def check_big_decompress_buffer

Public Attributes

_PyObject_HEAD_EXTRA Py_ssize_t ob_refcnt
struct _typeobject * ob_type

Detailed Description

Definition at line 201 of file test_zlib.py.
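
The test methods below exercise zlib's streaming compression API (compressobj/decompressobj). As a minimal sketch of the round trip they cover — the payload here is just an illustrative stand-in for the HAMLET_SCENE constant used by the tests:

    import zlib

    payload = b'spam and eggs ' * 1000

    co = zlib.compressobj()                    # streaming compressor
    compressed = co.compress(payload) + co.flush()

    dco = zlib.decompressobj()                 # streaming decompressor
    restored = dco.decompress(compressed) + dco.flush()
    assert restored == payload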


Member Function Documentation

def test.test_zlib.BaseCompressTestCase.check_big_compress_buffer ( self, size, compress_func ) [inherited]

Definition at line 126 of file test_zlib.py.

    def check_big_compress_buffer(self, size, compress_func):
        _1M = 1024 * 1024
        fmt = "%%0%dx" % (2 * _1M)
        # Generate 10MB worth of random, and expand it by repeating it.
        # The assumption is that zlib's memory is not big enough to exploit
        # such spread out redundancy.
        data = b''.join([random.getrandbits(8 * _1M).to_bytes(_1M, 'little')
                        for i in range(10)])
        data = data * (size // len(data) + 1)
        try:
            compress_func(data)
        finally:
            # Release memory
            data = None


def test.test_zlib.BaseCompressTestCase.check_big_decompress_buffer ( self, size, decompress_func ) [inherited]

Definition at line 141 of file test_zlib.py.

    def check_big_decompress_buffer(self, size, decompress_func):
        data = b'x' * size
        try:
            compressed = zlib.compress(data, 1)
        finally:
            # Release memory
            data = None
        data = decompress_func(compressed)
        # Sanity check
        try:
            self.assertEqual(len(data), size)
            self.assertEqual(len(data.strip(b'x')), 0)
        finally:
            data = None


def test.test_zlib.CompressObjectTestCase.test_badcompresscopy ( self )

Definition at line 473 of file test_zlib.py.

        def test_badcompresscopy(self):
            # Test copying a compression object in an inconsistent state
            c = zlib.compressobj()
            c.compress(HAMLET_SCENE)
            c.flush()
            self.assertRaises(ValueError, c.copy)


def test.test_zlib.CompressObjectTestCase.test_baddecompresscopy ( self )

Definition at line 504 of file test_zlib.py.

        def test_baddecompresscopy(self):
            # Test copying a decompression object in an inconsistent state
            data = zlib.compress(HAMLET_SCENE)
            d = zlib.decompressobj()
            d.decompress(data)
            d.flush()
            self.assertRaises(ValueError, d.copy)


def test.test_zlib.CompressObjectTestCase.test_big_compress_buffer ( self, size )

Definition at line 515 of file test_zlib.py.

    def test_big_compress_buffer(self, size):
        c = zlib.compressobj(1)
        compress = lambda s: c.compress(s) + c.flush()
        self.check_big_compress_buffer(size, compress)


def test.test_zlib.CompressObjectTestCase.test_big_decompress_buffer ( self, size )

Definition at line 521 of file test_zlib.py.

    def test_big_decompress_buffer(self, size):
        d = zlib.decompressobj()
        decompress = lambda s: d.decompress(s) + d.flush()
        self.check_big_decompress_buffer(size, decompress)


def test.test_zlib.CompressObjectTestCase.test_clear_unconsumed_tail ( self )

Definition at line 364 of file test_zlib.py.

    def test_clear_unconsumed_tail(self):
        # Issue #12050: calling decompress() without providing max_length
        # should clear the unconsumed_tail attribute.
        cdata = b"x\x9cKLJ\x06\x00\x02M\x01"    # "abc"
        dco = zlib.decompressobj()
        ddata = dco.decompress(cdata, 1)
        ddata += dco.decompress(dco.unconsumed_tail)
        self.assertEqual(dco.unconsumed_tail, b"")


def test.test_zlib.CompressObjectTestCase.test_compresscopy ( self )

Definition at line 451 of file test_zlib.py.

        def test_compresscopy(self):
            # Test copying a compression object
            data0 = HAMLET_SCENE
            data1 = bytes(str(HAMLET_SCENE, "ascii").swapcase(), "ascii")
            c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
            bufs0 = []
            bufs0.append(c0.compress(data0))

            c1 = c0.copy()
            bufs1 = bufs0[:]

            bufs0.append(c0.compress(data0))
            bufs0.append(c0.flush())
            s0 = b''.join(bufs0)

            bufs1.append(c1.compress(data1))
            bufs1.append(c1.flush())
            s1 = b''.join(bufs1)

            self.assertEqual(zlib.decompress(s0), data0 + data0)
            self.assertEqual(zlib.decompress(s1), data0 + data1)


def test.test_zlib.CompressObjectTestCase.test_compressincremental ( self )

Definition at line 237 of file test_zlib.py.

    def test_compressincremental(self):
        # compress object in steps, decompress object as one-shot
        data = HAMLET_SCENE * 128
        co = zlib.compressobj()
        bufs = []
        for i in range(0, len(data), 256):
            bufs.append(co.compress(data[i:i+256]))
        bufs.append(co.flush())
        combuf = b''.join(bufs)

        dco = zlib.decompressobj()
        y1 = dco.decompress(b''.join(bufs))
        y2 = dco.flush()
        self.assertEqual(data, y1 + y2)


def test.test_zlib.CompressObjectTestCase.test_compressoptions ( self )

Definition at line 222 of file test_zlib.py.

    def test_compressoptions(self):
        # specify lots of options to compressobj()
        level = 2
        method = zlib.DEFLATED
        wbits = -12
        memlevel = 9
        strategy = zlib.Z_FILTERED
        co = zlib.compressobj(level, method, wbits, memlevel, strategy)
        x1 = co.compress(HAMLET_SCENE)
        x2 = co.flush()
        dco = zlib.decompressobj(wbits)
        y1 = dco.decompress(x1 + x2)
        y2 = dco.flush()
        self.assertEqual(HAMLET_SCENE, y1 + y2)


def test.test_zlib.CompressObjectTestCase.test_decompimax ( self, source = None, cx = 256, dcx = 64 )

Definition at line 296 of file test_zlib.py.

    def test_decompimax(self, source=None, cx=256, dcx=64):
        # compress in steps, decompress in length-restricted steps
        source = source or HAMLET_SCENE
        # Check a decompression object with max_length specified
        data = source * 128
        co = zlib.compressobj()
        bufs = []
        for i in range(0, len(data), cx):
            bufs.append(co.compress(data[i:i+cx]))
        bufs.append(co.flush())
        combuf = b''.join(bufs)
        self.assertEqual(data, zlib.decompress(combuf),
                         'compressed data failure')

        dco = zlib.decompressobj()
        bufs = []
        cb = combuf
        while cb:
            #max_length = 1 + len(cb)//10
            chunk = dco.decompress(cb, dcx)
            self.assertFalse(len(chunk) > dcx,
                    'chunk too big (%d>%d)' % (len(chunk), dcx))
            bufs.append(chunk)
            cb = dco.unconsumed_tail
        bufs.append(dco.flush())
        self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')


def test.test_zlib.CompressObjectTestCase.test_decompinc ( self, flush = False, source = None, cx = 256, dcx = 64 )

Definition at line 252 of file test_zlib.py.

    def test_decompinc(self, flush=False, source=None, cx=256, dcx=64):
        # compress object in steps, decompress object in steps
        source = source or HAMLET_SCENE
        data = source * 128
        co = zlib.compressobj()
        bufs = []
        for i in range(0, len(data), cx):
            bufs.append(co.compress(data[i:i+cx]))
        bufs.append(co.flush())
        combuf = b''.join(bufs)

        decombuf = zlib.decompress(combuf)
        # Test type of return value
        self.assertIsInstance(decombuf, bytes)

        self.assertEqual(data, decombuf)

        dco = zlib.decompressobj()
        bufs = []
        for i in range(0, len(combuf), dcx):
            bufs.append(dco.decompress(combuf[i:i+dcx]))
            self.assertEqual(b'', dco.unconsumed_tail, ########
                             "(A) uct should be b'': not %d long" %
                                       len(dco.unconsumed_tail))
            self.assertEqual(b'', dco.unused_data)
        if flush:
            bufs.append(dco.flush())
        else:
            while True:
                chunk = dco.decompress(b'')
                if chunk:
                    bufs.append(chunk)
                else:
                    break
        self.assertEqual(b'', dco.unconsumed_tail, ########
                         "(B) uct should be b'': not %d long" %
                                       len(dco.unconsumed_tail))
        self.assertEqual(b'', dco.unused_data)
        self.assertEqual(data, b''.join(bufs))
        # Failure means: "decompressobj with init options failed"


def test.test_zlib.CompressObjectTestCase.test_decompincflush ( self )

Definition at line 293 of file test_zlib.py.

    def test_decompincflush(self):
        self.test_decompinc(flush=True)


def test.test_zlib.CompressObjectTestCase.test_decompress_incomplete_stream ( self )

Definition at line 437 of file test_zlib.py.

    def test_decompress_incomplete_stream(self):
        # This is 'foo', deflated
        x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E'
        # For the record
        self.assertEqual(zlib.decompress(x), b'foo')
        self.assertRaises(zlib.error, zlib.decompress, x[:-5])
        # Omitting the stream end works with decompressor objects
        # (see issue #8672).
        dco = zlib.decompressobj()
        y = dco.decompress(x[:-5])
        y += dco.flush()
        self.assertEqual(y, b'foo')


def test.test_zlib.CompressObjectTestCase.test_decompresscopy ( self )

Definition at line 481 of file test_zlib.py.

        def test_decompresscopy(self):
            # Test copying a decompression object
            data = HAMLET_SCENE
            comp = zlib.compress(data)
            # Test type of return value
            self.assertIsInstance(comp, bytes)

            d0 = zlib.decompressobj()
            bufs0 = []
            bufs0.append(d0.decompress(comp[:32]))

            d1 = d0.copy()
            bufs1 = bufs0[:]

            bufs0.append(d0.decompress(comp[32:]))
            s0 = b''.join(bufs0)

            bufs1.append(d1.decompress(comp[32:]))
            s1 = b''.join(bufs1)

            self.assertEqual(s0, s1)
            self.assertEqual(s0, data)


def test.test_zlib.CompressObjectTestCase.test_decompressmaxlen ( self, flush = False )

Definition at line 323 of file test_zlib.py.

    def test_decompressmaxlen(self, flush=False):
        # Check a decompression object with max_length specified
        data = HAMLET_SCENE * 128
        co = zlib.compressobj()
        bufs = []
        for i in range(0, len(data), 256):
            bufs.append(co.compress(data[i:i+256]))
        bufs.append(co.flush())
        combuf = b''.join(bufs)
        self.assertEqual(data, zlib.decompress(combuf),
                         'compressed data failure')

        dco = zlib.decompressobj()
        bufs = []
        cb = combuf
        while cb:
            max_length = 1 + len(cb)//10
            chunk = dco.decompress(cb, max_length)
            self.assertFalse(len(chunk) > max_length,
                        'chunk too big (%d>%d)' % (len(chunk), max_length))
            bufs.append(chunk)
            cb = dco.unconsumed_tail
        if flush:
            bufs.append(dco.flush())
        else:
            while chunk:
                chunk = dco.decompress(b'', max_length)
                self.assertFalse(len(chunk) > max_length,
                            'chunk too big (%d>%d)' % (len(chunk), max_length))
                bufs.append(chunk)
        self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
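
The max_length / unconsumed_tail pattern checked above can be distilled outside the test harness. A minimal sketch, with an illustrative 64-byte output cap and sample payload:

    import zlib

    payload = b'hamlet ' * 1000
    compressed = zlib.compress(payload)

    dco = zlib.decompressobj()
    out = []
    buf = compressed
    while buf:
        chunk = dco.decompress(buf, 64)    # emit at most 64 bytes per call
        out.append(chunk)
        buf = dco.unconsumed_tail          # feed back whatever was not consumed
    out.append(dco.flush())                # drain any remaining buffered output
    assert b''.join(out) == payload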


def test.test_zlib.CompressObjectTestCase.test_decompressmaxlenflush ( self )

Definition at line 355 of file test_zlib.py.

    def test_decompressmaxlenflush(self):
        self.test_decompressmaxlen(flush=True)


def test.test_zlib.CompressObjectTestCase.test_empty_flush ( self )

Definition at line 427 of file test_zlib.py.

    def test_empty_flush(self):
        # Test that calling .flush() on unused objects works.
        # (Bug #1083110 -- calling .flush() on decompress objects
        # caused a core dump.)

        co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
        self.assertTrue(co.flush())  # Returns a zlib header
        dco = zlib.decompressobj()
        self.assertEqual(dco.flush(), b"") # Returns nothing


def test.test_zlib.CompressObjectTestCase.test_flushes ( self )

Definition at line 373 of file test_zlib.py.

    def test_flushes(self):
        # Test flush() with the various options, using all the
        # different levels in order to provide more variations.
        sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
        sync_opt = [getattr(zlib, opt) for opt in sync_opt
                    if hasattr(zlib, opt)]
        data = HAMLET_SCENE * 8

        for sync in sync_opt:
            for level in range(10):
                obj = zlib.compressobj( level )
                a = obj.compress( data[:3000] )
                b = obj.flush( sync )
                c = obj.compress( data[3000:] )
                d = obj.flush()
                self.assertEqual(zlib.decompress(b''.join([a,b,c,d])),
                                 data, ("Decompress failed: flush "
                                        "mode=%i, level=%i") % (sync, level))
                del obj
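
Beyond the exhaustive sweep above, the practical point of Z_SYNC_FLUSH is that it byte-aligns the output so a receiver can decode everything sent so far without ending the stream. A minimal sketch of that pattern (variable names are illustrative):

    import zlib

    co = zlib.compressobj()
    dco = zlib.decompressobj()

    packet = co.compress(b'first message ') + co.flush(zlib.Z_SYNC_FLUSH)
    # the receiver can already recover all data sent so far...
    assert dco.decompress(packet) == b'first message '

    # ...and the compressor can keep writing to the same stream
    packet = co.compress(b'second message') + co.flush(zlib.Z_SYNC_FLUSH)
    assert dco.decompress(packet) == b'second message'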


def test.test_zlib.CompressObjectTestCase.test_length_overflow ( self, size )

Definition at line 527 of file test_zlib.py.

    def test_length_overflow(self, size):
        if size < _4G + 100:
            self.skipTest("not enough free memory, need at least 4 GB")
        data = b'x' * size
        c = zlib.compressobj(1)
        d = zlib.decompressobj()
        try:
            self.assertRaises(OverflowError, c.compress, data)
            self.assertRaises(OverflowError, d.decompress, data)
        finally:
            data = None


def test.test_zlib.CompressObjectTestCase.test_maxlenmisc ( self )

Definition at line 358 of file test_zlib.py.

    def test_maxlenmisc(self):
        # Misc tests of max_length
        dco = zlib.decompressobj()
        self.assertRaises(ValueError, dco.decompress, b"", -1)
        self.assertEqual(b'', dco.unconsumed_tail)


def test.test_zlib.CompressObjectTestCase.test_odd_flush ( self )

Definition at line 393 of file test_zlib.py.

    def test_odd_flush(self):
        # Test for odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
        import random

        if hasattr(zlib, 'Z_SYNC_FLUSH'):
            # Testing on 17K of "random" data

            # Create compressor and decompressor objects
            co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
            dco = zlib.decompressobj()

            # Try 17K of data
            # generate random data stream
            try:
                # In 2.3 and later, WichmannHill is the RNG of the bug report
                gen = random.WichmannHill()
            except AttributeError:
                try:
                    # 2.2 called it Random
                    gen = random.Random()
                except AttributeError:
                    # others might simply have a single RNG
                    gen = random
            gen.seed(1)
            data = genblock(1, 17 * 1024, generator=gen)

            # compress, sync-flush, and decompress
            first = co.compress(data)
            second = co.flush(zlib.Z_SYNC_FLUSH)
            expanded = dco.decompress(first + second)

            # if decompressed data is different from the input data, choke.
            self.assertEqual(expanded, data, "17K random source doesn't match")


def test.test_zlib.CompressObjectTestCase.test_pair ( self )

Definition at line 203 of file test_zlib.py.

    def test_pair(self):
        # straightforward compress/decompress objects
        datasrc = HAMLET_SCENE * 128
        datazip = zlib.compress(datasrc)
        # should compress both bytes and bytearray data
        for data in (datasrc, bytearray(datasrc)):
            co = zlib.compressobj()
            x1 = co.compress(data)
            x2 = co.flush()
            self.assertRaises(zlib.error, co.flush) # second flush should not work
            self.assertEqual(x1 + x2, datazip)
        for v1, v2 in ((x1, x2), (bytearray(x1), bytearray(x2))):
            dco = zlib.decompressobj()
            y1 = dco.decompress(v1 + v2)
            y2 = dco.flush()
            self.assertEqual(data, y1 + y2)
            self.assertIsInstance(dco.unconsumed_tail, bytes)
            self.assertIsInstance(dco.unused_data, bytes)



Member Data Documentation

_PyObject_HEAD_EXTRA Py_ssize_t _object::ob_refcnt [inherited]

Definition at line 107 of file object.h.

struct _typeobject* _object::ob_type [inherited]

Definition at line 108 of file object.h.


The documentation for this class was generated from the following file:

test_zlib.py