"""
Example "proper" blurhash usage.

Encodes an image to a blurhash at a reduced working size, then decodes it,
scales the decoded image up to cover a target output size while preserving
the original image's aspect ratio, and center-crops to the final dimensions.
"""

import numpy as np
import PIL.Image
import blurhash

# Input/output file names
input_image = "tests/cool_cat.jpg"
output_image = "example_out.png"

# How many components do we want, maximum and minimum?
target_components = 4
min_components = 2

# Which maximum size should we work at for blurhash calculations?
work_size = 64

# What's the final intended output size?
out_size = (320, 180)

#
# Part 1: Encode
#

# Load the image and store sizes (useful for decoding later, and likely part
# of your metadata objects anyways)
image = PIL.Image.open(input_image).convert("RGB")
image_size = (image.width, image.height)
print("Read image " + input_image + " ({} x {})".format(image_size[0], image_size[1]))

# Convert to linear colour values and thumbnail each channel down to the
# working size (blurhash math operates on linear-light values, so the
# sRGB -> linear conversion happens before resampling).
image_linear = np.vectorize(blurhash.srgb_to_linear)(np.array(image))
image_linear_thumb = []
for i in range(3):
    channel_linear = PIL.Image.fromarray(image_linear[:, :, i].astype("float32"), mode="F")
    channel_linear.thumbnail((work_size, work_size))
    image_linear_thumb.append(np.array(channel_linear))
image_linear_thumb = np.transpose(np.array(image_linear_thumb), (1, 2, 0))
print("Encoder working at size: {} x {}".format(image_linear_thumb.shape[1], image_linear_thumb.shape[0]))

# Figure out a good component count: scale the target count by how much of
# the working size each axis actually uses, clamped to [min, target].
components_x = int(max(min_components, min(target_components, round(image_linear_thumb.shape[1] / (work_size / target_components)))))
components_y = int(max(min_components, min(target_components, round(image_linear_thumb.shape[0] / (work_size / target_components)))))
print("Using component counts: {} x {}".format(components_x, components_y))

# Create blurhash
blur_hash = blurhash.encode(image_linear_thumb, components_x, components_y, linear=True)
print("Blur hash of image: " + blur_hash)

#
# Part 2: Decode
#

# Figure out what size to decode to (a few pixels per component is plenty,
# since the result is a heavily smoothed gradient anyway)
decode_components_x, decode_components_y = blurhash.components(blur_hash)
decode_size_x = decode_components_x * (work_size // target_components)
decode_size_y = decode_components_y * (work_size // target_components)
print("Decoder working at size {} x {}".format(decode_size_x, decode_size_y))

# Decode to linear-light pixel values
decoded_image = np.array(blurhash.decode(blur_hash, decode_size_x, decode_size_y, linear=True))

# Scale so that we cover out_size without letter/pillarboxing while matching
# the original image's aspect ratio: scale uniformly by the larger of the two
# per-axis ratios so both dimensions end up >= out_size.
# (BUGFIX: the previous computation multiplied by the aspect ratio the wrong
# way round — e.g. a 1920x1080 source targeting 320x180 produced a 320x568
# intermediate instead of 320x180 — over-stretching the image before the crop.)
scale = max(out_size[0] / image_size[0], out_size[1] / image_size[1])
scale_target_size = (
    max(out_size[0], int(round(image_size[0] * scale))),
    max(out_size[1], int(round(image_size[1] * scale))),
)

# Scale (ideally, your UI layer should take care of this in some kind of
# efficient way)
print("Scaling to target size: {} x {}".format(scale_target_size[0], scale_target_size[1]))
decoded_image_large = []
for i in range(3):
    channel_linear = PIL.Image.fromarray(decoded_image[:, :, i].astype("float32"), mode="F")
    decoded_image_large.append(np.array(channel_linear.resize(scale_target_size, PIL.Image.BILINEAR)))
decoded_image_large = np.transpose(np.array(decoded_image_large), (1, 2, 0))

# Convert back to sRGB and build an 8-bit PIL image
decoded_image_out = np.vectorize(blurhash.linear_to_srgb)(np.array(decoded_image_large))
decoded_image_out = PIL.Image.fromarray(np.array(decoded_image_out).astype("uint8"))

# Center-crop to the final size and write (integer coordinates; the scaled
# image is guaranteed to be at least out_size in both dimensions)
decoded_image_out = decoded_image_out.crop((
    (decoded_image_out.width - out_size[0]) // 2,
    (decoded_image_out.height - out_size[1]) // 2,
    (decoded_image_out.width + out_size[0]) // 2,
    (decoded_image_out.height + out_size[1]) // 2,
))
decoded_image_out.save(output_image)
print("Wrote final result to " + str(output_image))