!pip install tensorflow --upgrade
Requirement already up-to-date: tensorflow in /opt/venv/lib/python3.7/site-packages (2.2.0)
Requirement already satisfied, skipping upgrade: opt-einsum>=2.3.2 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (3.2.1)
Requirement already satisfied, skipping upgrade: keras-preprocessing>=1.1.0 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (1.1.2)
Requirement already satisfied, skipping upgrade: grpcio>=1.8.6 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (1.30.0)
Requirement already satisfied, skipping upgrade: absl-py>=0.7.0 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (0.9.0)
Requirement already satisfied, skipping upgrade: tensorboard<2.3.0,>=2.2.0 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (2.2.2)
Requirement already satisfied, skipping upgrade: google-pasta>=0.1.8 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (0.2.0)
Requirement already satisfied, skipping upgrade: numpy<2.0,>=1.16.0 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (1.18.5)
Requirement already satisfied, skipping upgrade: six>=1.12.0 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (1.15.0)
Requirement already satisfied, skipping upgrade: termcolor>=1.1.0 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (1.1.0)
Requirement already satisfied, skipping upgrade: protobuf>=3.8.0 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (3.12.2)
Requirement already satisfied, skipping upgrade: tensorflow-estimator<2.3.0,>=2.2.0 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (2.2.0)
Requirement already satisfied, skipping upgrade: wrapt>=1.11.1 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (1.12.1)
Requirement already satisfied, skipping upgrade: h5py<2.11.0,>=2.10.0 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (2.10.0)
Requirement already satisfied, skipping upgrade: gast==0.3.3 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (0.3.3)
Requirement already satisfied, skipping upgrade: wheel>=0.26; python_version >= "3" in /opt/venv/lib/python3.7/site-packages (from tensorflow) (0.34.2)
Requirement already satisfied, skipping upgrade: astunparse==1.6.3 in /opt/venv/lib/python3.7/site-packages (from tensorflow) (1.6.3)
Requirement already satisfied, skipping upgrade: scipy==1.4.1; python_version >= "3" in /opt/venv/lib/python3.7/site-packages (from tensorflow) (1.4.1)
Requirement already satisfied, skipping upgrade: setuptools>=41.0.0 in /opt/venv/lib/python3.7/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow) (46.4.0)
Requirement already satisfied, skipping upgrade: werkzeug>=0.11.15 in /opt/venv/lib/python3.7/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow) (1.0.1)
Requirement already satisfied, skipping upgrade: markdown>=2.6.8 in /opt/venv/lib/python3.7/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow) (3.2.2)
Requirement already satisfied, skipping upgrade: tensorboard-plugin-wit>=1.6.0 in /opt/venv/lib/python3.7/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow) (1.7.0)
Requirement already satisfied, skipping upgrade: google-auth-oauthlib<0.5,>=0.4.1 in /opt/venv/lib/python3.7/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow) (0.4.1)
Requirement already satisfied, skipping upgrade: requests<3,>=2.21.0 in /opt/venv/lib/python3.7/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow) (2.23.0)
Requirement already satisfied, skipping upgrade: google-auth<2,>=1.6.3 in /opt/venv/lib/python3.7/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow) (1.18.0)
Requirement already satisfied, skipping upgrade: importlib-metadata; python_version < "3.8" in /opt/venv/lib/python3.7/site-packages (from markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow) (1.6.1)
Requirement already satisfied, skipping upgrade: requests-oauthlib>=0.7.0 in /opt/venv/lib/python3.7/site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow) (1.3.0)
Requirement already satisfied, skipping upgrade: chardet<4,>=3.0.2 in /opt/venv/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow) (3.0.4)
Requirement already satisfied, skipping upgrade: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /opt/venv/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow) (1.25.9)
Requirement already satisfied, skipping upgrade: idna<3,>=2.5 in /opt/venv/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow) (2.9)
Requirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /opt/venv/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow) (2020.4.5.2)
Requirement already satisfied, skipping upgrade: cachetools<5.0,>=2.0.0 in /opt/venv/lib/python3.7/site-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow) (4.1.1)
Requirement already satisfied, skipping upgrade: pyasn1-modules>=0.2.1 in /opt/venv/lib/python3.7/site-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow) (0.2.8)
Requirement already satisfied, skipping upgrade: rsa<5,>=3.1.4; python_version >= "3" in /opt/venv/lib/python3.7/site-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow) (4.6)
Requirement already satisfied, skipping upgrade: zipp>=0.5 in /opt/venv/lib/python3.7/site-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow) (3.1.0)
Requirement already satisfied, skipping upgrade: oauthlib>=3.0.0 in /opt/venv/lib/python3.7/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow) (3.1.0)
Requirement already satisfied, skipping upgrade: pyasn1<0.5.0,>=0.4.6 in /opt/venv/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow) (0.4.8)
!pip install opencv-python
Requirement already satisfied: opencv-python in /opt/venv/lib/python3.7/site-packages (4.2.0.34)
Requirement already satisfied: numpy>=1.14.5 in /opt/venv/lib/python3.7/site-packages (from opencv-python) (1.18.5)
!pip install keras --upgrade
Requirement already up-to-date: keras in /opt/venv/lib/python3.7/site-packages (2.4.3)
Requirement already satisfied, skipping upgrade: numpy>=1.9.1 in /opt/venv/lib/python3.7/site-packages (from keras) (1.18.5)
Requirement already satisfied, skipping upgrade: pyyaml in /opt/venv/lib/python3.7/site-packages (from keras) (5.3.1)
Requirement already satisfied, skipping upgrade: scipy>=0.14 in /opt/venv/lib/python3.7/site-packages (from keras) (1.4.1)
Requirement already satisfied, skipping upgrade: h5py in /opt/venv/lib/python3.7/site-packages (from keras) (2.10.0)
Requirement already satisfied, skipping upgrade: six in /opt/venv/lib/python3.7/site-packages (from h5py->keras) (1.15.0)
import segmentation_models as sm
AttributeError: module 'keras.utils' has no attribute 'generic_utils'
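# The AttributeError above comes from running segmentation_models on the standalone
# `keras` 2.4 package, which no longer exposes keras.utils.generic_utils. A commonly
# used workaround (assuming segmentation_models 1.x) is to switch the library to the
# tf.keras backend before importing it:
import os
os.environ['SM_FRAMEWORK'] = 'tf.keras'   # must be set before the import below
import segmentation_models as sm
# equivalently, after import: sm.set_framework('tf.keras')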
!pip install tensorboardX
Collecting tensorboardX
Downloading tensorboardX-2.0-py2.py3-none-any.whl (195 kB)
|████████████████████████████████| 195 kB 3.5 MB/s eta 0:00:01
Requirement already satisfied: numpy in /opt/venv/lib/python3.7/site-packages (from tensorboardX) (1.18.5)
Requirement already satisfied: six in /opt/venv/lib/python3.7/site-packages (from tensorboardX) (1.15.0)
Requirement already satisfied: protobuf>=3.8.0 in /opt/venv/lib/python3.7/site-packages (from tensorboardX) (3.12.2)
Requirement already satisfied: setuptools in /opt/venv/lib/python3.7/site-packages (from protobuf>=3.8.0->tensorboardX) (46.4.0)
Installing collected packages: tensorboardX
Successfully installed tensorboardX-2.0
!pip install toml torch torchvision
Collecting toml
Downloading toml-0.10.1-py2.py3-none-any.whl (19 kB)
Collecting torch
Downloading torch-1.5.1-cp37-cp37m-manylinux1_x86_64.whl (753.2 MB)
     |████████████████████████████████| 753.2 MB 6.0 kB/s eta 0:00:01
Collecting torchvision
Downloading torchvision-0.6.1-cp37-cp37m-manylinux1_x86_64.whl (6.6 MB)
|████████████████████████████████| 6.6 MB 25.4 MB/s eta 0:00:01
Requirement already satisfied: numpy in /opt/venv/lib/python3.7/site-packages (from torch) (1.18.5)
Collecting future
Downloading future-0.18.2.tar.gz (829 kB)
|████████████████████████████████| 829 kB 31.9 MB/s eta 0:00:01
Requirement already satisfied: pillow>=4.1.1 in /opt/venv/lib/python3.7/site-packages (from torchvision) (7.2.0)
Building wheels for collected packages: future
Building wheel for future (setup.py) ... done
Created wheel for future: filename=future-0.18.2-py3-none-any.whl size=491058 sha256=1b58fb48b929ea49c2e8ce517dd221deb2cc183224f9884c6c911dc0567b30d2
Stored in directory: /home/jovyan/.cache/pip/wheels/56/b0/fe/4410d17b32f1f0c3cf54cdfb2bc04d7b4b8f4ae377e2229ba0
Successfully built future
Installing collected packages: toml, future, torch, torchvision
Successfully installed future-0.18.2 toml-0.10.1 torch-1.5.1 torchvision-0.6.1
import os
import glob

import cv2
import keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import toml
import torch
from torch.nn import functional as F

import utils
from utils import CONFIG
import networks
!ls /datasets/model_seg
inception_new_categories.h5
BACKBONE = 'inceptionv3'
BATCH_SIZE = 4
CLASSES = ['product']
LR = 0.0001
EPOCHS = 10
preprocess_input = sm.get_preprocessing(BACKBONE)
n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1) # case for binary and multiclass segmentation
activation = 'sigmoid' if n_classes == 1 else 'softmax'
model = sm.Unet(BACKBONE, classes=n_classes, activation=activation, encoder_weights='imagenet',
                encoder_freeze=False, decoder_block_type='transpose', decoder_use_batchnorm=True)
model.load_weights('/datasets/model_seg/inception_new_categories.h5')
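# Optional sanity check (illustrative sketch, not part of the original run): verify the
# restored weights produce a single-channel mask for the 224x224 RGB input size that
# get_bg_removed_image uses below.
_dummy = np.zeros((1, 224, 224, 3), dtype=np.float32)
print(model.predict(preprocess_input(_dummy)).shape)   # expected: (1, 224, 224, 1)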
def single_inference(model, image_dict, return_offset=True):
    with torch.no_grad():
        image, trimap = image_dict['image'], image_dict['trimap']
        alpha_shape = image_dict['alpha_shape']
        # inference runs on CPU here; move image/trimap to a GPU if one is available
        alpha_pred, info_dict = model(image, trimap)

        # clamp regions the trimap already labels as known foreground/background
        # (assumes the 3-channel one-hot trimap produced by generator_tensor_dict)
        if CONFIG.model.trimap_channel == 3:
            trimap_argmax = trimap.argmax(dim=1, keepdim=True)
            alpha_pred[trimap_argmax == 2] = 1
            alpha_pred[trimap_argmax == 0] = 0

        # crop away the 32-pixel reflect border added in generator_tensor_dict
        h, w = alpha_shape
        test_pred = alpha_pred[0, 0, ...].data.cpu().numpy() * 255
        test_pred = test_pred.astype(np.uint8)
        test_pred = test_pred[32:h+32, 32:w+32]

        if return_offset:
            short_side = h if h < w else w
            ratio = 512 / short_side
            offset_1 = utils.flow_to_image(info_dict['offset_1'][0][0, ...].data.cpu().numpy()).astype(np.uint8)
            # write softmax_scale to offset image
            scale = info_dict['offset_1'][1].cpu()
            offset_1 = cv2.resize(offset_1, (int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_NEAREST)
            text = 'unknown: {:.2f}, known: {:.2f}'.format(scale[-1, 0].item(), scale[-1, 1].item())
            offset_1 = cv2.putText(offset_1, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, 0, thickness=2)
            offset_2 = utils.flow_to_image(info_dict['offset_2'][0][0, ...].data.cpu().numpy()).astype(np.uint8)
            # write softmax_scale to offset image
            scale = info_dict['offset_2'][1].cpu()
            offset_2 = cv2.resize(offset_2, (int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_NEAREST)
            text = 'unknown: {:.2f}, known: {:.2f}'.format(scale[-1, 0].item(), scale[-1, 1].item())
            offset_2 = cv2.putText(offset_2, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, 0, thickness=2)
            return test_pred, (offset_1, offset_2)
        else:
            return test_pred, None
def generator_tensor_dict(image, trimap):
    # downscale both the image and the trimap by 4x to keep matting inference cheap
    image = cv2.resize(image, (image.shape[1] // 4, image.shape[0] // 4))
    trimap = cv2.resize(trimap, (trimap.shape[1] // 4, trimap.shape[0] // 4))
    sample = {'image': image, 'trimap': trimap, 'alpha_shape': trimap.shape}
    # pad with a 32-px reflect border and grow height/width to multiples of 32
h, w = sample["alpha_shape"]
if h % 32 == 0 and w % 32 == 0:
padded_image = np.pad(sample['image'], ((32,32), (32, 32), (0,0)), mode="reflect")
padded_trimap = np.pad(sample['trimap'], ((32,32), (32, 32)), mode="reflect")
sample['image'] = padded_image
sample['trimap'] = padded_trimap
else:
target_h = 32 * ((h - 1) // 32 + 1)
target_w = 32 * ((w - 1) // 32 + 1)
pad_h = target_h - h
pad_w = target_w - w
padded_image = np.pad(sample['image'], ((32,pad_h+32), (32, pad_w+32), (0,0)), mode="reflect")
padded_trimap = np.pad(sample['trimap'], ((32,pad_h+32), (32, pad_w+32)), mode="reflect")
sample['image'] = padded_image
sample['trimap'] = padded_trimap
# ImageNet mean & std
mean = torch.tensor([0.485, 0.456, 0.406]).view(3,1,1)
std = torch.tensor([0.229, 0.224, 0.225]).view(3,1,1)
    # convert BGR to RGB
    image, trimap = sample['image'][:, :, ::-1], sample['trimap']
    # swap color axis (HWC -> CHW)
    image = image.transpose((2, 0, 1)).astype(np.float32)
    # quantize the trimap: 0 = background, 1 = unknown, 2 = foreground
    trimap[trimap < 85] = 0
    trimap[trimap >= 170] = 2
    trimap[trimap >= 85] = 1
# normalize image
image /= 255.
# to tensor
sample['image'], sample['trimap'] = torch.from_numpy(image), torch.from_numpy(trimap).to(torch.long)
sample['image'] = sample['image'].sub_(mean).div_(std)
if CONFIG.model.trimap_channel == 3:
sample['trimap'] = F.one_hot(sample['trimap'], num_classes=3).permute(2, 0, 1).float()
elif CONFIG.model.trimap_channel == 1:
sample['trimap'] = sample['trimap'][None, ...].float()
else:
raise NotImplementedError("CONFIG.model.trimap_channel can only be 3 or 1")
    # add a batch dimension
sample['image'], sample['trimap'] = sample['image'][None, ...], sample['trimap'][None, ...]
return sample
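# The padding above grows each spatial dimension to a multiple of 32 (presumably the
# matting encoder's total downsampling factor) plus a 32-px reflect border on every
# side, which single_inference later crops off via test_pred[32:h+32, 32:w+32].
# A quick check of the arithmetic on a hypothetical input size:
_h, _w = 301, 455
_target_h, _target_w = 32 * ((_h - 1) // 32 + 1), 32 * ((_w - 1) // 32 + 1)   # 320, 480
assert (_target_h + 64) % 32 == 0 and (_target_w + 64) % 32 == 0              # padded sizes 384 and 544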
with open('config/gca-dist-all-data.toml') as f:
utils.load_config(toml.load(f))
# Check if toml config file is loaded
if CONFIG.is_default:
raise ValueError("No .toml config loaded.")
model_matting = networks.get_generator(encoder=CONFIG.model.arch.encoder, decoder=CONFIG.model.arch.decoder)
#model_matting.cuda()
# load checkpoint
checkpoint = torch.load('gca-dist-all-data.pth',map_location='cpu')
model_matting.load_state_dict(utils.remove_prefix_state_dict(checkpoint['state_dict']), strict=True)
# inference
model_matting = model_matting.eval()
import cv2
import numpy as np
def erode_dilate(msk, struc="ELLIPSE", size=(3, 3)):
    if struc == "RECT":
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, size)
    elif struc == "CROSS":
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, size)
    else:
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, size)
    # msk is expected to be a {0, 255} binary mask; scale to {0, 1} before the morphology
    msk = msk / 255
    dilated = cv2.dilate(msk, kernel, iterations=10) * 255
    eroded = cv2.erode(msk, kernel, iterations=10) * 255
    # sanity checks: the mask and its morphological variants must stay strictly binary
    # (msk in {0, 1}, dilated/eroded in {0, 255})
    assert len(np.where(msk >= 0)[0]) == len(np.where(msk == 0)[0]) + len(np.where(msk == 1)[0])
    assert len(np.where(dilated >= 0)[0]) == len(np.where(dilated == 0)[0]) + len(np.where(dilated == 255)[0])
    assert len(np.where(eroded >= 0)[0]) == len(np.where(eroded == 0)[0]) + len(np.where(eroded == 255)[0])
    # trimap: keep the dilation as foreground and mark the band between dilation and erosion as unknown (128)
    res = dilated.copy()
    res[((dilated == 255) & (eroded == 0))] = 128
    return res
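# Illustrative usage on a hypothetical mask (not from the original data): erode_dilate
# maps a {0, 255} binary mask to a trimap with 255 = definite foreground, 0 = definite
# background, and 128 = unknown band around the boundary.
_toy_mask = np.zeros((100, 100), dtype=np.float32)
_toy_mask[30:70, 30:70] = 255
print(np.unique(erode_dilate(_toy_mask)))   # expected: [  0. 128. 255.]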
def get_bg_removed_image(url, model, model_matting):
    # Reading from a local path (cv2.imread + BGR->RGB conversion) is added in the
    # revised definition further below; this version only accepts a URL.
    image = download_image(url)
    height, width = image.shape[:2]

    # coarse binary mask from the segmentation model (224x224 input), upscaled back to full resolution
    image_resized = cv2.resize(image, (224, 224))
    image_resized = np.expand_dims(image_resized, axis=0)
    pr_mask = model.predict(preprocess_input(image_resized).reshape(1, 224, 224, 3)).round()
    alpha_seg = pr_mask.reshape(224, 224)
    alpha_seg = cv2.resize(alpha_seg, (width, height)) * 255
    alpha_seg[alpha_seg < 127] = 0
    alpha_seg[alpha_seg >= 127] = 255

    # build a trimap from the binary mask and refine the alpha with the matting model
    trimap = erode_dilate(alpha_seg)
    image_dict = generator_tensor_dict(image, trimap)
    pred, offset = single_inference(model_matting, image_dict)
    mat = pred

    # composite the predicted alpha over a plain white background
    bg = cv2.imread('plain-white-background.jpg')
    bg = cv2.resize(bg, (image.shape[1], image.shape[0]))
    pred = cv2.resize(pred, (width, height)).reshape(height, width, 1) / 255
    im_mat = pred * (image / 255) + (1 - pred) * (bg / 255)

    visualize(
        image=image,
        predicted_mat=alpha_seg,
        mat=mat,
        trimap=trimap,
        image_mat=im_mat,
    )
!pip install Pillow
Requirement already satisfied: Pillow in /opt/venv/lib/python3.7/site-packages (7.2.0)
def visualize(**images):
    """Plot images in one row."""
n = len(images)
plt.figure(figsize=(16, 5))
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, n, i + 1)
plt.xticks([])
plt.yticks([])
plt.title(' '.join(name.split('_')).title())
plt.imshow(image)
import numpy as np
import requests
from PIL import Image
def download_image(url):
response = requests.get(url, stream=True)
response.raw.decode_content = True
return np.array(Image.open(response.raw))
get_bg_removed_image(url='https://media.kubric.io/api/assetlib/15662679-d6e1-46df-b386-8e934904a913.jpg', path=None, model=model, model_matting=model_matting)
NameError: name 'get_bg_removed_image' is not defined
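# The NameError most likely means the cell defining get_bg_removed_image had not been
# executed in this kernel before the call above; note also that the call passes a `path`
# keyword, which only the revised definition below accepts.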
def get_bg_removed_image(url, path, model, model_matting):
    # read either from a local path (BGR -> RGB) or from a URL
    if path:
        image = cv2.imread(path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if url:
        image = download_image(url)
    height, width = image.shape[:2]
    # drop the alpha channel if the image is RGBA
    if image.shape[2] > 3:
        image = image[:, :, :3]

    # coarse binary mask from the segmentation model (224x224 input), upscaled back to full resolution
    image_resized = cv2.resize(image, (224, 224))
    image_resized = np.expand_dims(image_resized, axis=0)
    pr_mask = model.predict(preprocess_input(image_resized).reshape(1, 224, 224, 3)).round()
    alpha_seg = pr_mask.reshape(224, 224)
    alpha_seg = cv2.resize(alpha_seg, (width, height)) * 255
    alpha_seg[alpha_seg < 127] = 0
    alpha_seg[alpha_seg >= 127] = 255

    # build a trimap from the binary mask and refine the alpha with the matting model
    trimap = erode_dilate(alpha_seg)
    image_dict = generator_tensor_dict(image, trimap)
    pred, offset = single_inference(model_matting, image_dict)
    mat = pred

    # composite the predicted alpha over a plain white background
    bg = cv2.imread('plain-white-background.jpg')
    bg = cv2.resize(bg, (image.shape[1], image.shape[0]))
    pred = cv2.resize(pred, (width, height)).reshape(height, width, 1) / 255
    im_mat = pred * (image / 255) + (1 - pred) * (bg / 255)

    visualize(
        image=image,
        predicted_mat=alpha_seg,
        mat=mat,
        trimap=trimap,
        image_mat=im_mat,
    )
    plt.show()
for path in glob.glob('Select Imagery/*'):
get_bg_removed_image(path, model, model_matting)
TypeError: get_bg_removed_image() missing 1 required positional argument: 'model_matting'
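# The loop above passes `path` positionally into the `url` slot of the revised
# four-argument signature, so `model_matting` ends up missing. A corrected call,
# assuming the files under 'Select Imagery/' are local images, would be:
for path in glob.glob('Select Imagery/*'):
    get_bg_removed_image(url=None, path=path, model=model, model_matting=model_matting)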