Repository: zhenngbolun/Learnbale_Bandpass_Filter
Branch: master
Commit: f89dfb315beb
Files: 5
Total size: 15.9 KB
Directory structure:
gitextract_hhj1u1mn/
├── README.md
├── core_layers.py
├── main_multiscale.py
├── model.py
└── utils.py
================================================
FILE CONTENTS
================================================
================================================
FILE: README.md
================================================
# Learnbale_Bandpass_Filter
Image Demoireing with Learnable Bandpass Filters, CVPR2020
Our extension work is accepted by IEEE TPAMI.
The journal paper will come soon.
If you find this work is helpful, please cite:
@article{zheng2021learn,
title={Learning Frequency Domain Priors for Image Demoireing},
author = {Bolun, Zheng and Shanxin, Yuan and Chenggang, Yan and Xiang, Tian and Jiyong, Zhang and Yaoqi, Sun and Lin, Liu and Ales, Leonardis and Gregory, Slabaugh},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
year={2021}
}
@inProceedings{zheng2020,
author={B. Zheng and S. Yuan and G. Slabaugh and A. Leonardis},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
title={Image Demoireing with Learnable Bandpass Filters},
year={2020},
}
@article{zheng2019implicit,
title={Implicit dual-domain convolutional network for robust color image compression artifact reduction},
author={Zheng, Bolun and Chen, Yaowu and Tian, Xiang and Zhou, Fan and Liu, Xuesong},
journal={IEEE Transactions on Circuits and Systems for Video Technology},
volume={30},
number={11},
pages={3982--3994},
year={2020},
publisher={IEEE}
}
You can now get this paper at Arxiv preprint: https://arxiv.org/abs/2004.00406
## Run the code
This project requires:
* Tensorflow >1.10
* Keras > 2.0
* opencv > 2.0
* skImage
You can get the weight file for AIM2019 via:
https://1drv.ms/u/s!ArU0YIIFiFuHilwyuwHZjSpvPUBz?e=iZ70Ga
or via Baidu Disk:
https://pan.baidu.com/s/1wsJYyYbQO-ETL5Jq4fN6hw code:jiae
You can get AIM2019 LCDMoire2019 dataset via:
validation:
Moire: https://data.vision.ee.ethz.ch/timofter/AIM19demoire/ValidationMoire.zip
Clean: https://data.vision.ee.ethz.ch/timofter/AIM19demoire/ValidationClear.zip
testing:
https://data.vision.ee.ethz.ch/timofter/AIM19demoire/TestingMoire.zip
Then,
1. edit the 'main_multiscale.py' by:
replacing the 'test_path', 'valid_gt_path', 'valid_ns_path' and 'weight_path' with your own settings.
2. make the dirs 'testing_result' and 'validation_result' at current path.
3. python main_multiscale.py.
================================================
FILE: core_layers.py
================================================
import tensorflow as tf
from keras import backend as k
from keras.utils import conv_utils
import numpy as np
from keras import layers, models, activations, initializers, constraints
from math import cos, pi, sqrt
from keras.regularizers import l2
class Space2Depth(layers.Layer):
    """Rearrange `scale` x `scale` spatial blocks into channels.

    Thin Keras wrapper around tf.space_to_depth: spatial dimensions
    shrink by `scale` and the channel count grows by `scale**2`.
    """
    def __init__(self, scale, **kwargs):
        super(Space2Depth, self).__init__(**kwargs)
        self.scale = scale

    def call(self, inputs, **kwargs):
        return tf.space_to_depth(inputs, self.scale)

    def compute_output_shape(self, input_shape):
        # Spatial dims may be unknown (None) for fully-convolutional use.
        height, width, channels = input_shape[1], input_shape[2], input_shape[3]
        out_channels = channels * self.scale ** 2
        if height is None or width is None:
            return (None, None, None, out_channels)
        return (None, int(height / self.scale), int(width / self.scale), out_channels)
class Depth2Space(layers.Layer):
    """Inverse of Space2Depth: move channel groups of size `scale**2`
    back into `scale` x `scale` spatial blocks (wraps tf.depth_to_space).
    """
    def __init__(self, scale, **kwargs):
        super(Depth2Space, self).__init__(**kwargs)
        self.scale = scale

    def call(self, inputs, **kwargs):
        return tf.depth_to_space(inputs, self.scale)

    def compute_output_shape(self, input_shape):
        # Spatial dims may be unknown (None) for fully-convolutional use.
        height, width, channels = input_shape[1], input_shape[2], input_shape[3]
        out_channels = int(channels / self.scale ** 2)
        if height is None or width is None:
            return (None, None, None, out_channels)
        return (None, height * self.scale, width * self.scale, out_channels)
class adaptive_implicit_trans(layers.Layer):
    """Learnable bandpass transform implemented as a fixed 1x1 DCT conv.

    build() precomputes the 64x64 orthonormal 8x8 2-D DCT basis as a
    non-trainable (1,1,64,64) kernel. A learnable, non-negative scale per
    basis vector (`it_weights`, shape (1,1,64,1)) rescales each frequency
    band, letting the network amplify or suppress individual bands.
    Input and output both have 64 channels; shape is unchanged.
    """
    def __init__(self, **kwargs):
        super(adaptive_implicit_trans, self).__init__(**kwargs)

    def build(self, input_shape):
        conv_shape = (1, 1, 64, 64)
        # One non-negative learnable scale per DCT basis vector.
        self.it_weights = self.add_weight(
            shape = (1, 1, 64, 1),
            initializer = initializers.get('ones'),
            constraint = constraints.NonNeg(),
            name = 'ait_conv')
        kernel = np.zeros(conv_shape)
        # Orthonormal DCT-II normalization factors for an 8x8 block.
        r1 = sqrt(1.0/8)
        r2 = sqrt(2.0/8)
        for i in range(8):
            _u = 2*i+1
            for j in range(8):
                _v = 2*j+1
                index = i*8+j
                for u in range(8):
                    for v in range(8):
                        index2 = u*8+v
                        t = cos(_u*u*pi/16)*cos(_v*v*pi/16)
                        t = t*r1 if u==0 else t*r2
                        t = t*r1 if v==0 else t*r2
                        kernel[0,0,index2,index] = t
        self.kernel = k.variable(value = kernel, dtype = 'float32')

    def call(self, inputs):
        # BUGFIX: the original did `self.kernel = self.kernel*self.it_weights`,
        # mutating the stored kernel on every call; a layer instance invoked
        # more than once would compound the weighting. Compute the weighted
        # kernel locally instead, leaving the DCT basis untouched.
        weighted_kernel = self.kernel * self.it_weights
        y = k.conv2d(inputs,
                     weighted_kernel,
                     padding = 'same',
                     data_format='channels_last')
        return y

    def compute_output_shape(self, input_shape):
        return input_shape
class ScaleLayer(layers.Layer):
    """Multiply the input by a single learnable scalar, initialized to `s`.

    Used to damp a residual branch (e.g. s=0.1) before it is added back.
    """
    def __init__(self, s, **kwargs):
        self.s = s
        super(ScaleLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # One trainable scalar, broadcast over the whole tensor.
        self.kernel = self.add_weight(
            name = 'scale',
            shape = (1,),
            initializer=initializers.Constant(value=self.s))

    def call(self, inputs):
        return inputs * self.kernel

    def compute_output_shape(self, input_shape):
        return input_shape
================================================
FILE: main_multiscale.py
================================================
import tensorflow as tf
import os, cv2, random, keras
from skimage.measure import compare_ssim
import numpy as np
from model import *
from keras import optimizers
from keras.utils import multi_gpu_model
from math import log10
from utils import *
from datetime import datetime
import time
from PIL import Image
# Image dir of the testing inputs (moire images to be demoireed).
test_path = 'F:\\dataset\\AIM2019\\Testing\\'
# Image dir of the validation ground truth (clean images).
valid_gt_path = 'Validation\\clear\\'
# Image dir of the validation inputs (moire images).
valid_ns_path = 'Validation\\moire\\'
# Path to the pretrained weight file (loaded below with by_name=True).
weight_path = 'model/MBCNN_weights.h5'
# multi_input: feed extra X2/X4/X8 downsampled inputs (see generate_validation).
multi_input = False
# multi_output: the model returns all three scale outputs, finest last.
multi_output = True
model = MBCNN(64,multi_output) #MBCNN-light: model = MBCNN(32,multi_output)
# Validation or Testing
def validate_ssim(model, gt_list, ns_list, name_list, multi_output=False):
    """Evaluate the model on pre-loaded validation pairs.

    Runs the model over each (ground-truth, input) pair, prints the mean
    PSNR and SSIM, writes the clipped 8-bit predictions into
    'validation_result/', and returns the mean PSNR.
    """
    print ("validating... ",datetime.now().strftime('%H:%M:%S'))
    total_psnr = 0
    total_ssim = 0
    n = 0
    for idx, (gt, ns) in enumerate(zip(gt_list, ns_list)):
        n += 1
        pred = model.predict(ns)
        if multi_output:
            pred = pred[-1]  # last output is the full-resolution result
        total_psnr += 10 * log10(1 / np.mean((pred - gt) ** 2))
        total_ssim += compare_ssim(pred[0], gt[0], multichannel=True)
        # Clip to [0, 1] and convert to 8-bit before saving.
        out = pred[0]
        out[out > 1] = 1
        out[out < 0] = 0
        out = np.round(out * 255.0).astype(np.uint8)
        cv2.imwrite('validation_result/' + name_list[idx], out)
    print (np.round(total_psnr / n, 3), np.round(total_ssim / n, 4))
    return total_psnr / n
def test(model,multi_output=False):
    """Run the model over every .png in `test_path`.

    Prints the per-image inference time and writes the clipped 8-bit
    predictions into 'testing_result/'.
    """
    file_list = os.listdir(test_path)
    file_list = list_filter(file_list,'.png')
    for f in file_list:
        ns = cv2.imread(test_path+f)
        ns = ns.astype(np.float32)/255.0
        ns = ns.reshape((1,)+ns.shape)  # add batch axis
        # BUGFIX: time.clock() was removed in Python 3.8;
        # time.perf_counter() is the portable high-resolution replacement.
        start = time.perf_counter()
        _gt = model.predict(ns)
        if multi_output:
            _gt = _gt[-1]  # last output is the full-resolution result
        end = time.perf_counter()
        print(f, end-start)
        # Clip to [0, 1] and convert to 8-bit before saving.
        _gt = _gt[0]
        _gt[_gt>1] = 1
        _gt[_gt<0] = 0
        _gt = np.round(_gt*255.0).astype(np.uint8)
        cv2.imwrite('testing_result/'+f,_gt)
# Generate validation data
def generate_validation(valid_list, multi_input, mode='sub'):
    """Load validation image pairs from valid_gt_path / valid_ns_path.

    Args:
        valid_list: file names present in both directories.
        multi_input: also load X2/X4/X8 downsampled moire inputs.
        mode: 'sub' tiles each image into 128x128 patches (assumes
            1024x1024 images -- TODO confirm against the dataset);
            'full' keeps whole images. Every array gets a leading
            batch axis of 1.

    Returns:
        (valid_gt_list, valid_ns_list, name_list)
    """
    valid_gt_list = []
    valid_ns_list = []
    name_list = []
    _width = 128
    for f in valid_list:
        _name = os.path.splitext(f)[0]
        gt = cv2.imread(valid_gt_path+f)
        ns = cv2.imread(valid_ns_path+f)
        gt = gt.astype(np.float32)/255.0
        ns = ns.astype(np.float32)/255.0
        name_list.append(f)
        if multi_input:
            # BUGFIX: the original referenced an undefined name `ns_path`
            # here (NameError whenever multi_input=True). The downsampled
            # inputs are read relative to the validation moire directory.
            ns_x2 = cv2.imread(valid_ns_path+'X2\\'+_name+'.png')
            ns_x4 = cv2.imread(valid_ns_path+'X4\\'+_name+'.png')
            ns_x8 = cv2.imread(valid_ns_path+'X8\\'+_name+'.png')
            ns_x2 = ns_x2.astype(np.float32)/255.0
            ns_x4 = ns_x4.astype(np.float32)/255.0
            ns_x8 = ns_x8.astype(np.float32)/255.0
        if mode == 'sub':
            # NOTE(review): in 'sub' mode the multi-scale inputs loaded
            # above are never appended -- only single-input patching is
            # supported, matching the original behavior.
            for i in range(0,1024,_width):
                for j in range(0,1024,_width):
                    _gt = gt[i:i+_width,j:j+_width]
                    _ns = ns[i:i+_width,j:j+_width]
                    _gt = _gt.reshape((1,)+_gt.shape)
                    _ns = _ns.reshape((1,)+_ns.shape)
                    valid_gt_list.append(_gt)
                    valid_ns_list.append(_ns)
        if mode == 'full':
            gt = gt.reshape((1,)+gt.shape)
            ns = ns.reshape((1,)+ns.shape)
            valid_gt_list.append(gt)
            if multi_input:
                ns_x2 = ns_x2.reshape((1,)+ns_x2.shape)
                ns_x4 = ns_x4.reshape((1,)+ns_x4.shape)
                ns_x8 = ns_x8.reshape((1,)+ns_x8.shape)
                valid_ns_list.append([ns,ns_x2,ns_x4,ns_x8])
            else:
                valid_ns_list.append(ns)
    return valid_gt_list, valid_ns_list, name_list
# Session setup, validation and testing
# Select visible GPUs before the TF session is created.
multi_gpu = False
if multi_gpu:
    print ('use multi gpu mode!')
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
else:
    os.environ["CUDA_VISIBLE_DEVICES"]="0"
# TF1 session with GPU memory growth enabled (see utils.get_session).
keras.backend.tensorflow_backend.set_session(get_session())
model.summary()
#exit(0)
# Build the validation set from whole ('full') images.
valid_list = os.listdir(valid_gt_path)
valid_list = list_filter(valid_list, '.png')
valid_gt_list, valid_ns_list, name_list = generate_validation(valid_list, multi_input, 'full')
model.load_weights(weight_path, by_name = True)
# Output validation results (returns the mean PSNR).
min_loss = validate_ssim(model, valid_gt_list, valid_ns_list, name_list, multi_output)
# Output testing results.
test(model, multi_output)
exit(0)
================================================
FILE: model.py
================================================
from keras import layers
from keras.models import Model
from keras import backend as K
from core_layers import *
def conv_relu(x, filters, kernel, padding='same', use_bias = True, dilation_rate=1, strides=(1,1)):
    """Conv2D followed by ReLU.

    A dilation_rate of 0 is treated as a plain 1x1 convolution; in that
    branch `kernel` and `strides` are ignored (original behavior).
    """
    if dilation_rate == 0:
        return layers.Conv2D(filters, 1, padding=padding, use_bias=use_bias,
                             activation='relu')(x)
    return layers.Conv2D(filters, kernel, padding=padding, use_bias=use_bias,
                         dilation_rate=dilation_rate,
                         strides=strides,
                         activation='relu')(x)
def conv(x, filters, kernel, padding='same', use_bias=True, dilation_rate=1, strides = (1,1)):
    """Plain Conv2D with no activation."""
    layer = layers.Conv2D(filters, kernel, padding=padding, use_bias=use_bias,
                          dilation_rate=dilation_rate, strides=strides)
    return layer(x)
def conv_bn_relu(x, filters, kernel, padding='same', use_bias = True, dilation_rate=1):
    """Conv2D -> BatchNorm (channels-last) -> ReLU."""
    out = layers.Conv2D(filters, kernel, padding=padding, use_bias=use_bias,
                        dilation_rate=dilation_rate)(x)
    out = layers.BatchNormalization(axis=-1)(out)
    return layers.Activation('relu')(out)
def conv_prelu(x, filters, kernel, padding='same', use_bias=False, dilation_rate=1, strides = (1,1)):
    """Conv2D followed by a (learnable) PReLU activation."""
    out = layers.Conv2D(filters, kernel, padding=padding, use_bias=use_bias,
                        dilation_rate=dilation_rate, strides=strides)(x)
    return layers.advanced_activations.PReLU()(out)
def MBCNN(nFilters, multi=True):
    """Build the MBCNN multiscale demoireing network.

    Args:
        nFilters: base channel width (64 for MBCNN, 32 for MBCNN-light).
        multi: when True the model outputs the reconstructions of all
            three scales (coarsest first, finest last); otherwise only
            the final full-resolution output.

    Returns:
        A keras Model taking an RGB image of shape (None, None, 3).
    """
    conv_func = conv_relu

    def pre_block(x, d_list, enable = True):
        # Densely-connected dilated convs plus the learnable bandpass
        # (implicit DCT) branch, merged residually with the input.
        # (Fixes the original's `enbale` typo; callers pass positionally.)
        t = x
        for d in d_list:
            _t = conv_func(t, nFilters, 3, dilation_rate=d)
            t = layers.Concatenate(axis=-1)([_t, t])
        # 64 channels is required here: adaptive_implicit_trans uses a
        # fixed 64x64 DCT kernel regardless of nFilters.
        t = conv(t, 64, 3)
        t = adaptive_implicit_trans()(t)
        t = conv(t, nFilters*2, 1)
        t = ScaleLayer(s=0.1)(t)
        if not enable:
            # Zero the branch while keeping the layer graph intact.
            t = layers.Lambda(lambda v: v*0)(t)
        t = layers.Add()([x, t])
        return t

    def pos_block(x, d_list):
        # Densely-connected dilated convs followed by a 1x1 fusion conv.
        t = x
        for d in d_list:
            _t = conv_func(t, nFilters, 3, dilation_rate=d)
            t = layers.Concatenate(axis=-1)([_t, t])
        t = conv_func(t, nFilters*2, 1)
        return t

    def global_block(x):
        # Global (squeeze-and-excitation-like) feature modulation:
        # pooled features gate a 1x1-projected copy of the input.
        t = layers.ZeroPadding2D(padding=(1,1))(x)
        t = conv_func(t, nFilters*4, 3, strides=(2,2))
        t = layers.GlobalAveragePooling2D()(t)
        t = layers.Dense(nFilters*16, activation='relu')(t)
        t = layers.Dense(nFilters*8, activation='relu')(t)
        t = layers.Dense(nFilters*4)(t)
        _t = conv_func(x, nFilters*4, 1)
        _t = layers.Multiply()([_t, t])
        _t = conv_func(_t, nFilters*2, 1)
        return _t

    output_list = []
    d_list_a = (1,2,3,2,1)
    d_list_b = (1,2,3,2,1)
    d_list_c = (1,2,2,2,1)

    x = layers.Input(shape=(None, None, 3)) #16m*16m
    _x = Space2Depth(scale=2)(x)

    # Encoder: three successively downsampled scales.
    t1 = conv_func(_x, nFilters*2, 3, padding='same') #8m*8m
    t1 = pre_block(t1, d_list_a, True)
    t2 = layers.ZeroPadding2D(padding=(1,1))(t1)
    t2 = conv_func(t2, nFilters*2, 3, padding='valid', strides=(2,2)) #4m*4m
    t2 = pre_block(t2, d_list_b, True)
    t3 = layers.ZeroPadding2D(padding=(1,1))(t2)
    t3 = conv_func(t3, nFilters*2, 3, padding='valid', strides=(2,2)) #2m*2m

    # Coarsest-scale reconstruction.
    t3 = pre_block(t3, d_list_c, True)
    t3 = global_block(t3)
    t3 = pos_block(t3, d_list_c)
    t3_out = conv(t3, 12, 3)
    t3_out = Depth2Space(scale=2)(t3_out) #4m*4m
    output_list.append(t3_out)

    # Middle scale: fuse the coarse output with the encoder features.
    _t2 = layers.Concatenate()([t3_out, t2])
    _t2 = conv_func(_t2, nFilters*2, 1)
    _t2 = global_block(_t2)
    _t2 = pre_block(_t2, d_list_b, True)
    _t2 = global_block(_t2)
    _t2 = pos_block(_t2, d_list_b)
    t2_out = conv(_t2, 12, 3)
    t2_out = Depth2Space(scale=2)(t2_out) #8m*8m
    output_list.append(t2_out)

    # Finest scale: fuse the middle output with the first-scale features.
    _t1 = layers.Concatenate()([t1, t2_out])
    _t1 = conv_func(_t1, nFilters*2, 1)
    _t1 = global_block(_t1)
    _t1 = pre_block(_t1, d_list_a, True)
    _t1 = global_block(_t1)
    _t1 = pos_block(_t1, d_list_a)
    _t1 = conv(_t1, 12, 3)
    y = Depth2Space(scale=2)(_t1) #16m*16m
    output_list.append(y)

    # BUGFIX: use the explicitly imported `Model`; the original referenced
    # `models.Model`, which only resolved because `models` leaked through
    # `from core_layers import *`.
    if multi != True:
        return Model(x, y)
    else:
        return Model(x, output_list)
================================================
FILE: utils.py
================================================
import numpy as np
import math, os
from keras import optimizers, backend
import tensorflow as tf
import cv2
def get_Y(x):
    """Return the luma (Y) channel of an H x W x 3 image.

    Uses the BT.601 studio-swing weights (0.257, 0.504, 0.098) with a
    16/255 offset; assumes channel order (R, G, B) and values in [0, 1]
    -- TODO confirm channel order against callers (cv2 loads BGR).
    """
    r, g, b = x[:, :, 0], x[:, :, 1], x[:, :, 2]
    return 0.257 * r + 0.504 * g + 0.098 * b + 0.0627
def calc_PSNR(x,y):
    """PSNR in dB between two arrays whose values are assumed in [0, 1].

    Identical inputs (mse == 0) raise, as in the original implementation.
    """
    mse = float(np.mean((x - y) ** 2))
    return 10 * math.log10(1 / mse)
def get_session():
    # TF1-style session whose GPU memory grows on demand instead of
    # grabbing the entire GPU up front.
    # NOTE(review): tf.ConfigProto/tf.Session exist only in TF1.x
    # (or tf.compat.v1); this file targets Tensorflow >1.10 per README.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    return tf.Session(config = config)
def list_filter(file_list, tail):
    """Return the names in `file_list` whose extension equals `tail`.

    The match is exact and case-sensitive, e.g. tail='.png' keeps
    'a.png' but not 'a.PNG'.
    """
    return [name for name in file_list if os.path.splitext(name)[1] == tail]
def data_augmentation(x, method):
    """Apply one of six deterministic flip/rotation augmentations.

    method 0: rot90, 1: fliplr, 2: flipud, 3: rot180,
    4: rot90 after fliplr, 5: rot90 after flipud.
    Any other method returns None (original fall-through behavior).
    """
    ops = {
        0: lambda a: np.rot90(a),
        1: np.fliplr,
        2: np.flipud,
        3: lambda a: np.rot90(a, 2),
        4: lambda a: np.rot90(np.fliplr(a)),
        5: lambda a: np.rot90(np.flipud(a)),
    }
    op = ops.get(method)
    return op(x) if op is not None else None
# clear 0.6806, 0.6876, 0.6954
# moire 0.3978, 0.4027, 0.4074
def calc_meanRGB(img_dirs, tail):
    """Print the running and final per-channel mean over a directory.

    Iterates the images in `img_dirs` ending in `tail`; channel order is
    whatever cv2.imread returns (BGR). Prints a carriage-return progress
    line per image and the rounded final mean at the end.
    """
    names = list_filter(os.listdir(img_dirs), tail)
    total = np.zeros((3,))
    count = 0
    for name in names:
        count += 1
        img = cv2.imread(img_dirs + name).astype(np.float32) / 255.0
        total += np.mean(img, axis=(0, 1))
        avg = total / count
        print('%d: %s (%f, %f, %f)' % (count, name, avg[0], avg[1], avg[2]), end='\r')
    print(np.round(total / count, 4))
def crop(x,scale):
    """Crop the top-left region of `x` so that both spatial dimensions
    (axes 0 and 1) become exact multiples of `scale`."""
    height, width = x.shape[0], x.shape[1]
    return x[:height - height % scale, :width - width % scale]
gitextract_hhj1u1mn/ ├── README.md ├── core_layers.py ├── main_multiscale.py ├── model.py └── utils.py
SYMBOL INDEX (33 symbols across 4 files)
FILE: core_layers.py
class Space2Depth (line 9) | class Space2Depth(layers.Layer):
method __init__ (line 10) | def __init__(self, scale, **kwargs):
method call (line 14) | def call(self, inputs, **kwargs):
method compute_output_shape (line 17) | def compute_output_shape(self, input_shape):
class Depth2Space (line 23) | class Depth2Space(layers.Layer):
method __init__ (line 24) | def __init__(self, scale, **kwargs):
method call (line 27) | def call(self, inputs, **kwargs):
method compute_output_shape (line 30) | def compute_output_shape(self, input_shape):
class adaptive_implicit_trans (line 36) | class adaptive_implicit_trans(layers.Layer):
method __init__ (line 37) | def __init__(self, **kwargs):
method build (line 40) | def build(self, input_shape):
method call (line 64) | def call(self, inputs):
method compute_output_shape (line 74) | def compute_output_shape(self, input_shape):
class ScaleLayer (line 77) | class ScaleLayer(layers.Layer):
method __init__ (line 78) | def __init__(self, s, **kwargs):
method build (line 82) | def build(self, input_shape):
method call (line 87) | def call(self, inputs):
method compute_output_shape (line 90) | def compute_output_shape(self, input_shape):
FILE: main_multiscale.py
function validate_ssim (line 29) | def validate_ssim(model, gt_list, ns_list, name_list, multi_output=False):
function test (line 55) | def test(model,multi_output=False):
function generate_validation (line 79) | def generate_validation(valid_list, multi_input, mode='sub'):
FILE: model.py
function conv_relu (line 6) | def conv_relu(x, filters, kernel, padding='same', use_bias = True, dilat...
function conv (line 17) | def conv(x, filters, kernel, padding='same', use_bias=True, dilation_rat...
function conv_bn_relu (line 22) | def conv_bn_relu(x, filters, kernel, padding='same', use_bias = True, di...
function conv_prelu (line 29) | def conv_prelu(x, filters, kernel, padding='same', use_bias=False, dilat...
function MBCNN (line 35) | def MBCNN(nFilters, multi=True):
FILE: utils.py
function get_Y (line 7) | def get_Y(x):
function calc_PSNR (line 14) | def calc_PSNR(x,y):
function get_session (line 19) | def get_session():
function list_filter (line 24) | def list_filter(file_list, tail):
function data_augmentation (line 32) | def data_augmentation(x, method):
function calc_meanRGB (line 48) | def calc_meanRGB(img_dirs, tail):
function crop (line 63) | def crop(x,scale):
Condensed preview — 5 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (17K chars).
[
{
"path": "README.md",
"chars": 2181,
"preview": "# Learnbale_Bandpass_Filter\nImage Demoireing with Learnable Bandpass Filters, CVPR2020\n\nOur extension work is accepted b"
},
{
"path": "core_layers.py",
"chars": 3393,
"preview": "import tensorflow as tf\r\nfrom keras import backend as k\r\nfrom keras.utils import conv_utils\r\nimport numpy as np\r\nfrom ke"
},
{
"path": "main_multiscale.py",
"chars": 4708,
"preview": "import tensorflow as tf\r\nimport os, cv2, random, keras\r\nfrom skimage.measure import compare_ssim\r\nimport numpy as np\r\nfr"
},
{
"path": "model.py",
"chars": 4371,
"preview": "from keras import layers\r\nfrom keras.models import Model\r\nfrom keras import backend as K\r\nfrom core_layers import *\r\n\r\nd"
},
{
"path": "utils.py",
"chars": 1623,
"preview": "import numpy as np\r\nimport math, os\r\nfrom keras import optimizers, backend\r\nimport tensorflow as tf\r\nimport cv2\r\n\r\ndef g"
}
]
About this extraction
This page contains the full source code of the zhenngbolun/Learnbale_Bandpass_Filter GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 5 files (15.9 KB), approximately 4.8k tokens, and a symbol index with 33 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.