Repository: DCurro/CannyEdgePytorch
Branch: master
Commit: 8eb3157c8fd8
Files: 5
Total size: 8.2 KB
Directory structure:
gitextract_yqimxnw4/
├── .gitignore
├── README.md
├── canny.py
├── net_canny.py
└── requirements.txt
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
.idea/
*.pyc
================================================
FILE: README.md
================================================
# CannyEdgePytorch
Uses PyTorch 0.4.1 and Python 3.7 (but probably works with 2.7 also).
A simple implementation of the Canny Edge Detection Algorithm (currently without hysteresis).
This project was implemented with PyTorch to take advantage of the parallelization of convolutions.
The original image:
<img src="https://github.com/DCurro/CannyEdgePytorch/blob/master/fb_profile.jpg" width="400">
Finding the gradient magnitude:
<img src="https://github.com/DCurro/CannyEdgePytorch/blob/master/gradient_magnitude.png" width="400">
Early thresholding (to show that edge thinning matters):
<img src="https://github.com/DCurro/CannyEdgePytorch/blob/master/thresholded.png" width="400">
And finally, the image after non-maximum suppression:
<img src="https://github.com/DCurro/CannyEdgePytorch/blob/master/final.png" width="400">
================================================
FILE: canny.py
================================================
from scipy.misc import imread, imsave
import torch
from torch.autograd import Variable
from net_canny import Net
def canny(raw_img, use_cuda=False):
    """Run the Canny edge detector on one H x W x 3 RGB image array and
    save the intermediate stages as PNGs in the working directory.

    Parameters:
        raw_img: numpy array of shape (H, W, 3), values presumably in
            [0, 1] (the __main__ driver divides by 255.0).
        use_cuda: move the network and input to the GPU before running.

    Side effects: writes gradient_magnitude.png, thin_edges.png,
    final.png and thresholded.png. Returns nothing.
    """
    # HWC -> CHW, then add a leading batch dimension of 1 (net wants NCHW).
    img = torch.from_numpy(raw_img.transpose((2, 0, 1)))
    batch = torch.stack([img]).float()

    net = Net(threshold=3.0, use_cuda=use_cuda)
    if use_cuda:
        net.cuda()
    net.eval()

    data = Variable(batch)
    if use_cuda:
        # Fix: move the existing Variable instead of wrapping batch twice.
        data = data.cuda()

    blurred_img, grad_mag, grad_orientation, thin_edges, thresholded, early_threshold = net(data)

    imsave('gradient_magnitude.png', grad_mag.data.cpu().numpy()[0, 0])
    # Fix: thin_edges.png previously saved `thresholded`, leaving the
    # unpacked `thin_edges` tensor unused.
    imsave('thin_edges.png', thin_edges.data.cpu().numpy()[0, 0])
    imsave('final.png', (thresholded.data.cpu().numpy()[0, 0] > 0.0).astype(float))
    imsave('thresholded.png', early_threshold.data.cpu().numpy()[0, 0])
if __name__ == '__main__':
    # Load the demo image and normalise pixel values to [0, 1].
    img = imread('fb_profile.jpg') / 255.0
    # canny(img, use_cuda=False)
    canny(img, use_cuda=True)
================================================
FILE: net_canny.py
================================================
import torch
import torch.nn as nn
import numpy as np
from scipy.signal import gaussian
class Net(nn.Module):
    """Canny edge detection built from fixed (hand-set, non-learned)
    convolutions: separable Gaussian blur, Sobel gradients, directional
    difference filters for non-maximum suppression, and a magnitude
    threshold.  No hysteresis step is implemented.
    """

    def __init__(self, threshold=10.0, use_cuda=False):
        """
        threshold: gradient-magnitude cutoff used in forward().
        use_cuda: when True, forward() allocates its index tensor on the
            GPU.  The module itself must still be moved with .cuda() by
            the caller.
        """
        super(Net, self).__init__()
        self.threshold = threshold
        self.use_cuda = use_cuda

        # 5-tap 1-D Gaussian (std=1.0), applied separably: rows first,
        # then columns.  padding = filter_size//2 preserves spatial size.
        filter_size = 5
        generated_filters = gaussian(filter_size,std=1.0).reshape([1,filter_size])

        self.gaussian_filter_horizontal = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(1,filter_size), padding=(0,filter_size//2))
        self.gaussian_filter_horizontal.weight.data.copy_(torch.from_numpy(generated_filters))
        self.gaussian_filter_horizontal.bias.data.copy_(torch.from_numpy(np.array([0.0])))
        self.gaussian_filter_vertical = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(filter_size,1), padding=(filter_size//2,0))
        self.gaussian_filter_vertical.weight.data.copy_(torch.from_numpy(generated_filters.T))
        self.gaussian_filter_vertical.bias.data.copy_(torch.from_numpy(np.array([0.0])))

        # Sobel kernels; the vertical kernel is the transpose of the
        # horizontal one.
        sobel_filter = np.array([[1, 0, -1],
                                 [2, 0, -2],
                                 [1, 0, -1]])

        self.sobel_filter_horizontal = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=sobel_filter.shape, padding=sobel_filter.shape[0]//2)
        self.sobel_filter_horizontal.weight.data.copy_(torch.from_numpy(sobel_filter))
        self.sobel_filter_horizontal.bias.data.copy_(torch.from_numpy(np.array([0.0])))
        self.sobel_filter_vertical = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=sobel_filter.shape, padding=sobel_filter.shape[0]//2)
        self.sobel_filter_vertical.weight.data.copy_(torch.from_numpy(sobel_filter.T))
        self.sobel_filter_vertical.bias.data.copy_(torch.from_numpy(np.array([0.0])))

        # Directional difference kernels: output channel k computes
        # (center pixel) - (neighbour at k*45 degrees).
        # filters were flipped manually
        filter_0 = np.array([   [ 0, 0, 0],
                                [ 0, 1, -1],
                                [ 0, 0, 0]])

        filter_45 = np.array([  [0, 0, 0],
                                [ 0, 1, 0],
                                [ 0, 0, -1]])

        filter_90 = np.array([  [ 0, 0, 0],
                                [ 0, 1, 0],
                                [ 0,-1, 0]])

        filter_135 = np.array([ [ 0, 0, 0],
                                [ 0, 1, 0],
                                [-1, 0, 0]])

        filter_180 = np.array([ [ 0, 0, 0],
                                [-1, 1, 0],
                                [ 0, 0, 0]])

        filter_225 = np.array([ [-1, 0, 0],
                                [ 0, 1, 0],
                                [ 0, 0, 0]])

        filter_270 = np.array([ [ 0,-1, 0],
                                [ 0, 1, 0],
                                [ 0, 0, 0]])

        filter_315 = np.array([ [ 0, 0, -1],
                                [ 0, 1, 0],
                                [ 0, 0, 0]])

        # One conv with 8 output channels evaluates all eight directional
        # differences in a single pass.
        all_filters = np.stack([filter_0, filter_45, filter_90, filter_135, filter_180, filter_225, filter_270, filter_315])

        self.directional_filter = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=filter_0.shape, padding=filter_0.shape[-1] // 2)
        self.directional_filter.weight.data.copy_(torch.from_numpy(all_filters[:, None, ...]))
        self.directional_filter.bias.data.copy_(torch.from_numpy(np.zeros(shape=(all_filters.shape[0],))))

    def forward(self, img):
        """Run the detector.

        img: float tensor of shape (1, 3, H, W).  NOTE(review): the NMS
            gather below hard-codes batch size 1 via .view(1, height, width),
            so larger batches are not supported.

        Returns a 6-tuple:
            blurred_img      (1, 3, H, W) Gaussian-blurred input
            grad_mag         (1, 1, H, W) summed per-channel gradient magnitude
            grad_orientation (1, 1, H, W) angle in degrees, quantized to 45-degree steps
            thin_edges       (1, 1, H, W) grad_mag after non-maximum suppression
            thresholded      (1, 1, H, W) thin_edges with values < threshold zeroed
            early_threshold  (1, 1, H, W) grad_mag with values < threshold zeroed
        """
        img_r = img[:,0:1]
        img_g = img[:,1:2]
        img_b = img[:,2:3]

        # Separable Gaussian blur, one colour channel at a time.
        blur_horizontal = self.gaussian_filter_horizontal(img_r)
        blurred_img_r = self.gaussian_filter_vertical(blur_horizontal)
        blur_horizontal = self.gaussian_filter_horizontal(img_g)
        blurred_img_g = self.gaussian_filter_vertical(blur_horizontal)
        blur_horizontal = self.gaussian_filter_horizontal(img_b)
        blurred_img_b = self.gaussian_filter_vertical(blur_horizontal)

        # Reassemble the three blurred channels; the stack/squeeze/stack
        # dance collapses the (1,3,1,H,W) stack back to (1,3,H,W).
        blurred_img = torch.stack([blurred_img_r,blurred_img_g,blurred_img_b],dim=1)
        blurred_img = torch.stack([torch.squeeze(blurred_img)])

        # Per-channel Sobel gradients.
        grad_x_r = self.sobel_filter_horizontal(blurred_img_r)
        grad_y_r = self.sobel_filter_vertical(blurred_img_r)
        grad_x_g = self.sobel_filter_horizontal(blurred_img_g)
        grad_y_g = self.sobel_filter_vertical(blurred_img_g)
        grad_x_b = self.sobel_filter_horizontal(blurred_img_b)
        grad_y_b = self.sobel_filter_vertical(blurred_img_b)

        # COMPUTE THICK EDGES
        # Magnitude: sum of the per-channel Euclidean gradient magnitudes.
        grad_mag = torch.sqrt(grad_x_r**2 + grad_y_r**2)
        grad_mag += torch.sqrt(grad_x_g**2 + grad_y_g**2)
        grad_mag += torch.sqrt(grad_x_b**2 + grad_y_b**2)
        # Orientation from the channel-summed gradients, shifted by 180 to
        # be non-negative, then rounded to the nearest multiple of 45.
        grad_orientation = (torch.atan2(grad_y_r+grad_y_g+grad_y_b, grad_x_r+grad_x_g+grad_x_b) * (180.0/3.14159))
        grad_orientation += 180.0
        grad_orientation = torch.round( grad_orientation / 45.0 ) * 45.0

        # THIN EDGES (NON-MAX SUPPRESSION)
        # all_filtered[:, k] = (center - neighbour at k*45 deg) of grad_mag.
        all_filtered = self.directional_filter(grad_mag)

        # Per-pixel channel index for the gradient direction and for the
        # opposite direction (offset by 4 channels = 180 degrees).
        inidices_positive = (grad_orientation / 45) % 8
        inidices_negative = ((grad_orientation / 45) + 4) % 8

        height = inidices_positive.size()[2]
        width = inidices_positive.size()[3]
        pixel_count = height * width
        pixel_range = torch.FloatTensor([range(pixel_count)])
        if self.use_cuda:
            pixel_range = torch.cuda.FloatTensor([range(pixel_count)])

        # Gather each pixel's difference along its own gradient direction
        # by flat-indexing the (8*H*W,) view: index = channel*H*W + pixel.
        indices = (inidices_positive.view(-1).data * pixel_count + pixel_range).squeeze()
        channel_select_filtered_positive = all_filtered.view(-1)[indices.long()].view(1,height,width)

        indices = (inidices_negative.view(-1).data * pixel_count + pixel_range).squeeze()
        channel_select_filtered_negative = all_filtered.view(-1)[indices.long()].view(1,height,width)

        channel_select_filtered = torch.stack([channel_select_filtered_positive,channel_select_filtered_negative])

        # A pixel survives NMS iff its magnitude exceeds both the neighbour
        # along the gradient and the neighbour opposite it (min of the two
        # differences is positive).
        is_max = channel_select_filtered.min(dim=0)[0] > 0.0
        is_max = torch.unsqueeze(is_max, dim=0)

        thin_edges = grad_mag.clone()
        thin_edges[is_max==0] = 0.0

        # THRESHOLD
        thresholded = thin_edges.clone()
        thresholded[thin_edges<self.threshold] = 0.0

        # Also threshold the raw magnitude (no NMS) for comparison output.
        early_threshold = grad_mag.clone()
        early_threshold[grad_mag<self.threshold] = 0.0

        assert grad_mag.size() == grad_orientation.size() == thin_edges.size() == thresholded.size() == early_threshold.size()

        return blurred_img, grad_mag, grad_orientation, thin_edges, thresholded, early_threshold
if __name__ == '__main__':
    # Smoke check: constructing the module exercises all filter set-up.
    Net()
================================================
FILE: requirements.txt
================================================
numpy==1.15.3
Pillow==5.3.0
scipy==1.1.0
six==1.11.0
torch==0.4.1
torchvision==0.2.1
gitextract_yqimxnw4/ ├── .gitignore ├── README.md ├── canny.py ├── net_canny.py └── requirements.txt
SYMBOL INDEX (4 symbols across 2 files)
FILE: canny.py
function canny (line 7) | def canny(raw_img, use_cuda=False):
FILE: net_canny.py
class Net (line 7) | class Net(nn.Module):
method __init__ (line 8) | def __init__(self, threshold=10.0, use_cuda=False):
method forward (line 74) | def forward(self, img):
Condensed preview — 5 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (9K chars).
[
{
"path": ".gitignore",
"chars": 13,
"preview": ".idea/\n*.pyc\n"
},
{
"path": "README.md",
"chars": 838,
"preview": "# CannyEdgePytorch\n\nUses PyTorch 0.4.1 and Python 3.7 (but probably works with 2.7 also).\n\nA simple implementation of th"
},
{
"path": "canny.py",
"chars": 959,
"preview": "from scipy.misc import imread, imsave\nimport torch\nfrom torch.autograd import Variable\nfrom net_canny import Net\n\n\ndef c"
},
{
"path": "net_canny.py",
"chars": 6529,
"preview": "import torch\nimport torch.nn as nn\nimport numpy as np\nfrom scipy.signal import gaussian\n\n\nclass Net(nn.Module):\n def "
},
{
"path": "requirements.txt",
"chars": 85,
"preview": "numpy==1.15.3\nPillow==5.3.0\nscipy==1.1.0\nsix==1.11.0\ntorch==0.4.1\ntorchvision==0.2.1\n"
}
]
About this extraction
This page contains the full source code of the DCurro/CannyEdgePytorch GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 5 files (8.2 KB), approximately 2.3k tokens, and a symbol index with 4 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.