Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
101 commits
Select commit Hold shift + click to select a range
d9a20cb
Update framework.py
usb1508 Oct 7, 2019
1e14be5
Created using Colaboratory
usb1508 Oct 7, 2019
4c90f03
Using torch.sigmoid instead of nn.functional.sigmoid
usb1508 Oct 7, 2019
d90747e
Adding Single train image
usb1508 Nov 11, 2019
62842d8
Adding Single test Image
usb1508 Nov 11, 2019
e723318
Converting code for Python 3
usb1508 Nov 11, 2019
23d25a3
Update train.py
usb1508 Nov 11, 2019
126c741
Update framework.py
usb1508 Nov 11, 2019
71d8dd0
Project Parivartan
usb1508 Nov 11, 2019
78d3a44
Making train images = 4
usb1508 Nov 11, 2019
3be1235
Update train.py
usb1508 Nov 11, 2019
3207633
Update data.py
usb1508 Nov 11, 2019
93635d3
Update train.py
usb1508 Nov 11, 2019
021573d
Update train.py
usb1508 Nov 11, 2019
b21e71b
Update train.py
usb1508 Nov 11, 2019
119ddfa
Using List Comprehension instead of Map
usb1508 Nov 11, 2019
56d9ad5
Update data.py
usb1508 Nov 11, 2019
9d3403b
Update train.py
usb1508 Nov 11, 2019
8740386
Update data.py
usb1508 Nov 11, 2019
bde6442
Saving after every 10 epochs
usb1508 Nov 23, 2019
e730cb8
Update train.py
usb1508 Nov 23, 2019
0d6134b
Saving models after every 10 iterations
usb1508 Nov 23, 2019
cb921e0
Update train.py
usb1508 Nov 23, 2019
310e21e
Update train.py
usb1508 Nov 23, 2019
1bae650
Update train.py
usb1508 Nov 23, 2019
757e587
Saving model to google drive
usb1508 Nov 23, 2019
7ce257a
Update train.py
usb1508 Nov 23, 2019
99ac63d
Update train.py
usb1508 Nov 23, 2019
c5b51ad
Testing
usb1508 Nov 24, 2019
42832b7
test
usb1508 Nov 24, 2019
c326d37
Update train.py
usb1508 Nov 24, 2019
af0b5de
Update train.py
usb1508 Nov 24, 2019
a30863b
Update train.py
usb1508 Nov 24, 2019
b882a19
Update train.py
usb1508 Nov 24, 2019
1207d9e
Update framework.py
usb1508 Nov 24, 2019
5407e89
Update framework.py
usb1508 Nov 24, 2019
e665da4
Update train.py
usb1508 Nov 24, 2019
4b41c46
Update train.py
usb1508 Nov 24, 2019
f89c4e1
Update framework.py
usb1508 Nov 24, 2019
57a872e
Update train.py
usb1508 Nov 24, 2019
225138f
Update framework.py
usb1508 Nov 24, 2019
a4f1b83
Update train.py
usb1508 Nov 24, 2019
e6289cb
Update train.py
usb1508 Nov 26, 2019
0c7551c
Update test.py
usb1508 Nov 29, 2019
4db69fd
Update test.py
usb1508 Nov 29, 2019
d000258
Update train.py
usb1508 Nov 29, 2019
c86dc35
Update train.py
usb1508 Nov 29, 2019
d5197f0
Update train.py
usb1508 Nov 29, 2019
0138860
Update train.py
usb1508 Nov 29, 2019
874e2e9
Update train.py
usb1508 Nov 29, 2019
17c5ec7
Update framework.py
usb1508 Nov 29, 2019
bfcca10
Update train.py
usb1508 Nov 29, 2019
26b19e9
Update train.py
usb1508 Nov 29, 2019
7f7d0aa
Update train.py
usb1508 Nov 29, 2019
443b834
Update train.py
usb1508 Nov 29, 2019
97e463c
Create non_local.py
usb1508 Nov 29, 2019
b7ee0e3
Update dinknet.py
usb1508 Nov 29, 2019
8510f8c
Update dinknet.py
usb1508 Nov 29, 2019
f86435c
Update test.py
usb1508 Nov 29, 2019
dba4652
Update test.py
usb1508 Nov 29, 2019
8b64e93
Update test.py
usb1508 Nov 29, 2019
0b1c0e0
Update framework.py
usb1508 Nov 29, 2019
d7c870b
Update test.py
usb1508 Nov 29, 2019
7b9a4dd
Update README.md
usb1508 Nov 29, 2019
5b8d380
Update train.py
usb1508 Nov 29, 2019
5e96ecb
Update train.py
usb1508 Nov 29, 2019
84f3f2f
Update framework.py
usb1508 Nov 29, 2019
f04eead
Update train.py
usb1508 Nov 29, 2019
4311bdb
Update framework.py
usb1508 Nov 29, 2019
2f8dda5
Update framework.py
usb1508 Nov 29, 2019
2f88bca
Update train.py
usb1508 Nov 29, 2019
d9bc991
Update framework.py
usb1508 Nov 29, 2019
d2a38cf
Update train.py
usb1508 Nov 29, 2019
f369301
Update framework.py
usb1508 Nov 29, 2019
744a4e1
changing to previous version:
usb1508 Nov 29, 2019
c940344
Update framework.py
usb1508 Nov 29, 2019
f84040e
Update train.py
usb1508 Nov 29, 2019
77a9b39
Update test.py
usb1508 Nov 29, 2019
3eba29a
Update test.py
usb1508 Nov 29, 2019
bc4fc2d
Update train.py
usb1508 Nov 29, 2019
1debd92
Update train.py
usb1508 Nov 29, 2019
2b674cb
Update train.py
usb1508 Nov 29, 2019
232bae1
Update train.py
usb1508 Nov 29, 2019
3616d30
Changing the load function
usb1508 Nov 30, 2019
cd04fc5
Removing dataParallel from net()
usb1508 Nov 30, 2019
0e2cf2d
TypeError: 'DataParallel' object is not subscriptable
usb1508 Nov 30, 2019
16d88c8
Loading entire_model.pth
usb1508 Nov 30, 2019
2640cc2
Changing the load method
usb1508 Nov 30, 2019
f2e6f02
update test.py
usb1508 Nov 30, 2019
7ef12c7
update
usb1508 Nov 30, 2019
a3f5f65
update
usb1508 Nov 30, 2019
e40fa09
using model.pt
usb1508 Nov 30, 2019
bb01845
update
usb1508 Nov 30, 2019
f6aac01
checking if image loaded is correct
usb1508 Nov 30, 2019
34b0c77
update
usb1508 Nov 30, 2019
1405b8f
update
usb1508 Nov 30, 2019
af51bd2
update
usb1508 Nov 30, 2019
dbddbc6
Testing images with actual results
usb1508 Nov 30, 2019
02c70eb
Added test images for comparing with the paper
usb1508 Dec 5, 2019
3a26712
Update
usb1508 Dec 5, 2019
9262c4c
Documented code more properly
usb1508 Dec 7, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 10 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ Code for the 1st place solution in [DeepGlobe Road Extraction Challenge](https:/
# Requirements

- Cuda 8.0
- Python 2.7
- Python 3.7
- Pytorch 0.2.0
- cv2

Expand All @@ -17,10 +17,17 @@ Data is from [DeepGlobe Road Extraction Challenge](https://competitions.codalab.

### Train
- Run `python train.py` to train the default D-LinkNet34.
- Run `python train.py <path_to_pretrained_model>` to resume training from the given checkpoint.
### Example
`python train.py /content/gdrive/My Drive/model.pt`
or
`python train.py`

### Predict
- Run `python test.py` to predict on the default D-LinkNet34.
Predicting two possible image sets

- Run `python test.py valid` to predict on the default D-LinkNet34 from the validation dataset.
- Run `python test.py test` to predict on the default D-LinkNet34 from the testing dataset.

### Download trained D-LinkNet34
- [Dropbox](https://www.dropbox.com/sh/h62vr320eiy57tt/AAB5Tm43-efmtYzW_GFyUCfma?dl=0)
- [百度网盘](https://pan.baidu.com/s/1wqyOEkw5o0bzbuj7gBMesQ)
225 changes: 225 additions & 0 deletions Untitled11.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,225 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Untitled11.ipynb",
"provenance": [],
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/utkarsh1508/DeepGlobe-Road-Extraction-Challenge/blob/master/Untitled11.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"id": "bLq-J42GN56S",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 125
},
"outputId": "4bb65494-300c-49b2-a5aa-2834b8a1d51f"
},
"source": [
"from google.colab import drive\n",
"drive.mount('/content/gdrive')"
],
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"text": [
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=email%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdocs.test%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.photos.readonly%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fpeopleapi.readonly&response_type=code\n",
"\n",
"Enter your authorization code:\n",
"··········\n",
"Mounted at /content/gdrive\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "Pb8teP-5j_AR",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 123
},
"outputId": "8b56f87c-cb87-407b-f33b-dbcb865813a5"
},
"source": [
"!git clone https://github.com/utkarsh1508/DeepGlobe-Road-Extraction-Challenge.git"
],
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"text": [
"Cloning into 'DeepGlobe-Road-Extraction-Challenge'...\n",
"remote: Enumerating objects: 3, done.\u001b[K\n",
"remote: Counting objects: 100% (3/3), done.\u001b[K\n",
"remote: Compressing objects: 100% (3/3), done.\u001b[K\n",
"remote: Total 38 (delta 0), reused 0 (delta 0), pack-reused 35\u001b[K\n",
"Unpacking objects: 100% (38/38), done.\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "uH6kwDALNuoT",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"outputId": "c277dd01-e88c-49ad-95b0-87bc657080d7"
},
"source": [
"%cd /content/gdrive/My\\ Drive/Dataset"
],
"execution_count": 3,
"outputs": [
{
"output_type": "stream",
"text": [
"/content/gdrive/My Drive/Dataset\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "lEMdtWPOINLm",
"colab_type": "code",
"colab": {}
},
"source": [
"!cp road-train-1.v2.zip /content/DeepGlobe-Road-Extraction-Challenge/dataset/train.zip\n",
"# !cp road-train-2+valid.v2.zip /content/DeepGlobe-Road-Extraction-Challenge/dataset/valid.zip\n",
"# !cp road_test_sat.zip /content/DeepGlobe-Road-Extraction-Challenge/dataset/test.zip"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "AC7zovqHQ4se",
"colab_type": "code",
"outputId": "f06cdeae-ebae-43d2-9401-71e3454e8669",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"source": [
"%cd /content/DeepGlobe-Road-Extraction-Challenge/dataset/"
],
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"text": [
"/content/DeepGlobe-Road-Extraction-Challenge/dataset\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "sDI-VXnMSxpn",
"colab_type": "code",
"colab": {}
},
"source": [
"!unzip -q train.zip\n",
"# !unzip -qd test test.zip \n",
"# !unzip -q valid valid.zip"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "p4gyA36cW-CM",
"colab_type": "code",
"outputId": "de091696-e0cf-4fdf-f8aa-f539df9bed1b",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"source": [
"%cd /content/DeepGlobe-Road-Extraction-Challenge/"
],
"execution_count": 7,
"outputs": [
{
"output_type": "stream",
"text": [
"/content/DeepGlobe-Road-Extraction-Challenge\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "SY7YYYUdXcWb",
"colab_type": "code",
"outputId": "8cd37e00-62a6-464a-db2c-364a45afc781",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 188
}
},
"source": [
"!python2 train.py"
],
"execution_count": 0,
"outputs": [
{
"output_type": "stream",
"text": [
"Downloading: \"https://download.pytorch.org/models/resnet34-333f7ec4.pth\" to /root/.cache/torch/checkpoints/resnet34-333f7ec4.pth\n",
"100% 83.3M/83.3M [00:00<00:00, 114MB/s]\n",
"/usr/local/lib/python2.7/dist-packages/torch/nn/functional.py:1350: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n",
" warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n",
"********\n",
"epoch: 1 time: 2195\n",
"train_loss: tensor(0.5730, device='cuda:0')\n",
"SHAPE: (1024, 1024)\n"
],
"name": "stdout"
}
]
}
]
}
2 changes: 1 addition & 1 deletion data.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,4 +129,4 @@ def __getitem__(self, index):
return img, mask

def __len__(self):
return len(self.ids)
return len(self.ids)
Binary file added dataset/test/206_sat.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Empty file removed dataset/test/test_image
Empty file.
Binary file added dataset/train/104_mask.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/train/104_sat.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/train/113_mask.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/train/113_sat.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/train/343_mask.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/train/343_sat.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/train/388_mask.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/train/388_sat.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Empty file removed dataset/train/train_image
Empty file.
Empty file removed dataset/train/train_mask
Empty file.
Binary file added dataset/valid/19438_sat.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/valid/457490_sat.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/valid/6477_sat.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dataset/valid/812363_sat.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Empty file removed dataset/valid/valid_image
Empty file.
58 changes: 38 additions & 20 deletions framework.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

import cv2
import numpy as np

# Creating a generic Framework class.
class MyFrame():
def __init__(self, net, loss, lr=2e-4, evalmode = False):
self.net = net().cuda()
Expand All @@ -17,66 +17,84 @@ def __init__(self, net, loss, lr=2e-4, evalmode = False):
for i in self.net.modules():
if isinstance(i, nn.BatchNorm2d):
i.eval()

def set_input(self, img_batch, mask_batch=None, img_id=None):
self.img = img_batch
self.mask = mask_batch
self.img_id = img_id

def test_one_img(self, img):
pred = self.net.forward(img)

pred[pred>0.5] = 1
pred[pred<=0.5] = 0

mask = pred.squeeze().cpu().data.numpy()
return mask



def test_batch(self):
self.forward(volatile=True)
mask = self.net.forward(self.img).cpu().data.numpy().squeeze(1)
mask[mask>0.5] = 1
mask[mask<=0.5] = 0

return mask, self.img_id

def test_one_img_from_path(self, path):
img = cv2.imread(path)
img = np.array(img, np.float32)/255.0 * 3.2 - 1.6
img = V(torch.Tensor(img).cuda())

mask = self.net.forward(img).squeeze().cpu().data.numpy()#.squeeze(1)
mask[mask>0.5] = 1
mask[mask<=0.5] = 0

return mask

def forward(self, volatile=False):
self.img = V(self.img.cuda(), volatile=volatile)
if self.mask is not None:
self.mask = V(self.mask.cuda(), volatile=volatile)


## OPTIMIZE: using the mentioned optimizer
def optimize(self):
self.forward()
self.optimizer.zero_grad()
pred = self.net.forward(self.img)
loss = self.loss(self.mask, pred)
loss.backward()
self.optimizer.step()
return loss.data[0]

def save(self, path):
torch.save(self.net.state_dict(), path)

return loss.data

# Saving the complete model
# No need to use this function
def save_full(self, path):
torch.save(self.net, path)

# Method to save the model for general checkpoint.
# Model_state_dict contains all the required weights.
# Epoch is the iteration number
# Loss value is saved too.
# Optimizer state dict saves all the gradients of the model
def save(self, path, epoch,losss):
torch.save({
'epoch' : epoch,
'model_state_dict' : self.net.state_dict(),
'optimizer_state_dict' : self.optimizer.state_dict(),
'loss' : losss}, path)

def load(self, path):
self.net.load_state_dict(torch.load(path))

checkpoint = torch.load(path)
self.net.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# Method to update the learning rate
def update_lr(self, new_lr, mylog, factor=False):
if factor:
new_lr = self.old_lr / new_lr
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr

print >> mylog, 'update learning rate: %f -> %f' % (self.old_lr, new_lr)
print 'update learning rate: %f -> %f' % (self.old_lr, new_lr)
print('update learning rate: %f -> %f' % (self.old_lr, new_lr), file = mylog)
print('update learning rate: %f -> %f' % (self.old_lr, new_lr))
self.old_lr = new_lr
Loading