    [Cover image: https://i.imgur.com/3gcBKq2.jpg]

    A project that converts photos into black-and-white line sketches has become quite popular recently, and it is a lot of fun. I tried running it myself and am sharing my notes here. Since network issues can get in the way, I will also share the resources the project needs at the end of the post; I hope you find it useful.

    First, clone the project:

    git clone https://github.com/vijishmadhavan/ArtLine.git ArtLine

    Then install the required libraries:

    pip install -r colab_requirements.txt

    Next comes the main code, which turns an image (given either as an image URL or as a local file) into a line sketch.

    Two issues come up when running this project. First, network problems can make the model download fail, so it is better to download the model in advance; I have already done so and you can get it at the end of this post (a small pre-download sketch follows below). Second, the project's original code only supports prediction from an image URL, so I extended it to read local images as well and to display and save the prediction result.
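
    If the in-notebook download keeps failing, you can fetch the weights once up front and keep ArtLine_650.pkl next to the script, so that load_learner finds it locally. A minimal sketch, reusing the Dropbox URL from the original notebook (skip this if you have already copied the file in by hand):

    import os
    import urllib.request

    MODEL_URL = "https://www.dropbox.com/s/starqc9qd2e1lg1/ArtLine_650.pkl?dl=1"
    MODEL_FILE = "ArtLine_650.pkl"

    # Download the weights only if they are not already present locally.
    if not os.path.exists(MODEL_FILE):
        urllib.request.urlretrieve(MODEL_URL, MODEL_FILE)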

    import fastai
    from fastai.vision import *
    from fastai.utils.mem import *
    from fastai.vision import open_image, load_learner, image, torch
    import numpy as np
    import urllib.request
    import PIL.Image                      # used below as PIL.Image.open(...)
    from io import BytesIO
    import torchvision.transforms as T
    import requests
    # Note: do not `from PIL import Image` here, since it would shadow fastai's
    # Image class that Image(img_t) below relies on.
    # FeatureLoss must be defined before load_learner is called, because the
    # pickled model was trained with this perceptual (feature + Gram) loss.
    # forward() refers to base_loss and gram_matrix; they are not used during
    # inference, but the standard fastai feature-loss definitions are included
    # here so the class stays self-contained.
    base_loss = F.l1_loss

    def gram_matrix(x):
        n, c, h, w = x.size()
        x = x.view(n, c, -1)
        return (x @ x.transpose(1, 2)) / (c * h * w)

    class FeatureLoss(nn.Module):
        def __init__(self, m_feat, layer_ids, layer_wgts):
            super().__init__()
            self.m_feat = m_feat
            self.loss_features = [self.m_feat[i] for i in layer_ids]
            self.hooks = hook_outputs(self.loss_features, detach=False)
            self.wgts = layer_wgts
            self.metric_names = ['pixel',] + [f'feat_{i}' for i in range(len(layer_ids))
                ] + [f'gram_{i}' for i in range(len(layer_ids))]

        def make_features(self, x, clone=False):
            self.m_feat(x)
            return [(o.clone() if clone else o) for o in self.hooks.stored]

        def forward(self, input, target):
            out_feat = self.make_features(target, clone=True)
            in_feat = self.make_features(input)
            self.feat_losses = [base_loss(input, target)]
            self.feat_losses += [base_loss(f_in, f_out) * w
                                 for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
            self.feat_losses += [base_loss(gram_matrix(f_in), gram_matrix(f_out)) * w**2 * 5e3
                                 for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
            self.metrics = dict(zip(self.metric_names, self.feat_losses))
            return sum(self.feat_losses)

        def __del__(self): self.hooks.remove()
    # In-script download of the weights (commented out, since the download tends
    # to fail on an unstable connection; see the pre-download step above):
    # MODEL_URL = "https://www.dropbox.com/s/starqc9qd2e1lg1/ArtLine_650.pkl?dl=1"
    # urllib.request.urlretrieve(MODEL_URL, "ArtLine_650.pkl")
    path = Path(".")
    learn = load_learner(path, 'ArtLine_650.pkl')

    ################### START: predict from an image URL ###################
    # url = 'https://pic2.zhimg.com/v2-69fad2bf3b67c2418d6d26fb9f3277b0_1440w.jpg'
    # response = requests.get(url)
    # img = PIL.Image.open(BytesIO(response.content)).convert("RGB")
    ################### END: predict from an image URL ###################

    ################### START: predict from a local image ###################
    img = PIL.Image.open("4871730978936137066.jpg").convert("RGB")
    ################### END: predict from a local image ###################

    img_t = T.ToTensor()(img)              # PIL image -> tensor
    img_fast = Image(img_t)                # wrap as a fastai Image
    show_image(img_fast, figsize=(8, 8), interpolation='nearest');  # show the input
    p, img_hr, b = learn.predict(img_fast)
    Image(img_hr).show(figsize=(8, 8))     # show the prediction
    p.save("result.jpg")                   # save the result
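
    Since the only difference between the URL branch and the local-file branch above is how the PIL image is obtained, the two can be folded into one small helper. A minimal sketch, assuming the names defined in the script above (learn, T, Image, requests, PIL.Image, BytesIO); to_sketch and its arguments are illustrative names, not part of the original project:

    # Hypothetical helper: turn either an image URL or a local path into a sketch.
    def to_sketch(source, out_path="result.jpg"):
        if str(source).startswith("http"):
            img = PIL.Image.open(BytesIO(requests.get(source).content)).convert("RGB")
        else:
            img = PIL.Image.open(source).convert("RGB")
        p, img_hr, b = learn.predict(Image(T.ToTensor()(img)))
        p.save(out_path)   # write the line sketch to disk
        return p

    # Example calls (file names are placeholders):
    # to_sketch('https://pic2.zhimg.com/v2-69fad2bf3b67c2418d6d26fb9f3277b0_1440w.jpg', "url_result.jpg")
    # to_sketch("4871730978936137066.jpg", "local_result.jpg")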

    ArtLine.ipynb