
Comments (11)

entrpn commented on August 16, 2024

You can use this with TF2, but you need to clone the project and add the following to the top of classifier.py:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

Then, instead of importing keras, you use tf.keras.
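For example, here is a minimal before/after sketch of the model-loading call (the placeholder path mirrors what classifier.py builds at runtime, and the same keras → tf.keras substitution applies to img_to_array in the full file below):

import os

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Placeholder path; classifier.py resolves this to ~/.NudeNet/classifier at runtime.
model_path = os.path.join(os.path.expanduser("~"), ".NudeNet", "classifier")

# Original call (standalone keras), which fails on TF2:
#   import keras
#   nsfw_model = keras.models.load_model(model_path)

# TF2-compatible replacement via the compat shim:
nsfw_model = tf.keras.models.load_model(model_path)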

Full file change:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

import os
import cv2
#import keras
import pydload
import logging
import numpy as np
from .video_utils import get_interest_frames_from_video

from PIL import Image as pil_image

if pil_image is not None:
    _PIL_INTERPOLATION_METHODS = {
        "nearest": pil_image.NEAREST,
        "bilinear": pil_image.BILINEAR,
        "bicubic": pil_image.BICUBIC,
    }
    # These methods were only introduced in version 3.4.0 (2016).
    if hasattr(pil_image, "HAMMING"):
        _PIL_INTERPOLATION_METHODS["hamming"] = pil_image.HAMMING
    if hasattr(pil_image, "BOX"):
        _PIL_INTERPOLATION_METHODS["box"] = pil_image.BOX
    # This method is new in version 1.1.3 (2013).
    if hasattr(pil_image, "LANCZOS"):
        _PIL_INTERPOLATION_METHODS["lanczos"] = pil_image.LANCZOS


def load_img(
    path, grayscale=False, color_mode="rgb", target_size=None, interpolation="nearest"
):
    """Loads an image into PIL format.
    
    :param path: Path to image file.
    :param grayscale: DEPRECATED use `color_mode="grayscale"`.
    :param color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
        The desired image format.
    :param target_size: Either `None` (default to original size)
        or tuple of ints `(img_height, img_width)`.
    :param interpolation: Interpolation method used to resample the image if the
        target size is different from that of the loaded image.
        Supported methods are "nearest", "bilinear", and "bicubic".
        If PIL version 1.1.3 or newer is installed, "lanczos" is also
        supported. If PIL version 3.4.0 or newer is installed, "box" and
        "hamming" are also supported. By default, "nearest" is used.
    
    :return: A PIL Image instance.
    """
    if grayscale is True:
        logging.warn("grayscale is deprecated. Please use " 'color_mode = "grayscale"')
        color_mode = "grayscale"
    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. " "The use of `load_img` requires PIL."
        )

    if isinstance(path, type("")):
        img = pil_image.open(path)
    else:
        path = cv2.cvtColor(path, cv2.COLOR_BGR2RGB)
        img = pil_image.fromarray(path)

    if color_mode == "grayscale":
        if img.mode != "L":
            img = img.convert("L")
    elif color_mode == "rgba":
        if img.mode != "RGBA":
            img = img.convert("RGBA")
    elif color_mode == "rgb":
        if img.mode != "RGB":
            img = img.convert("RGB")
    else:
        raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
    if target_size is not None:
        width_height_tuple = (target_size[1], target_size[0])
        if img.size != width_height_tuple:
            if interpolation not in _PIL_INTERPOLATION_METHODS:
                raise ValueError(
                    "Invalid interpolation method {} specified. Supported "
                    "methods are {}".format(
                        interpolation, ", ".join(_PIL_INTERPOLATION_METHODS.keys())
                    )
                )
            resample = _PIL_INTERPOLATION_METHODS[interpolation]
            img = img.resize(width_height_tuple, resample)
    return img


def load_images(image_paths, image_size, image_names):
    """
    Function for loading images into numpy arrays for passing to model.predict
    inputs:
        image_paths: list of image paths to load
        image_size: size into which images should be resized
    
    outputs:
        loaded_images: loaded images on which the keras model can run predictions
        loaded_image_paths: names/paths of the images that were successfully loaded
    
    """
    loaded_images = []
    loaded_image_paths = []

    for i, img_path in enumerate(image_paths):
        try:
            image = load_img(img_path, target_size=image_size)
            image = tf.keras.preprocessing.image.img_to_array(image)
            image /= 255
            loaded_images.append(image)
            loaded_image_paths.append(image_names[i])
        except Exception as ex:
            logging.exception(f"Error reading {img_path} {ex}", exc_info=True)

    return np.asarray(loaded_images), loaded_image_paths


class Classifier:
    """
        Class for loading model and running predictions.
        For an example of how to use it, see the if __name__ == '__main__' block.
    """

    nsfw_model = None

    def __init__(self):
        """
            model = Classifier()
        """
        url = "https://github.com/bedapudi6788/NudeNet/releases/download/v0/classifier_model"
        home = os.path.expanduser("~")
        model_folder = os.path.join(home, ".NudeNet/")
        if not os.path.exists(model_folder):
            os.mkdir(model_folder)

        model_path = os.path.join(model_folder, "classifier")

        if not os.path.exists(model_path):
            print("Downloading the checkpoint to", model_path)
            pydload.dload(url, save_to_path=model_path, max_time=None)

        self.nsfw_model = tf.keras.models.load_model(model_path)

    def classify_video(
        self,
        video_path,
        batch_size=4,
        image_size=(256, 256),
        categories=["unsafe", "safe"],
    ):
        frame_indices = None
        frame_indices, frames, fps, video_length = get_interest_frames_from_video(
            video_path
        )
        logging.debug(
            f"VIDEO_PATH: {video_path}, FPS: {fps}, Important frame indices: {frame_indices}, Video length: {video_length}"
        )

        frames, frame_names = load_images(frames, image_size, image_names=frame_indices)

        if not frame_names:
            return {}

        model_preds = self.nsfw_model.predict(frames, batch_size=batch_size)
        preds = np.argsort(model_preds, axis=1).tolist()

        probs = []
        for i, single_preds in enumerate(preds):
            single_probs = []
            for j, pred in enumerate(single_preds):
                single_probs.append(model_preds[i][pred])
                preds[i][j] = categories[pred]

            probs.append(single_probs)

        return_preds = {
            "metadata": {
                "fps": fps,
                "video_length": video_length,
                "video_path": video_path,
            },
            "preds": {},
        }

        for i, frame_name in enumerate(frame_names):
            return_preds["preds"][frame_name] = {}
            for _ in range(len(preds[i])):
                return_preds["preds"][frame_name][preds[i][_]] = probs[i][_]

        return return_preds

    def classify(
        self,
        image_paths=[],
        batch_size=4,
        image_size=(256, 256),
        categories=["unsafe", "safe"],
    ):
        """
            inputs:
                image_paths: list of image paths or can be a string too (for single image)
                batch_size: batch_size for running predictions
                image_size: size to which the image needs to be resized
                categories: since the model predicts numbers, categories is the list of actual names of categories
        """
        if isinstance(image_paths, str):
            image_paths = [image_paths]

        loaded_images, loaded_image_paths = load_images(
            image_paths, image_size, image_names=image_paths
        )

        if not loaded_image_paths:
            return {}

        model_preds = self.nsfw_model.predict(
            loaded_images, batch_size=batch_size
        )

        preds = np.argsort(model_preds, axis=1).tolist()

        probs = []
        for i, single_preds in enumerate(preds):
            single_probs = []
            for j, pred in enumerate(single_preds):
                single_probs.append(model_preds[i][pred])
                preds[i][j] = categories[pred]

            probs.append(single_probs)

        images_preds = {}

        for i, loaded_image_path in enumerate(loaded_image_paths):
            if not isinstance(loaded_image_path, str):
                loaded_image_path = i

            images_preds[loaded_image_path] = {}
            for _ in range(len(preds[i])):
                images_preds[loaded_image_path][preds[i][_]] = probs[i][_]

        return images_preds


if __name__ == "__main__":
    print(
        '\n Enter path for the keras weights, leave empty to use "./nsfw.299x299.h5" \n'
    )
    weights_path = input().strip()
    if not weights_path:
        weights_path = "../nsfw.299x299.h5"

    m = Classifier()

    while 1:
        print(
            "\n Enter single image path or multiple images seperated by || (2 pipes) \n"
        )
        images = input().split("||")
        images = [image.strip() for image in images]
        print(m.classify(images), "\n")
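Once the file is patched, using it looks roughly like this (a minimal sketch; the import path assumes you run from the cloned repo root, and the image path is a placeholder):

from nudenet.classifier import Classifier

m = Classifier()
# classify() accepts a single path or a list of paths and returns a dict of
# per-image probabilities for the "unsafe" and "safe" categories.
print(m.classify("/path/to/some_image.jpg"))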

Btw, thank you for this repo. Saved me so much time.


Adjenz commented on August 16, 2024

With the requirements in https://github.com/notAI-tech/LogoDet/blob/master/requirements.txt, the detector should have worked.

If you don't mind, can you do the following and check once:

pip uninstall keras-retinanet
wget https://raw.githubusercontent.com/notAI-tech/LogoDet/master/requirements.txt
pip install -r requirements.txt

Hi mate, I have that issue too.

I installed all the requirements with the correct versions. This is what it shows:

>>> from nudenet import NudeClassifier
Using TensorFlow backend.
>>> classifier = NudeClassifier()
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/adrien/.local/lib/python3.8/site-packages/nudenet/classifier.py", line 139, in __init__
    self.nsfw_model = keras.models.load_model(model_path)
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/engine/saving.py", line 419, in load_model
    model = _deserialize_model(f, custom_objects, compile)
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/engine/saving.py", line 225, in _deserialize_model
    model = model_from_config(model_config, custom_objects=custom_objects)
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/engine/saving.py", line 458, in model_from_config
    return deserialize(config, custom_objects=custom_objects)
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/layers/__init__.py", line 52, in deserialize
    return deserialize_keras_object(config,
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/utils/generic_utils.py", line 142, in deserialize_keras_object
    return cls.from_config(
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/engine/network.py", line 1022, in from_config
    process_layer(layer_data)
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/engine/network.py", line 1007, in process_layer
    layer = deserialize_layer(layer_data,
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/layers/__init__.py", line 52, in deserialize
    return deserialize_keras_object(config,
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/utils/generic_utils.py", line 147, in deserialize_keras_object
    return cls.from_config(config['config'])
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/engine/base_layer.py", line 1109, in from_config
    return cls(**config)
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/engine/input_layer.py", line 84, in __init__
    input_tensor = K.placeholder(shape=batch_input_shape,
  File "/home/adrien/.local/lib/python3.8/site-packages/keras/backend/tensorflow_backend.py", line 517, in placeholder
    x = tf.placeholder(dtype, shape=shape, name=name)
AttributeError: module 'tensorflow' has no attribute 'placeholder'


bedapudi6788 commented on August 16, 2024

@matthewgdv @Screamus57 @entrpn as of the current release, NudeNet doesn't require any version of tensorflow and works as expected on all newer Python versions.
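For later readers, usage with the current release should look something like this (a sketch, assuming the NudeClassifier interface shown earlier in this thread is unchanged; the image path is a placeholder):

from nudenet import NudeClassifier

classifier = NudeClassifier()
# Returns a dict mapping each input path to its category probabilities.
print(classifier.classify("/path/to/some_image.jpg"))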


bedapudi6788 commented on August 16, 2024

I would suggest using Python 3.6.x with keras==2.2.4 and tensorflow==1.14.

These are the versions I tested the model with.


matthewgdv commented on August 16, 2024

Heya @bedapudi6788, thanks for your help!

I've just tried downgrading my version of tensorflow from 2.3.0 to 1.14 as you've suggested, and I'm getting this error:

C:\Users\matthewgdv>pip install tensorflow==1.14 --upgrade
ERROR: Could not find a version that satisfies the requirement tensorflow==1.14 (from versions: 2.2.0rc1, 2.2.0rc2, 2.2.0rc3, 2.2.0rc4, 2.2.0, 2.3.0rc0, 2.3.0rc1, 2.3.0rc2, 2.3.0)
ERROR: No matching distribution found for tensorflow==1.14

Is there any chance the tensorflow team has removed that version? If so, how would you recommend setting up a working environment for NudeNet?

I'm sorry. I realize environment issues are literally the worst and just the most boring and obnoxious thing ever. :(


bedapudi6788 commented on August 16, 2024

I think that's because your Python version is too new.

https://github.com/notAI-tech/LogoDet/blob/master/requirements.txt

These versions might work with your Python version.

wget https://raw.githubusercontent.com/notAI-tech/LogoDet/master/requirements.txt
pip install -r requirements.txt


matthewgdv commented on August 16, 2024

With those versions the NudeClassifier is now working, but the NudeDetector now errors like so:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-13-c0ac40df705b> in <module>
----> 1 detector = NudeDetector()

~\Python\portable\python\Lib\site-packages\nudenet\detector.py in __init__(self, model_name)
     76             pydload.dload(classes_url, save_to_path=classes_path, max_time=None)
     77 
---> 78         self.detection_model = models.load_model(
     79             checkpoint_path, backbone_name="resnet50"
     80         )

~\Python\portable\python\Lib\site-packages\keras_retinanet\models\__init__.py in load_model(filepath, backbone_name)
     81     """
     82     import keras.models
---> 83     return keras.models.load_model(filepath, custom_objects=backbone(backbone_name).custom_objects)
     84 
     85 

~\Python\portable\python\Lib\site-packages\keras\engine\saving.py in load_wrapper(*args, **kwargs)
    490                 os.remove(tmp_filepath)
    491             return res
--> 492         return load_function(*args, **kwargs)
    493 
    494     return load_wrapper

~\Python\portable\python\Lib\site-packages\keras\engine\saving.py in load_model(filepath, custom_objects, compile)
    582     if H5Dict.is_supported_type(filepath):
    583         with H5Dict(filepath, mode='r') as h5dict:
--> 584             model = _deserialize_model(h5dict, custom_objects, compile)
    585     elif hasattr(filepath, 'write') and callable(filepath.write):
    586         def load_function(h5file):

~\Python\portable\python\Lib\site-packages\keras\engine\saving.py in _deserialize_model(h5dict, custom_objects, compile)
    272         raise ValueError('No model found in config.')
    273     model_config = json.loads(model_config.decode('utf-8'))
--> 274     model = model_from_config(model_config, custom_objects=custom_objects)
    275     model_weights_group = h5dict['model_weights']
    276 

~\Python\portable\python\Lib\site-packages\keras\engine\saving.py in model_from_config(config, custom_objects)
    625                         '`Sequential.from_config(config)`?')
    626     from ..layers import deserialize
--> 627     return deserialize(config, custom_objects=custom_objects)
    628 
    629 

~\Python\portable\python\Lib\site-packages\keras\layers\__init__.py in deserialize(config, custom_objects)
    163     globs['Model'] = models.Model
    164     globs['Sequential'] = models.Sequential
--> 165     return deserialize_keras_object(config,
    166                                     module_objects=globs,
    167                                     custom_objects=custom_objects,

~\Python\portable\python\Lib\site-packages\keras\utils\generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
    142             custom_objects = custom_objects or {}
    143             if has_arg(cls.from_config, 'custom_objects'):
--> 144                 return cls.from_config(
    145                     config['config'],
    146                     custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) +

~\Python\portable\python\Lib\site-packages\keras\engine\network.py in from_config(cls, config, custom_objects)
   1073                         node_data = node_data_list[node_index]
   1074                         try:
-> 1075                             process_node(layer, node_data)
   1076 
   1077                         # If the node does not have all inbound layers

~\Python\portable\python\Lib\site-packages\keras\engine\network.py in process_node(layer, node_data)
   1023             # and building the layer if needed.
   1024             if input_tensors:
-> 1025                 layer(unpack_singleton(input_tensors), **kwargs)
   1026 
   1027         def process_layer(layer_data):

~\Python\portable\python\Lib\site-packages\keras\backend\tensorflow_backend.py in symbolic_fn_wrapper(*args, **kwargs)
     73         if _SYMBOLIC_SCOPE.value:
     74             with get_graph().as_default():
---> 75                 return func(*args, **kwargs)
     76         else:
     77             return func(*args, **kwargs)

~\Python\portable\python\Lib\site-packages\keras\engine\base_layer.py in __call__(self, inputs, **kwargs)
    487             # Actually call the layer,
    488             # collecting output(s), mask(s), and shape(s).
--> 489             output = self.call(inputs, **kwargs)
    490             output_mask = self.compute_mask(inputs, previous_mask)
    491 

~\Python\portable\python\Lib\site-packages\keras_retinanet\layers\_misc.py in call(self, inputs, **kwargs)
    107             return output
    108         else:
--> 109             return backend.resize_images(source, (target_shape[1], target_shape[2]), method='nearest')
    110 
    111     def compute_output_shape(self, input_shape):

~\Python\portable\python\Lib\site-packages\keras_retinanet\backend\tensorflow_backend.py in resize_images(images, size, method, align_corners)
     66         'area'    : tensorflow.image.ResizeMethod.AREA,
     67     }
---> 68     return tensorflow.image.resize_images(images, size, methods[method], align_corners)
     69 
     70 

AttributeError: module 'tensorflow._api.v2.image' has no attribute 'resize_images'
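For what it's worth, tf.image.resize_images was removed in TF 2.x but is still available as tf.compat.v1.image.resize_images, so one possible workaround (untested here, purely a sketch) is to alias the old name back in before keras_retinanet touches it:

import tensorflow as tf

# keras_retinanet's TF backend still calls tf.image.resize_images, which no
# longer exists in TF 2.x. Restore it from the v1 compat module if missing.
# This is only a sketch of a workaround, not something verified against NudeDetector.
if not hasattr(tf.image, "resize_images"):
    tf.image.resize_images = tf.compat.v1.image.resize_images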

I'll try and set up a Python 3.6 environment tomorrow to test it out. That said, I do suspect that it will work and that it's down to spooky interactions between the Keras/Tensorflow dependencies on newer versions of Python.

It's your call of course whether you want to support newer versions of the language or not, but I'd be really grateful if you did. I pretty much religiously upgrade my environments to the newest interpreter version, usually within a month of release (unless some core dependencies I use break and need more time to catch up), and I dislike spinning up virtualenvs on older versions, since I hate having to consciously censor my code and avoid using awesome new language features (the most recent awesome one was assignment expressions <3) just to accommodate a dependency.

Granted, not everyone is like me, and you for sure don't have to cater to us, but it would be awesome if you did streamline the process of using NudeNet with an up-to-date interpreter. I think over time it might also give the project more longevity.

Anyways, thanks again!


bedapudi6788 commented on August 16, 2024

With the requirements in https://github.com/notAI-tech/LogoDet/blob/master/requirements.txt, the detector should have worked.

If you don't mind, can you do the following and check once:

pip uninstall keras-retinanet
wget https://raw.githubusercontent.com/notAI-tech/LogoDet/master/requirements.txt
pip install -r requirements.txt


Adjenz commented on August 16, 2024

Same thing on Google Colab:

AttributeError                            Traceback (most recent call last)
<ipython-input-…> in <module>()
----> 1 classifier = NudeClassifier()

13 frames
/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py in placeholder(shape, ndim, dtype, sparse, name)
    515             x = tf.sparse_placeholder(dtype, shape=shape, name=name)
    516         else:
--> 517             x = tf.placeholder(dtype, shape=shape, name=name)
    518         x._keras_shape = shape
    519         x._uses_learning_phase = False

AttributeError: module 'tensorflow' has no attribute 'placeholder'


bedapudi6788 commented on August 16, 2024

Sorry, at the moment you will have to use it in a venv with tf 1.14 and keras 2.2.4. Or, using it as a REST API via Docker is a good idea if you don't want to create a new venv.

I will try to support newer versions in the future.


Adjenz commented on August 16, 2024

Sorry, at the moment you will have to use it in a venv with tf 1.14 and keras 2.2.4. Or, using it as a REST API via Docker is a good idea if you don't want to create a new venv.

I will try to support newer versions in the future.

Ok then I'll just learn how to use Docker. Thanks mate.

