Skip to content
Snippets Groups Projects
Select Git revision
  • 1c06b8099f3f3ae51a5fd9d846049d66479903b7
  • master default protected
  • 67-intend-get-area-of-research
  • 35-enhancing-rasa-recognizing-lower-case-entities
  • 66-docker-refactor-dockerfile-to-reduce-number-of-layers
  • 48-create-get_date_of_death-function
  • 34-refactoring-rasa-documentation-naming
  • 12-rasa-trainigsdaten-format
  • 12-rasa-trainingsdaten-format
  • v1.1
  • v1.0
11 results

policy.py

Blame
  • Code owners
    Assign users and groups as approvers for specific file changes. Learn more.
    policy.py 2.12 KiB
    import logging
    
    from rasa_core.policies.keras_policy import KerasPolicy
    
    logger = logging.getLogger(__name__)
    
    
    class PhysicistPolicy(KerasPolicy):
        """Rasa Core dialogue policy backed by a masked-LSTM Keras model."""

        def model_architecture(self, input_shape, output_shape):
            """Build and return a compiled Keras model.

            Args:
                input_shape: shape of the featurized dialogue input,
                    i.e. (max_dialogue_len, num_features).
                output_shape: shape of the label vector y; its rank
                    decides which RNN outputs contribute to the loss.

            Returns:
                A compiled ``Sequential`` model.

            Raises:
                ValueError: if ``len(output_shape)`` is neither 1 nor 2.
            """
            from tensorflow.keras.models import Sequential
            from tensorflow.keras.layers import \
                Masking, LSTM, Dense, TimeDistributed, Activation

            # Build Model
            model = Sequential()

            # The rank of the y label vector determines which outputs
            # from the RNN are used to calculate the loss.
            if len(output_shape) == 1:
                # y is (num examples, num features) so only the last
                # output from the RNN is used to calculate the loss.
                model.add(Masking(mask_value=-1, input_shape=input_shape))
                model.add(LSTM(self.rnn_size))
                model.add(Dense(input_dim=self.rnn_size, units=output_shape[-1]))
            elif len(output_shape) == 2:
                # y is (num examples, max_dialogue_len, num features) so
                # all the outputs from the RNN are used to calculate the
                # loss; therefore a sequence is returned and a
                # TimeDistributed layer is used.

                # The first value in input_shape is max_dialogue_len; it
                # is set to None to allow dynamic_rnn creation during
                # prediction.
                model.add(Masking(mask_value=-1,
                                  input_shape=(None, input_shape[1])))
                model.add(LSTM(self.rnn_size, return_sequences=True))
                model.add(TimeDistributed(Dense(units=output_shape[-1])))
            else:
                # Fixed: adjacent string literals previously concatenated
                # without spaces, rendering "...becauselength of...".
                raise ValueError("Cannot construct the model because "
                                 "length of output_shape = {} "
                                 "should be 1 or 2.".format(len(output_shape)))

            model.add(Activation('softmax'))

            model.compile(loss='categorical_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])

            # Fixed: model.summary() prints to stdout and returns None,
            # so logger.debug(model.summary()) only ever logged "None".
            # Route each summary line through the logger instead.
            model.summary(print_fn=logger.debug)
            return model