
Meezo ML
Member
Contributions: 197
Join date: -
Last visit
All posts by member Meezo ML
-
I built a multi-class classification model on an x-ray scans dataset, but I don't understand the cause of the following error in the code:

# Code
model = Sequential()
model.add(Conv2D(512, (3, 3), input_shape=X_train.shape[1:]))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000))
model.add(Dense(64))
model.add(Dense(6))
model.add(Activation('softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
model.fit(X_train, y_label, batch_size=64, epochs=20, validation_split=0.2)

ValueError: Shapes (None, 1) and (None, 6) are incompatible
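A minimal sketch of a likely fix, assuming y_label holds integer class indices 0-5: categorical_crossentropy expects one-hot targets whose last dimension matches the 6-unit softmax, so either one-hot encode the labels or switch to the sparse variant of the loss.

from tensorflow.keras.utils import to_categorical

y_onehot = to_categorical(y_label, num_classes=6)   # (N, 1) integers -> (N, 6) one-hot
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
model.fit(X_train, y_onehot, batch_size=64, epochs=20, validation_split=0.2)

# Alternative: keep the integer labels and compile with
# loss='sparse_categorical_crossentropy' instead.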
-
I built a baseline model for audio classification. The dataset I'm working with is a set of audio samples with shape (400, 50, 95).

# Model
model = Sequential()
model.add(LSTM(64, input_shape=(50, 95, 1), return_sequences=True))
model.add(LSTM(32, activation='relu'))
model.add(Dense(4, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=["acc"])
model.summary()
model.fit(X_train, y_train, batch_size=64, nb_epoch=20, validation_data=(X_test, y_test))

But I get the error:
ValueError: Shapes (None, 1) and (None, 4) are incompatible
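As in the previous question, the mismatch is between integer labels of shape (None, 1) and the 4-unit softmax that categorical_crossentropy compares them against. A minimal sketch, assuming X_train has shape (400, 50, 95) and y_train / y_test are integer labels 0-3 (an LSTM over such data takes input_shape=(50, 95), and nb_epoch is the old spelling of epochs):

from tensorflow.keras.utils import to_categorical

y_train_oh = to_categorical(y_train, num_classes=4)
y_test_oh  = to_categorical(y_test,  num_classes=4)

model = Sequential()
model.add(LSTM(64, input_shape=(50, 95), return_sequences=True))
model.add(LSTM(32, activation='relu'))
model.add(Dense(4, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
model.fit(X_train, y_train_oh, batch_size=64, epochs=20, validation_data=(X_test, y_test_oh))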
-
I am classifying the MNIST images in Keras on Google Colab, but I modified the data to keep only two classes, 0 or 1, and the following error appears. The code:

import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.utils.np_utils import to_categorical
from keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
y_train[y_train <= 5] = 0
y_train[y_train > 5] = 1
y_train = y_train.astype('int64')
print(x_train.shape, y_train.shape)

image_size = x_train.shape[1]
input_size = image_size * image_size
x_train = np.reshape(x_train, [-1, input_size])
x_train = x_train.astype('float32') / 255
x_test = np.reshape(x_test, [-1, input_size])
x_test = x_test.astype('float32') / 255

batch_size = 32
hidden_units = 256
model = Sequential()
model.add(Dense(hidden_units, input_shape=(input_size,)))
model.add(Activation('relu'))
model.add(Dropout(0.45))
model.add(Dense(hidden_units))
model.add(Activation('relu'))
model.add(Dropout(0.45))
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, batch_size=batch_size)

The error:
(60000, 28, 28) (60000,)
Epoch 1/5
ValueError: Shapes (32, 1) and (32, 2) are incompatible
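A minimal sketch of a likely fix: after the relabelling, y_train is still a vector of integers, while categorical_crossentropy with a 2-unit softmax expects one-hot rows of length 2. One-hot encoding (or switching to the sparse loss) resolves the shape mismatch; the same relabelling would also need to be applied to y_test before evaluating.

y_train_oh = to_categorical(y_train, num_classes=2)   # (60000,) -> (60000, 2)
model.fit(x_train, y_train_oh, epochs=5, batch_size=batch_size)

# Alternatives:
# model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# or a single sigmoid output unit with loss='binary_crossentropy'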
-
I built a model to predict house prices using the Keras framework, but I got the following error:

from keras.datasets import boston_housing
import keras

(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()

# Standardize the data
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std

from keras import models
from keras import layers

# Build the model
def build_model():
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=(300,)))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss="mse", metrics=['mae'])
    # Here mse is used as the loss function; it can also be passed to compile as a metric:
    # model.compile(optimizer='rmsprop', loss='mse', metrics=['mse'])
    return model

# Train the model
model = build_model()
model.fit(train_data, train_targets, epochs=8, batch_size=64)

ValueError: Input 0 of layer sequential is incompatible with the layer: expected axis -1 of input shape to have value 255 but received input with shape (None, 13)
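A minimal sketch of a likely fix: the Boston Housing samples have 13 features, so the first layer's input_shape has to match train_data.shape[1] rather than a hard-coded size; whatever hard-coded value is used, the error appears because it differs from 13.

def build_model():
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu',
                           input_shape=(train_data.shape[1],)))   # (13,) for Boston Housing
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model

model = build_model()
model.fit(train_data, train_targets, epochs=8, batch_size=64)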
-
How is the Hinge loss function used in Keras?
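A minimal sketch, on a hypothetical binary classifier: the hinge loss expects labels encoded as -1 / +1, and it pairs naturally with a tanh (or linear) output.

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(20,)),
    tf.keras.layers.Dense(1, activation='tanh')      # output in [-1, 1]
])
model.compile(optimizer='adam', loss='hinge', metrics=['accuracy'])
# equivalently: loss=tf.keras.losses.Hinge()
# labels must be -1 or +1; 0/1 labels can be converted with y = 2*y - 1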
-
What is logcosh and how do we use it in Keras?
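A minimal sketch, on a hypothetical regression model: log-cosh is the loss log(cosh(y_pred - y_true)), which behaves like MSE for small errors and like MAE for large ones, making it less sensitive to outliers.

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1)
])
model.compile(optimizer='adam', loss=tf.keras.losses.LogCosh())
# recent versions also accept the string alias 'log_cosh'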
-
I'm building a neural network for an NLP task (sentiment analysis on the imdb data), but I don't know the cause of the following error:

from keras.datasets import imdb
from keras.layers import Embedding, SimpleRNN, Flatten, Dense
from keras.models import Sequential

(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=10000)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')

################ added ###################
from keras.preprocessing import sequence
maxlen = 20
print('Pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)
############### end ######################

print('input_train shape:', input_train.shape)
print('input_test shape:', input_test.shape)

from keras.layers import Dense
model = Sequential()
model.add(Embedding(10000, 16))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(input_train, y_train, epochs=2, batch_size=32, validation_split=0.2)

ValueError: The last dimension of the inputs to `Dense` should be defined. Found `None`.
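A minimal sketch of a likely fix: an Embedding declared without input_length has an unknown sequence dimension, so Flatten cannot produce a fully defined shape for the Dense layer that follows. Since the sequences are already padded to maxlen=20, passing that length to the Embedding resolves the error.

model = Sequential()
model.add(Embedding(10000, 16, input_length=maxlen))   # output: (None, 20, 16)
model.add(Flatten())                                   # -> (None, 320), now defined
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])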
-
I built a sentiment-analysis model, but the following error appears during training:

df_train = pd.read_csv('/content/drive/MyDrive/imdbdataset/Completely_clean_data.csv')
df_train.drop(df_train.filter(regex="Unname"), axis=1, inplace=True)
df_test = pd.read_csv('/content/drive/MyDrive/imdbdataset/Completely_clean_data_test.csv')
df_test.drop(df_test.filter(regex="Unname"), axis=1, inplace=True)

max_words = 75000
tokenizer = Tokenizer(num_words=max_words)
# fitting
tokenizer.fit_on_texts(pd.concat([df_test['review'], df_train['review']]))
#max_len=int(df["review_len"].mean())  #231  # do you remember!!
train = tokenizer.texts_to_sequences(df_train['review'])
test = tokenizer.texts_to_sequences(df_test['review'])
train = pad_sequences(train, maxlen=200)
test = pad_sequences(test, maxlen=200)
print("the shape of data train :", train.shape)
print("the shape of data test :", test.shape)

# model
def modelBiLSTM():
    max_words = 75000
    #drop_lstm = 0.4
    embeddings = 128
    model = Sequential()
    model.add(Embedding(10000, embeddings))
    model.add(Bidirectional(LSTM(64, activation='tanh')))  # 2D output
    model.add(Dense(1, activation='sigmoid'))              # binary output
    return model

model = modelBiLSTM()
model.summary()

# training
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(train, train_label, validation_split=0.12, batch_size=32, epochs=8)

InvalidArgumentError: indices[28,152] = 57171 is not in [0, 10000)
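A minimal sketch of a likely fix: the Tokenizer keeps up to 75000 word indices (num_words=max_words), but the Embedding layer reserves only 10000 rows, so any index of 10000 or more (here 57171) falls outside the lookup table. Making the two sizes agree is one way to resolve it.

def modelBiLSTM():
    max_words = 75000
    embeddings = 128
    model = Sequential()
    model.add(Embedding(max_words, embeddings))            # input_dim matches the tokenizer
    model.add(Bidirectional(LSTM(64, activation='tanh')))
    model.add(Dense(1, activation='sigmoid'))
    return model

# (alternatively, build the Tokenizer with num_words=10000 and keep Embedding(10000, ...))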
-
What is the Huber loss cost function and how is it applied in Keras?
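A minimal sketch, on a hypothetical regression model: the Huber loss is quadratic for errors smaller than delta and linear beyond it, which makes it more robust to outliers than plain MSE.

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1)
])
model.compile(optimizer='adam', loss=tf.keras.losses.Huber(delta=1.0))
# recent versions also accept the string alias 'huber'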
-
I'm training a simple CNN on the MNIST dataset in Keras, but I don't know why this error appears:

import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import Adam, RMSprop
from keras.utils.np_utils import to_categorical
from keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()

model = keras.models.Sequential([
    keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    keras.layers.MaxPool2D(2, 2),
    keras.layers.Conv2D(64, (3, 3), activation='relu'),
    keras.layers.MaxPool2D(2, 2),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=20, validation_data=(x_test, y_test), batch_size=60)

# The error
Epoch 1/20
ValueError: Input 0 of layer sequential_1 is incompatible with the layer: expected min_ndim=4, found ndim=3. Full shape received: (60, 28, 28)
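A minimal sketch of a likely fix: Conv2D expects 4-D input (batch, height, width, channels), but mnist.load_data() returns arrays shaped (batch, 28, 28). Adding the channel axis, and one-hot encoding the labels since the model uses categorical_crossentropy with a 10-unit softmax, lets fit() run.

x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255
x_test  = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255
y_train = to_categorical(y_train, 10)
y_test  = to_categorical(y_test, 10)

model.fit(x_train, y_train, epochs=20, validation_data=(x_test, y_test), batch_size=60)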
-
The Adamax optimization algorithm and how to use it in Keras?
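A minimal sketch, on a hypothetical model: Adamax is a variant of Adam based on the infinity norm of the gradients, and in Keras it can be passed either as a configured object or as the string 'adamax'.

import tensorflow as tf

opt = tf.keras.optimizers.Adamax(learning_rate=0.001, beta_1=0.9, beta_2=0.999)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
# or simply: model.compile(optimizer='adamax', ...)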
-
I'm trying to train a neural network for sentiment analysis, but I get the following error:

from keras.datasets import imdb
from keras.layers import Embedding, SimpleRNN
from keras.models import Sequential

(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')
print('input_train shape:', input_train.shape)
print('input_test shape:', input_test.shape)

from keras.layers import Dense
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(input_train, y_train, epochs=2, batch_size=32, validation_split=0.2)

ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type list).
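A minimal sketch of a likely fix: imdb.load_data() returns reviews as Python lists of varying length, which fit() cannot convert into a single tensor. Padding them to a fixed length first (and defining max_features before it is used) resolves the error; maxlen=500 below is an arbitrary choice.

from keras.preprocessing import sequence

max_features = 10000
maxlen = 500

(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test  = sequence.pad_sequences(input_test, maxlen=maxlen)

history = model.fit(input_train, y_train, epochs=2, batch_size=32, validation_split=0.2)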
-
I'm training a neural network to recognize the MNIST digits, but it doesn't work on Google Colab:

import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.utils.np_utils import to_categorical
from keras.datasets import mnist

(x_train, y_train), (x_vaild, y_vaild) = mnist.load_data()
y_train = to_categorical(y_train)
y_vaild = to_categorical(y_vaild)

image_size = x_train.shape[1]
input_size = image_size * image_size
x_train = np.reshape(x_train, [-1, input_size])
x_train = x_train.astype('float32') / 255
x_test = np.reshape(x_test, [-1, input_size])
x_test = x_test.astype('float32') / 255

batch_size = 32
hidden_units = 256
model = Sequential()
model.add(Dense(hidden_units, input_dim=input_size))
model.add(Activation('relu'))
model.add(Dropout(0,45))
model.add(Dense(hidden_units))
model.add(Activation('relu'))
model.add(Dropout(0,45))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, validation_data=[x_vaild, y_vaild], batch_size=batch_size)

TypeError: 'int' object is not iterable
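A minimal sketch of a likely fix: Dropout(0,45) passes 45 as the layer's noise_shape argument, which is why Keras complains that an int is not iterable; the rate must be a single float, 0.45. The reshaping should also use x_vaild rather than an undefined x_test.

x_vaild = np.reshape(x_vaild, [-1, input_size]).astype('float32') / 255

model = Sequential()
model.add(Dense(hidden_units, input_dim=input_size))
model.add(Activation('relu'))
model.add(Dropout(0.45))
model.add(Dense(hidden_units))
model.add(Activation('relu'))
model.add(Dropout(0.45))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, validation_data=(x_vaild, y_vaild), batch_size=batch_size)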
-
What is KL-Divergence and how can it be used in Keras?
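A minimal sketch, on a hypothetical model: KL divergence measures how one probability distribution differs from another, so in Keras both y_true and y_pred should be probability distributions (for example one-hot labels against a softmax output).

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(3, activation='softmax')
])
model.compile(optimizer='adam', loss=tf.keras.losses.KLDivergence())
# string aliases: 'kl_divergence' (recent versions) or 'kullback_leibler_divergence'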
-
I need a way to create a random dataset for a prediction (regression) task in the Sklearn library?
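A minimal sketch using make_regression, which generates a synthetic regression dataset; the sizes below are arbitrary.

from sklearn.datasets import make_regression

# 1000 samples, 10 features, 5 of which actually carry signal, plus Gaussian noise
X, y = make_regression(n_samples=1000, n_features=10, n_informative=5,
                       noise=0.5, random_state=42)
print(X.shape, y.shape)   # (1000, 10) (1000,)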
-
How do we apply the ExtraTreesRegressor algorithm in Sklearn?
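A minimal sketch on synthetic data: ExtraTreesRegressor fits an ensemble of extremely randomized trees and averages their predictions.

from sklearn.datasets import make_regression
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=500, n_features=8, noise=0.3, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

reg = ExtraTreesRegressor(n_estimators=100, random_state=0)   # 100 randomized trees
reg.fit(X_train, y_train)
print(reg.score(X_test, y_test))   # R^2 on held-out data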
-
How do we apply the PowerTransformer transformation using the Sklearn library?
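A minimal sketch on synthetic skewed data: PowerTransformer maps features to a more Gaussian-like distribution; 'yeo-johnson' handles negative values, while 'box-cox' requires strictly positive data.

import numpy as np
from sklearn.preprocessing import PowerTransformer

X = np.random.exponential(scale=2.0, size=(100, 3))   # strongly right-skewed features

pt = PowerTransformer(method='yeo-johnson', standardize=True)
X_trans = pt.fit_transform(X)
print(X_trans.mean(axis=0), X_trans.std(axis=0))   # roughly 0 and 1 per feature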
-
How do we apply the AdaBoostRegressor algorithm in the Sklearn library?
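A minimal sketch on synthetic data: AdaBoostRegressor boosts a sequence of weak regressors (shallow decision trees by default), each one focusing on the samples its predecessors handled poorly.

from sklearn.datasets import make_regression
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=500, n_features=8, noise=0.3, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

reg = AdaBoostRegressor(n_estimators=100, learning_rate=0.5, random_state=0)
reg.fit(X_train, y_train)
print(reg.score(X_test, y_test))   # R^2 on held-out data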
-
I built an image classification model made of several CNN and AveragePooling layers, but I don't know the cause of the following error:

# write your model here, we prefer that you call it model2 to make comparisons easier later:
model2 = keras.Sequential()
model2.add(layers.Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(150, 150, 3)))
model2.add(layers.AveragePooling2D())
model2.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model2.add(layers.AveragePooling2D())
model2.add(layers.Dense(units=120, activation='tanh'))
model2.add(layers.Dense(units=84, activation='tanh'))
model2.add(layers.Dense(units=6, activation='softmax'))
model2.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model2.fit(train_images, train_labels, batch_size=32, epochs=5, validation_split=0.2)

InvalidArgumentError: logits and labels must have the same first dimension, got logits shape [41472,6] and labels shape [32]
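A minimal sketch of a likely fix: without a Flatten (or GlobalAveragePooling2D) layer, the Dense layers are applied at every spatial position of the 36x36 feature map, so the loss sees 32 x 36 x 36 = 41472 logits against only 32 labels. Flattening before the Dense head restores the expected shapes.

model2 = keras.Sequential()
model2.add(layers.Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(150, 150, 3)))
model2.add(layers.AveragePooling2D())
model2.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model2.add(layers.AveragePooling2D())
model2.add(layers.Flatten())                    # collapse (36, 36, 16) to a vector
model2.add(layers.Dense(units=120, activation='tanh'))
model2.add(layers.Dense(units=84, activation='tanh'))
model2.add(layers.Dense(units=6, activation='softmax'))
model2.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])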
-
I'm working on a simple model in Keras, but I don't know what the problem is with keras.engine.Network:

import keras
from keras import backend
from keras.models import Sequential
from keras import layers
from keras.datasets import mnist

model = Sequential()
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu', input_shape=(28*28,)))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='Adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

def init(model):
    session = backend.get_session()
    for layer in model.layers:
        if isinstance(layer, keras.engine.Network):
            init(layer)

init(model)

AttributeError: module 'keras.engine' has no attribute 'Network'
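A minimal sketch of a likely fix, assuming a recent Keras / TensorFlow 2.x installation: keras.engine.Network was an internal class of older Keras releases and is no longer exposed. A nested sub-model is itself a Model, so checking against the public keras.Model class covers the same case.

def init(model):
    for layer in model.layers:
        if isinstance(layer, keras.Model):   # a layer that is itself a (sub-)model
            init(layer)

init(model)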
-
The Adadelta optimization algorithm and how to use it in Keras?
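A minimal sketch, on a hypothetical model: Adadelta adapts each parameter's learning rate from a decaying window of past squared gradients, so it needs little manual tuning.

import tensorflow as tf

opt = tf.keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
# the string form 'adadelta' uses the library defaults instead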
-
What is the Poisson loss in Keras and how is it used?
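A minimal sketch, on a hypothetical model: the Poisson loss (y_pred - y_true * log(y_pred)) suits regression on count data, such as predicting how many events occur; the output should stay positive, for example via an exponential activation.

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1, activation='exponential')   # keeps predictions > 0
])
model.compile(optimizer='adam', loss=tf.keras.losses.Poisson())
# the string alias 'poisson' also works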