コメント一覧

まえへ つぎへ
104 投稿者:名無しさん 2020/01/29 09:23:37
from sklearn import model_selection, svm, metrics
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('ggplot')
# Load the wine-quality CSV (semicolon-delimited)
wine = pd.read_csv("winequality-red.csv", delimiter=";")
names = ["fixed acidity", "volatile acidity", "citric acid",
         "residual sugar", "chlorides", "free sulfur dioxide",
         "total sulfur dioxide", "density", "pH", "sulphates", "alcohol"]
y = wine["quality"]  # label
fig, axn = plt.subplots(11, sharey=True)
for i, name in enumerate(names):
    axn[i].set_title(name)
    axn[i].scatter(wine[name], y)
plt.show()


105 投稿者:名無しさん 2020/01/29 09:32:36
from sklearn import model_selection, svm, metrics
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('ggplot')
# Load the wine-quality CSV (semicolon-delimited)
wine = pd.read_csv("winequality-red.csv", delimiter=";")
names = ["fixed acidity", "volatile acidity", "citric acid",
         "residual sugar", "chlorides", "free sulfur dioxide",
         "total sulfur dioxide", "density", "pH", "sulphates", "alcohol"]
y = wine["quality"]  # label
fig, axn = plt.subplots(11, sharey=True)
for i, name in enumerate(names):
    axn[i].set_title(name)
    axn[i].scatter(wine[name], y)
plt.show()


106 投稿者:名無しさん 2020/01/29 09:32:56
from sklearn import model_selection, svm, metrics
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import PCA
# Load the wine-quality CSV (semicolon-delimited)
wine = pd.read_csv("winequality-white.csv", delimiter=";")
X = wine[["fixed acidity", "volatile acidity", "citric acid",
          "residual sugar", "chlorides", "free sulfur dioxide",
          "total sulfur dioxide", "density", "pH", "sulphates",
          "alcohol"]]  # data
y = wine["quality"]  # label
# Reduce to two dimensions for plotting
comp = TruncatedSVD(n_components=2)
X_reduced = comp.fit_transform(X)
plt.style.use('ggplot')
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], s=y*3, cmap="Reds")
plt.show()


107 投稿者:名無しさん 2020/02/07 23:31:46
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import RMSprop
from keras.datasets import mnist
import matplotlib.pyplot as pyplot
im_rows = 28
im_cols = 28
im_color = 1
in_shape = (im_rows, im_cols, im_color)
out_size = 10

(X_train, y_train),(X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(-1, im_rows, im_cols, im_color)
X_train = X_train.astype('float32') / 255
X_test = X_test.reshape(-1, im_rows, im_cols, im_color)
X_test = X_test.astype('float32') / 255

y_train = leras.utils.np_utils.to_categorical(y_train.astype('int32'),10)
y_test = keras.utils.np_utils.to_categorical(y_test.astype('int32'),10)


model = Sequential()
model.add(Conv2D(32,
kernel_size=(3,3),
activation='relu',
input_shape=in_shape))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(out_size,activation='softmax'))

model.compile(
loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])

hist = model.fit(X_train, y_train,
batch_size=128,
epochs=12,
verbose=1,
validation_data=(X_test,y_test))

score = model.evaluate(X_test, y_test, verbose=1)
print('正解率=' score[1], 'loss=',score[0])

plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Accuracy')
plt.legend(['train','test'],loc='upper left')

plt.plot(hlist.history['loss'])
plt.plot(hlist.history['val_loss])
plt.title('Loss')
plt.legend(['train','test'],loc='upper left')
plt.show()























108 投稿者:名無しさん 2020/02/08 17:59:28
# Train a two-hidden-layer MLP on MNIST digits and plot training curves.
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.datasets import mnist
import matplotlib.pyplot as plt

# Network dimensions: flattened 28x28 image in, 10 digit classes out.
in_size = 28 * 28
out_size = 10

# Load the dataset.
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Flatten each image and scale pixel values to [0, 1].
X_train = X_train.reshape(-1, in_size).astype('float32') / 255
X_test = X_test.reshape(-1, in_size).astype('float32') / 255

# One-hot encode the integer labels.
y_train = keras.utils.np_utils.to_categorical(y_train.astype('int32'), out_size)
y_test = keras.utils.np_utils.to_categorical(y_test.astype('int32'), out_size)

# Define the model as a layer list: 512-relu -> dropout -> 512-relu -> dropout -> softmax.
model = Sequential([
    Dense(512, activation='relu', input_shape=(in_size,)),
    Dropout(0.2),
    Dense(512, activation='relu'),
    Dropout(0.2),
    Dense(out_size, activation='softmax'),
])

# Compile with categorical cross-entropy and RMSprop.
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

# Train, validating against the test split each epoch.
hist = model.fit(X_train, y_train,
                 batch_size=128,
                 epochs=50,
                 verbose=1,
                 validation_data=(X_test, y_test))

# Final evaluation on the test set.
score = model.evaluate(X_test, y_test, verbose=1)
print('正解率=',score[1],'loss=',score[0])

# Plot the accuracy history.
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Accuracy')
plt.legend(['train','test'], loc='upper left')
plt.show()

# Plot the loss history.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss')
plt.legend(['train','test'], loc='upper left')
plt.show()


109 投稿者:名無しさん 2020/02/10 10:22:47
# MNIST classifier: fully-connected network, trained and evaluated, with curve plots.
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.datasets import mnist
import matplotlib.pyplot as plt

# A 28x28 grayscale image flattens to 784 inputs; there are ten output digits.
in_size = 28 * 28
out_size = 10

# Fetch train/test splits.
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Flatten images into vectors and normalize bytes to the [0, 1] range.
X_train = X_train.reshape(-1, 784).astype('float32') / 255
X_test = X_test.reshape(-1, 784).astype('float32') / 255

# Convert labels to one-hot vectors.
y_train = keras.utils.np_utils.to_categorical(y_train.astype('int32'), 10)
y_test = keras.utils.np_utils.to_categorical(y_test.astype('int32'), 10)

# Assemble the network layer by layer.
model = Sequential()
for units in (512, 512):
    if not model.layers:
        model.add(Dense(units, activation='relu', input_shape=(in_size,)))
    else:
        model.add(Dense(units, activation='relu'))
    model.add(Dropout(0.2))
model.add(Dense(out_size, activation='softmax'))

# Configure the training objective and optimizer.
model.compile(
    loss='categorical_crossentropy',
    optimizer=RMSprop(),
    metrics=['accuracy'])

# Fit for 50 epochs, tracking test-set performance as validation.
hist = model.fit(
    X_train, y_train,
    batch_size=128, epochs=50, verbose=1,
    validation_data=(X_test, y_test))

# Report final test accuracy and loss.
score = model.evaluate(X_test, y_test, verbose=1)
print('正解率=',score[1],'loss=',score[0])

# Accuracy over epochs.
history = hist.history
plt.plot(history['acc'])
plt.plot(history['val_acc'])
plt.title('Accuracy')
plt.legend(['train','test'], loc='upper left')
plt.show()

# Loss over epochs.
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('Loss')
plt.legend(['train','test'], loc='upper left')
plt.show()



110 投稿者:名無しさん 2020/02/14 12:03:41
arigatou tion


111 投稿者:Tor常用おじさん 2020/04/08 15:52:41
今回のビデオのですね目玉はですね、僕のシャワーシーンなのでお風呂に浸かってですね、イチモツをしごいてるところで抜いてください!


112 投稿者:????? 2020/04/09 04:34:25
http://mewkid.net/when-is-xaxlop/ - Amoxicillin 500 Mg <a href="http://mewkid.net/when-is-xaxlop/">Amoxicillin</a> flk.ldqx.minmee.jp.kok.ip http://mewkid.net/when-is-xaxlop/


113 投稿者:Tor常用おじさん 2020/04/09 21:46:26
>>117
誰だよ。





パスワード:


私は(カタカナで)ロボットではありません