I am trying to deploy my model on Watson Studio through a Jupyter notebook. I get a scoring endpoint whose status is deploy_success, but I cannot see any of the deployments I made with the Watson Machine Learning Python client.
Here is the code I used:
import types
import warnings
import numpy as np
import pandas as pd
import ibm_boto3
from botocore.client import Config
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from watson_machine_learning_client import WatsonMachineLearningAPIClient
warnings.filterwarnings('ignore')
# stub __iter__ used below so pandas accepts the COS streaming body as a file-like object
def __iter__(self): return 0
# @hidden_cell
# The following code accesses a file in your IBM Cloud Object Storage. It includes your credentials.
# You might want to remove those credentials before you share the notebook.
client_69afffce8ec943228b747f15c49a1424 = ibm_boto3.client(service_name='s3',
    ibm_api_key_id='*****',
    ibm_auth_endpoint="url",
    config=Config(signature_version='oauth'),
    endpoint_url='url')
body = client_69afffce8ec943228b747f15c49a1424.get_object(Bucket='ibmhc-donotdelete-pr-te37jcxr6hwuwb',Key='fulfilment_center_info.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
center = pd.read_csv(body)
body = client_69afffce8ec943228b747f15c49a1424.get_object(Bucket='ibmhc-donotdelete-pr-te37jcxr6hwuwb',Key='ibmhacktest.csv')['Body']
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
test = pd.read_csv(body)
body = client_69afffce8ec943228b747f15c49a1424.get_object(Bucket='ibmhc-donotdelete-pr-te37jcxr6hwuwb',Key='meal_info.csv')['Body']
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
meal = pd.read_csv(body)
body = client_69afffce8ec943228b747f15c49a1424.get_object(Bucket='ibmhc-donotdelete-pr-te37jcxr6hwuwb',Key='train.csv')['Body']
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
data = pd.read_csv(body)
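# stack train and test, join the center and meal lookup tables, and engineer discount features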
data=pd.concat([data,test],axis=0)
data=data.merge(center,on='center_id',how='left')
data=data.merge(meal,on='meal_id',how='left')
data['discount amount']=data['base_price']-data['checkout_price']
data['discount percent'] = ((data['base_price']-data['checkout_price'])/data['base_price'])*100
data['discount y/n'] = [1 if x>0 else 0 for x in (data['base_price']-data['checkout_price'])]
data=data.sort_values(['center_id', 'meal_id', 'week']).reset_index()
data['compare_week_price'] = data['checkout_price'] - data['checkout_price'].shift(1)
data.loc[data['week']==1, 'compare_week_price'] = 0
data=data.sort_values(by='index').reset_index().drop(['level_0','index'],axis=1)
data['compare_week_price y/n'] = [1 if x>0 else 0 for x in data['compare_week_price']]
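# bucket city_code into four groups and one-hot encode the categorical columns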
city4={590:'CH1', 526:'CH2', 638:'CH3'}
data['city_enc_4']=data['city_code'].map(city4)
data['city_enc_4']=data['city_enc_4'].fillna('CH4')
datax=data.copy()
datax['center_id']=datax['center_id'].astype('object')
datax['meal_id']=datax['meal_id'].astype('object')
datax['region_code']=datax['region_code'].astype('object')
obj=datax[['center_id','meal_id','region_code','center_type','category','cuisine','city_enc_4']]
num=datax.drop(['center_id','meal_id','region_code','center_type','category','cuisine','city_enc_4'],axis=1)
encode1=pd.get_dummies(obj,drop_first = True)
datax=pd.concat([num,encode1],axis=1)
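# standardize the numeric price/discount features and recombine them with the remaining columns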
sc=StandardScaler()
num=datax[['checkout_price','base_price','discount amount','discount percent','compare_week_price']]
# keep the remaining, unscaled columns so they can be recombined after scaling
cat=datax.drop(['checkout_price','base_price','discount amount','discount percent','compare_week_price'],axis=1)
scal= pd.DataFrame(sc.fit_transform(num),columns=num.columns)
datas=pd.concat([scal,cat],axis=1)
datay=datas.copy()
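# derive Quarter and Year features from the week number, then one-hot encode them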
datay['Quarter']=(datas['week']/13).astype('int64')
datay['Quarter'] = datay['Quarter'].map({0:'Q1', 1:'Q2', 2:'Q3', 3:'Q4',
                                         4:'Q1', 5:'Q2', 6:'Q3', 7:'Q4',
                                         8:'Q1', 9:'Q2', 10:'Q3', 11:'Q4'})
datay['Quarter'].value_counts()
datay['Year']=(datas['week']/52).astype('int64')
datay['Year'] = datay['Year'].map({0:'Y1', 1:'Y2', 2:'Y3'})
objy=datay[['Quarter', 'Year']]
numy=datay.drop(['Quarter', 'Year'],axis=1)
encode1y=pd.get_dummies(objy,drop_first = True)
encode1y.head()
datay=pd.concat([numy,encode1y],axis=1)
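# log-transform the target and split by week: weeks 1-135 for training, weeks 136-145 for hold-out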
datay['num_orders']=np.log1p(datay['num_orders'])
train=datay[datay['week'].isin(range(1,136))]
test=datay[datay['week'].isin(range(136,146))]
X_train=train.drop(['id','num_orders','week','discount amount','city_code'],axis=1)
y_train=train['num_orders']
X_test=test.drop(['id','num_orders','week','discount amount','city_code'],axis=1)
y_test=test['num_orders']
reg = LinearRegression()
reg.fit(X_train,y_train)
y_pred=reg.predict(X_test)
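# authenticate to Watson Machine Learning, store the scikit-learn model, and create a deployment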
wml_credentials={
"apikey": "*****",
"iam_apikey_description": "*****",
"iam_apikey_name": "Service credentials-1",
"iam_role_crn": "******",
"iam_serviceid_crn": "******",
"instance_id": "*****",
"url": "******"
}
client = WatsonMachineLearningAPIClient(wml_credentials)
metadata={
client.repository.ModelMetaNames.DESCRIPTION:'data',
client.repository.ModelMetaNames.AUTHOR_NAME:'teja',
client.repository.ModelMetaNames.NAME:"linear",
client.repository.ModelMetaNames.FRAMEWORK_NAME:"scikit-learn",
client.repository.ModelMetaNames.FRAMEWORK_VERSION:"0.22"
}
model_details=client.repository.store_model(reg,meta_props=metadata)
published_model_uid=client.repository.get_model_uid(model_details)
created_deployment=client.deployments.create(published_model_uid,name="Reg")
scoring_endpoint = client.deployments.get_scoring_url(created_deployment)
client.deployments.list()
I can even see the service under client.deployments.list(), but I am not able to connect to it through Cloud Foundry, so I need help deploying this model.
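In case it matters, this is roughly how I intend to look the deployment up again and score against the endpoint once it works (a minimal sketch using the same V3 client calls as above; the payload row is just the first hold-out row from X_test):

# sketch: fetch the deployment details again and send one scoring request
deployment_uid = client.deployments.get_uid(created_deployment)
deployment_details = client.deployments.get_details(deployment_uid)
scoring_url = client.deployments.get_scoring_url(deployment_details)
# the field names must match the columns the model was trained on
scoring_payload = {
    "fields": list(X_test.columns),
    "values": [X_test.iloc[0].tolist()]
}
predictions = client.deployments.score(scoring_url, scoring_payload)
print(predictions)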