• Machine Learning for Hackers Reading Notes (10): KNN and Recommendation Systems


    # Part 1: Writing KNN by hand

    df <- read.csv('G:/dataguru/ML_for_Hackers/ML_for_Hackers-master/10-Recommendations/data/example_data.csv')
    head(df)

    # Compute the pairwise distance matrix
    distance.matrix <- function(df)
    {
      # Fill an nrow(df) x nrow(df) matrix with NAs (here 100 x 100 = 10,000 entries)
      distance <- matrix(rep(NA, nrow(df) ^ 2), nrow = nrow(df))

      # Compute the Euclidean distance between every pair of points
      for (i in 1:nrow(df))
      {
        for (j in 1:nrow(df))
        {
          distance[i, j] <- sqrt((df[i, 'X'] - df[j, 'X']) ^ 2 + (df[i, 'Y'] - df[j, 'Y']) ^ 2)
        }
      }

      return(distance)
    }
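
    # A vectorized alternative (an added sketch, not in the book): base R's dist()
    # builds the same Euclidean distance matrix without the double loop
    distance.alt <- as.matrix(dist(df[, c('X', 'Y')], method = 'euclidean'))
    all.equal(as.numeric(distance.alt), as.numeric(distance.matrix(df)))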

    # Find the k points closest to data point i
    k.nearest.neighbors <- function(i, distance, k = 5)
    {
      # distance[i, ] holds the distances from every point to point i; sort them and
      # take k indices, starting at position 2 because position 1 is point i itself
      return(order(distance[i, ])[2:(k + 1)])
    }
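
    # A quick check of the helper (an added sketch; the result depends on the
    # example data): indices of the 5 points nearest to the first observation
    distance <- distance.matrix(df)
    k.nearest.neighbors(1, distance, k = 5)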

    # Produce a prediction for every row of df
    knn <- function(df, k = 5)
    {
      # Compute the distance matrix
      distance <- distance.matrix(df)

      # Initialize the predictions with NAs
      predictions <- rep(NA, nrow(df))

      for (i in 1:nrow(df))
      {
        # Indices of the k points nearest to point i
        indices <- k.nearest.neighbors(i, distance, k = k)

        # Majority vote: 1 if the neighbors' mean label exceeds 0.5, otherwise 0
        predictions[i] <- ifelse(mean(df[indices, 'Label']) > 0.5, 1, 0)
      }

      return(predictions)
    }

    # Append the prediction column
    df <- transform(df, kNNPredictions = knn(df))

    # Count the misclassified points: 7 out of 100 observations, so accuracy is 93%
    sum(with(df, Label != kNNPredictions))
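
    # The accuracy itself can also be computed directly (an added check):
    mean(with(df, Label == kNNPredictions))
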
    # Remove the hand-written knn so it does not mask class::knn below
    rm('knn')

    # Part 2: KNN using R's built-in implementation (class::knn)

    library('class')
    df <- read.csv('G:/dataguru/ML_for_Hackers/ML_for_Hackers-master/10-Recommendations/data/example_data.csv')
    n <- nrow(df)
    set.seed(1)

    # Randomly sample half of 1:n as the training set; the remaining rows form the test set
    indices <- sort(sample(1:n, n * (1 / 2)))
    training.x <- df[indices, 1:2]
    test.x <- df[-indices, 1:2]
    training.y <- df[indices, 3]
    test.y <- df[-indices, 3]
    # There's a bug here!
    predicted.y <- knn(training.x, test.x, training.y, k = 5)

    # 7 test points are misclassified, but the test set has only 50 rows, so accuracy is 86%
    sum(predicted.y != test.y)
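
    # An added check: cross-tabulate the true test labels against the predictions
    table(test.y, predicted.y)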

    # Now compare with logistic regression. Note that no family argument is given,
    # so glm() falls back to its gaussian default; a variant with an explicit
    # binomial family is sketched after the result below.
    logit.model <- glm(Label ~ X + Y, data = df[indices, ])
    predictions <- as.numeric(predict(logit.model, newdata = df[-indices, ]) > 0)
    sum(predictions != test.y)

    # 16 of the 50 test rows are misclassified, i.e. only 68% accuracy, so the
    # conclusion is that when the problem is not at all linear, k-nearest neighbors
    # outperforms the GLM
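
    # A hedged variant (an added sketch, not the book's code): the same comparison
    # with an explicit logistic fit. predict() returns log-odds by default, so the
    # > 0 threshold still means "probability above 0.5"; the error count may differ
    # from the 16 quoted above.
    logit.model2 <- glm(Label ~ X + Y, family = binomial(link = 'logit'), data = df[indices, ])
    predictions2 <- as.numeric(predict(logit.model2, newdata = df[-indices, ]) > 0)
    sum(predictions2 != test.y)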

    # Part 3: A recommendation case study on Kaggle data: given the packages a
    # programmer has already installed, predict whether they will install another package

    installations <- read.csv('G:/dataguru/ML_for_Hackers/ML_for_Hackers-master/10-Recommendations/data/installations.csv')
    head(installations)
    library('reshape')

    # The data set has three columns: Package, User, and Installed

    # cast() pivots it into a matrix with one row per User, one column per Package,
    # and the Installed flag as the cell value

    # In the resulting matrix, the first column holds the user id
    user.package.matrix <- cast(installations, User ~ Package, value = 'Installed')

    # Use the user ids as row names, then drop that first column
    row.names(user.package.matrix) <- user.package.matrix[, 1]

    user.package.matrix <- user.package.matrix[, -1]

    # Compute the correlation between the package columns, i.e. package-to-package
    # similarity based on installation patterns

    similarities <- cor(user.package.matrix)

    # Convert similarity to distance: a similarity of 1 becomes distance 0, and a
    # similarity of -1 becomes an infinite distance
    distances <- -log((similarities / 2) + 0.5)
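
    # A quick worked check of the mapping (added, not from the book):
    #   similarity  1 -> -log(1/2 + 0.5)  = -log(1) = 0
    #   similarity -1 -> -log(-1/2 + 0.5) = -log(0) = Inf
    -log((c(1, 0, -1) / 2) + 0.5)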

    # Return the indices of the k points (here, packages) closest to point i

    k.nearest.neighbors <- function(i, distances, k = 25)
    {
      return(order(distances[i, ])[2:(k + 1)])
    }
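
    # A quick check (an added sketch; the output depends on the Kaggle data): names
    # of the 5 packages whose installation pattern is most similar to the first one
    colnames(user.package.matrix)[k.nearest.neighbors(1, distances, k = 5)]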

    # Probability that a user installs a package: the mean of the user's Installed
    # flags over the k packages most similar to that package
    installation.probability <- function(user, package, user.package.matrix, distances, k = 25)
    {
      neighbors <- k.nearest.neighbors(package, distances, k = k)

      return(mean(sapply(neighbors,
                         function (neighbor) {user.package.matrix[user, neighbor]})))
    }

    # The probability that user 1 installs package 1

    installation.probability(1, 1, user.package.matrix, distances)

    # Rank all packages for a user, ordered by estimated installation probability

    most.probable.packages <- function(user, user.package.matrix, distances, k = 25)
    {
      return(order(sapply(1:ncol(user.package.matrix),
                          function (package)
                          {
                            installation.probability(user,
                                                     package,
                                                     user.package.matrix,
                                                     distances,
                                                     k = k)
                          }),
                   decreasing = TRUE))
    }

    user <- 1

    listing <- most.probable.packages(user, user.package.matrix, distances)

    # The names of the 10 packages this user is most likely to install
    colnames(user.package.matrix)[listing[1:10]]
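
    # A small added extension (a sketch, not in the book): list the top 10 packages
    # for this user together with their estimated installation probabilities
    probs <- sapply(1:ncol(user.package.matrix),
                    function (package) installation.probability(user, package,
                                                                user.package.matrix,
                                                                distances))
    head(data.frame(Package = colnames(user.package.matrix)[listing],
                    Probability = probs[listing]),
         10)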

  • Original post: https://www.cnblogs.com/MarsMercury/p/4964365.html