synchronization

This commit is contained in:
2025-08-25 16:04:00 +08:00
commit 33f9b3ce46
1951 changed files with 854396 additions and 0 deletions

28
.gitignore vendored Normal file
View File

@@ -0,0 +1,28 @@
# ---> macOS
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk

2
README.md Normal file
View File

@@ -0,0 +1,2 @@
# Rainbond

View File

@@ -0,0 +1,13 @@
# MySQL 5.7 image pre-seeded with the store_* databases and a 'region'
# application account (created by the official entrypoint on first start).
FROM mysql:5.7
LABEL maintainer="Offends <offends4@163.com>"

ENV MYSQL_USER=region
# NOTE(review): MYSQL_USERNAME is not read by the official mysql image
# (the entrypoint only honors MYSQL_USER); kept for backward compatibility.
ENV MYSQL_USERNAME=region
# Password for MYSQL_USER. The original file assigned MYSQL_PASSWORD twice
# (region, then root); the later value silently won, so the 'region' user
# ended up with the password 'root'. Keep a single unambiguous assignment.
ENV MYSQL_PASSWORD=region
ENV MYSQL_ROOT_PASSWORD=root

# Init SQL (database creation + grants) runs once on first container start.
COPY ./sql/* /docker-entrypoint-initdb.d/
COPY ./my.cnf /etc/my.cnf

View File

@@ -0,0 +1,17 @@
# MySQL 5.7 server configuration baked into the image at /etc/my.cnf.
[mysqld]
# Skip the internal host cache and reverse-DNS lookups on connect
# (faster, and avoids hangs when DNS is unavailable inside containers).
skip-host-cache
skip-name-resolve
datadir=/var/lib/mysql
socket=/var/run/mysqld/mysqld.sock
# Restrict LOAD DATA / SELECT ... INTO OUTFILE to this directory.
secure-file-priv=/var/lib/mysql-files
user=mysql
# Disable symlink support (hardening).
symbolic-links=0
pid-file=/var/run/mysqld/mysqld.pid
# Only NO_ENGINE_SUBSTITUTION: drops the stricter 5.7 default modes
# (e.g. STRICT_TRANS_TABLES) for legacy-compatible inserts.
sql_mode=NO_ENGINE_SUBSTITUTION
[client]
socket=/var/run/mysqld/mysqld.sock
# Pull in any drop-in configuration fragments.
!includedir /etc/mysql/conf.d/
!includedir /etc/mysql/mysql.conf.d/

View File

@@ -0,0 +1,21 @@
-- Create database store_order (idempotent: IF NOT EXISTS makes reruns safe)
CREATE DATABASE IF NOT EXISTS store_order;
-- Create database store_delivery
CREATE DATABASE IF NOT EXISTS store_delivery;
-- Create database store_app
CREATE DATABASE IF NOT EXISTS store_app;
-- Create database store_enterprise
CREATE DATABASE IF NOT EXISTS store_enterprise;
-- Create database store_message
CREATE DATABASE IF NOT EXISTS store_message;
-- Create database store_manage
CREATE DATABASE IF NOT EXISTS store_manage;
-- Grant the 'region' user all privileges from any host, including the
-- ability to grant privileges to other users (WITH GRANT OPTION).
GRANT ALL PRIVILEGES ON *.* TO 'region'@'%' WITH GRANT OPTION;

24
Ubuntu/Dockerfile Normal file
View File

@@ -0,0 +1,24 @@
# SSH-enabled Ubuntu base image. A random root password is generated at
# build time and stored in /etc/.pass/.root_password for start.sh to use.
FROM ubuntu:22.04
LABEL maintainer="Offends <offends4@163.com>"
RUN apt update \
    && apt install openssh-server vim -y
# Pre-staged dotfiles (.bashrc/.profile/.bash_logout) consumed by start.sh.
COPY ./file/.* /.file/
COPY ./start.sh /start.sh
# Harden sshd: forbid root login, uncomment PasswordAuthentication, then
# generate and record a random root password.
RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin no/g' /etc/ssh/sshd_config \
    && sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/g' /etc/ssh/sshd_config \
    && chmod +x /start.sh \
    && mkdir -p /etc/.pass/ \
    && openssl rand -base64 8 > /etc/.pass/.root_password \
    && chmod 005 /etc/.pass/.root_password \
    && echo "root:$(cat /etc/.pass/.root_password)" | chpasswd \
    && echo "Ubuntu 启动成功" > /var/log/start.log
# NOTE(review): chmod 005 makes the password file unreadable by its owner
# yet readable by "other" — presumably so the non-root login user can read
# it while the mode looks restrictive; confirm this is intended.
# Fixed: the startup log message originally read "启动启动成功" (duplicated word).
EXPOSE 22
ENV ES_DEFAULT_EXEC_ARGS=bash
CMD [ "bash", "/start.sh" ]

19
Ubuntu/Dockerfile-user Normal file
View File

@@ -0,0 +1,19 @@
# Non-root variant of the SSH base image: adds a login user and drops
# privileges so the container runs as that user.
FROM registry.cn-hangzhou.aliyuncs.com/offends/ubuntu:base
LABEL maintainer="Offends <offends4@163.com>"
ENV USER=demo
ENV USER_PASSWORD=123456
# Create the login user and hand it the pre-staged dotfiles in /.file.
# NOTE(review): the original comment claimed "create the user only if it
# does not exist", but useradd runs unconditionally here — presumably the
# base image does not ship this user; confirm.
RUN useradd -m -s /bin/bash $USER \
&& echo "${USER}:${USER_PASSWORD}" | chpasswd \
&& chown -R $USER:$USER /.file
USER $USER
# Expose all GPUs when run under the NVIDIA container runtime.
ENV NVIDIA_VISIBLE_DEVICES=all
WORKDIR /home/$USER
VOLUME /home/$USER

7
Ubuntu/file/.bash_logout Normal file
View File

@@ -0,0 +1,7 @@
# ~/.bash_logout: executed by bash(1) when login shell exits.
# when leaving the console clear the screen to increase privacy
if [ "$SHLVL" = 1 ]; then
[ -x /usr/bin/clear_console ] && /usr/bin/clear_console -q
fi

117
Ubuntu/file/.bashrc Normal file
View File

@@ -0,0 +1,117 @@
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color|*-256color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# colored GCC warnings and errors
#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi

27
Ubuntu/file/.profile Normal file
View File

@@ -0,0 +1,27 @@
# ~/.profile: executed by the command interpreter for login shells.
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
# exists.
# see /usr/share/doc/bash/examples/startup-files for examples.
# the files are located in the bash-doc package.
# the default umask is set in /etc/profile; for setting the umask
# for ssh logins, install and configure the libpam-umask package.
#umask 022
# if running bash
if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists
if [ -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
PATH="$HOME/.local/bin:$PATH"
fi

28
Ubuntu/start.sh Normal file
View File

@@ -0,0 +1,28 @@
#!/bin/bash
#############################################################################################
# Purpose: Ubuntu container startup script
# Author:  丁辉
# Written: 2024-03-10
#############################################################################################
# Start the SSH service. The root password generated at image build time is
# piped into `su` so the service can be started as root even though direct
# root SSH login is disabled.
echo $(cat /etc/.pass/.root_password) | su - root -s /bin/bash -c "service ssh start"
echo "SSH服务已启动"
# Seed the login user's dotfiles from the snapshots baked into /.file/
# (only when the user does not already have them), then source each one.
# NOTE(review): sourcing .bash_logout at startup is unusual (it is meant to
# run on logout), and sourcing dotfiles in this non-interactive script has
# no effect on later SSH sessions — confirm intent.
if [ ! -f /home/$USER/.bash_logout ]; then
cp /.file/.bash_logout /home/$USER/.bash_logout
fi
source /home/$USER/.bash_logout
if [ ! -f /home/$USER/.bashrc ]; then
cp /.file/.bashrc /home/$USER/.bashrc
fi
source /home/$USER/.bashrc
if [ ! -f /home/$USER/.profile ]; then
cp /.file/.profile /home/$USER/.profile
fi
source /home/$USER/.profile
# Keep the container's foreground process alive.
tail -f /var/log/start.log

60
datax-web/README.md Normal file
View File

@@ -0,0 +1,60 @@
> 本文作者:丁辉
# Datax-Web
[官方部署文档](https://github.com/WeiYe-Jing/datax-web/blob/master/doc/datax-web/datax-web-deploy.md)
[Github项目地址](https://github.com/WeiYe-Jing/datax-web/tree/master)
[datax.tar.gz下载](http://datax-opensource.oss-cn-hangzhou.aliyuncs.com/datax.tar.gz)
[Mysql驱动下载](https://downloads.mysql.com/archives/c-j/)
## 基本信息
部署完成后访问http://ip:port/index.html
默认账户密码admin/123456
## 使用说明
- 添加一个新的项目
- 添加数据源
- 创建一个任务模板
- 做完这些就可以去构建任务,并在任务管理里面找到任务并开启执行
**问题记录**
- Data-Web 配置 Mysql 连接测试报错
> com.mysql.jdbc.exceptions.jdbc4.CommunicationsException: Communications link failure
修改 jdbc url
```bash
jdbc:mysql://localhost:3306/database_name?enabledTLSProtocols=TLSv1.2
```
```bash
jdbc:mysql://localhost:3306/database_name?useSSL=false&allowPublicKeyRetrieval=true
```
- 如何支持 Mysql8
下载 Mysql8 驱动后编辑 Dockerfile, 加入如下内容
```bash
COPY mysql-connector-java-8.0.15.jar /root/datax/plugin/writer/mysqlwriter/libs/
COPY mysql-connector-java-8.0.15.jar /root/datax/plugin/reader/mysqlreader/libs/
```

View File

@@ -0,0 +1,17 @@
# DataX-Web admin (scheduler/UI) image.
FROM docker.io/library/openjdk:8
LABEL maintainer="Offends <offends4@163.com>"
# ADD auto-extracts the release tarball into /.
ADD datax-web-2.1.2.tar.gz /
# Run the vendor installer, then strip everything this image does not need
# (the executor module ships in a separate image).
RUN /datax-web-2.1.2/bin/install.sh --force \
&& rm -rf /datax-web-2.1.2/modules/datax-executor \
&& rm -rf /datax-web-2.1.2/userGuid.md \
&& rm -rf /datax-web-2.1.2/README.md \
&& rm -rf /datax-web-2.1.2/packages
# Database connection settings for datax-admin.
COPY ./bootstrap.properties /datax-web-2.1.2/modules/datax-admin/conf/bootstrap.properties
EXPOSE 9527
# Start the admin module and keep the container alive by tailing its log.
CMD /bin/bash -c "/datax-web-2.1.2/bin/start.sh -m datax-admin && tail -f /datax-web-2.1.2/modules/datax-admin/bin/console.out"

View File

@@ -0,0 +1,6 @@
# Database
DB_HOST=127.0.0.1
DB_PORT=3306
DB_USERNAME=root
DB_PASSWORD=root
DB_DATABASE=datax

View File

@@ -0,0 +1,21 @@
#!/bin/bash
#############################################################################################
# Purpose: build script for the DataX-Admin image
# Author:  丁辉
# Updated: 2024-03-07
#############################################################################################
# Abort on the first failing command so a failed download cannot lead to
# building an image from a missing or partial tarball.
set -e

# Download the datax-web release only when it is not already present locally.
if [ ! -f datax-web-2.1.2.tar.gz ]; then
    echo "datax-web-2.1.2.tar.gz 文件不存在, 开始下载..."
    wget https://rainbond-pkg.oss-cn-shanghai.aliyuncs.com/dingh/datax-web-2.1.2.tar.gz
fi

# Build the image. (The progress line was printed twice in the original;
# the duplicate has been dropped.)
echo "开始构建镜像..."
docker build -t registry.cn-hangzhou.aliyuncs.com/offends/datax:admin .
echo "镜像构建完成 registry.cn-hangzhou.aliyuncs.com/offends/datax:admin"

View File

@@ -0,0 +1,21 @@
# DataX-Web executor (worker) image: carries both the DataX engine and the
# datax-web executor module.
FROM docker.io/library/openjdk:8
LABEL maintainer="Offends <offends4@163.com>"
# ADD auto-extracts both release tarballs into /.
ADD datax.tar.gz /
ADD datax-web-2.1.2.tar.gz /
# Run the vendor installer, then strip the admin module and docs — this
# image only runs the executor.
RUN /datax-web-2.1.2/bin/install.sh --force \
&& rm -rf /datax-web-2.1.2/modules/datax-admin \
&& rm -rf /datax-web-2.1.2/userGuid.md \
&& rm -rf /datax-web-2.1.2/README.md \
&& rm -rf /datax-web-2.1.2/packages
# DataX jobs are launched through the datax.py script (see PYTHON_PATH in
# env.properties), so a `python` binary is required; switch APT to the
# Aliyun mirror first (faster inside mainland China).
RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list \
&& apt update && apt install python -y
COPY ./env.properties /datax-web-2.1.2/modules/datax-executor/bin/env.properties
# Remove macOS "._*" AppleDouble residue from the plugin tree — presumably
# these stray files break DataX plugin discovery; confirm.
RUN find /datax/plugin/* -type f -name "._*er" | xargs rm -rf
# Start the executor and keep the container alive by tailing its job log.
CMD /bin/bash -c "/datax-web-2.1.2/bin/start.sh -m datax-executor && sleep 3 && tail -f /datax-web-2.1.2/modules/datax-executor/data/applogs/executor/jobhandler/datax-executor.log"

View File

@@ -0,0 +1,26 @@
#!/bin/bash
#############################################################################################
# Purpose: build script for the DataX-Executor image
# Author:  丁辉
# Updated: 2024-03-07
#############################################################################################
# Abort on the first failing command so a failed download cannot lead to
# building an image from missing or partial tarballs.
set -e

# Download the DataX engine tarball only when it is not already present.
if [ ! -f datax.tar.gz ]; then
    echo "datax.tar.gz 文件不存在, 开始下载..."
    wget http://datax-opensource.oss-cn-hangzhou.aliyuncs.com/datax.tar.gz
fi
# Download the datax-web release only when it is not already present.
if [ ! -f datax-web-2.1.2.tar.gz ]; then
    echo "datax-web-2.1.2.tar.gz 文件不存在, 开始下载..."
    wget https://rainbond-pkg.oss-cn-shanghai.aliyuncs.com/dingh/datax-web-2.1.2.tar.gz
fi

# Build the image. (The progress line was printed twice in the original;
# the duplicate has been dropped.)
echo "开始构建镜像..."
docker build -t registry.cn-hangzhou.aliyuncs.com/offends/datax:executor .
echo "镜像构建完成 registry.cn-hangzhou.aliyuncs.com/offends/datax:executor"

View File

@@ -0,0 +1,35 @@
# environment variables for the datax-web executor module
#JAVA_HOME=""
SERVICE_LOG_PATH=${BIN}/../logs
SERVICE_CONF_PATH=${BIN}/../conf
DATA_PATH=${BIN}/../data
## Directory where DataX job JSON files are stored.
JSON_PATH=${BIN}/../json
## Port the executor listens on.
EXECUTOR_PORT=9999
## Must match the port datax-admin is serving on.
## NOTE(review): left empty here — it has to be filled in (or injected) at
## deploy time for the executor to reach the admin; confirm.
DATAX_ADMIN_PORT=
## Path of the DataX python launcher script.
#PYTHON_PATH=/home/hadoop/install/datax/bin/datax.py
PYTHON_PATH=/datax/bin/datax.py
## Port of the datax-web executor HTTP service.
SERVER_PORT=9504
#PID_FILE_PATH=${BIN}/service.pid
## Remote debug port (disabled).
#REMOTE_DEBUG_SWITCH=true
#REMOTE_DEBUG_PORT=7004

View File

@@ -0,0 +1,8 @@
FROM mysql:5.7
LABEL maintainer="Offends <offends4@163.com>"
COPY ./datax_web.sql /docker-entrypoint-initdb.d
ENV MYSQL_ROOT_PASSWORD=root
ENV MYSQL_DATABASE=datax

View File

@@ -0,0 +1,354 @@
/*
Navicat Premium Data Transfer
Source Server : localhost
Source Server Type : MySQL
Source Server Version : 50725
Source Host : localhost:3306
Source Schema : datax_web
Target Server Type : MySQL
Target Server Version : 50725
File Encoding : 65001
Date: 15/12/2019 22:27:10
*/
SET NAMES utf8mb4;
SET FOREIGN_KEY_CHECKS = 0;
-- ----------------------------
-- Table structure for job_group
-- ----------------------------
DROP TABLE IF EXISTS `job_group`;
CREATE TABLE `job_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`app_name` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '执行器AppName',
`title` varchar(12) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '执行器名称',
`order` int(11) NOT NULL DEFAULT 0 COMMENT '排序',
`address_type` tinyint(4) NOT NULL DEFAULT 0 COMMENT '执行器地址类型0=自动注册、1=手动录入',
`address_list` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器地址列表,多地址逗号分隔',
PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 2 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Records of job_group
-- ----------------------------
INSERT INTO `job_group` VALUES (1, 'datax-executor', 'datax执行器', 1, 0, NULL);
-- ----------------------------
-- Table structure for job_info
-- ----------------------------
DROP TABLE IF EXISTS `job_info`;
CREATE TABLE `job_info` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`job_group` int(11) NOT NULL COMMENT '执行器主键ID',
`job_cron` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '任务执行CRON',
`job_desc` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
`add_time` datetime(0) NULL DEFAULT NULL,
`update_time` datetime(0) NULL DEFAULT NULL,
`author` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '作者',
`alarm_email` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '报警邮件',
`executor_route_strategy` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器路由策略',
`executor_handler` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器任务handler',
`executor_param` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器任务参数',
`executor_block_strategy` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '阻塞处理策略',
`executor_timeout` int(11) NOT NULL DEFAULT 0 COMMENT '任务执行超时时间,单位秒',
`executor_fail_retry_count` int(11) NOT NULL DEFAULT 0 COMMENT '失败重试次数',
`glue_type` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT 'GLUE类型',
`glue_source` mediumtext CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL COMMENT 'GLUE源代码',
`glue_remark` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT 'GLUE备注',
`glue_updatetime` datetime(0) NULL DEFAULT NULL COMMENT 'GLUE更新时间',
`child_jobid` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '子任务ID多个逗号分隔',
`trigger_status` tinyint(4) NOT NULL DEFAULT 0 COMMENT '调度状态0-停止1-运行',
`trigger_last_time` bigint(13) NOT NULL DEFAULT 0 COMMENT '上次调度时间',
`trigger_next_time` bigint(13) NOT NULL DEFAULT 0 COMMENT '下次调度时间',
`job_json` text CHARACTER SET utf8 COLLATE utf8_general_ci NULL COMMENT 'datax运行脚本',
PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 7 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Table structure for job_jdbc_datasource
-- ----------------------------
DROP TABLE IF EXISTS `job_jdbc_datasource`;
CREATE TABLE `job_jdbc_datasource` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增主键',
`datasource_name` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '数据源名称',
`datasource_group` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT 'Default' COMMENT '数据源分组',
`jdbc_username` varchar(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '用户名',
`jdbc_password` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '密码',
`jdbc_url` varchar(500) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT 'jdbc url',
`jdbc_driver_class` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT 'jdbc驱动类',
`status` tinyint(1) NOT NULL DEFAULT 1 COMMENT '状态0删除 1启用 2禁用',
`create_by` varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '创建人',
`create_date` datetime(0) NULL DEFAULT CURRENT_TIMESTAMP(0) COMMENT '创建时间',
`update_by` varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '更新人',
`update_date` datetime(0) NULL DEFAULT NULL COMMENT '更新时间',
`comments` varchar(1000) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '备注',
PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 6 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci COMMENT = 'jdbc数据源配置' ROW_FORMAT = Dynamic;
-- ----------------------------
-- Table structure for job_lock
-- ----------------------------
DROP TABLE IF EXISTS `job_lock`;
CREATE TABLE `job_lock` (
`lock_name` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '锁名称',
PRIMARY KEY (`lock_name`) USING BTREE
) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Records of job_lock
-- ----------------------------
INSERT INTO `job_lock` VALUES ('schedule_lock');
-- ----------------------------
-- Table structure for job_log
-- ----------------------------
DROP TABLE IF EXISTS `job_log`;
CREATE TABLE `job_log` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`job_group` int(11) NOT NULL COMMENT '执行器主键ID',
`job_id` int(11) NOT NULL COMMENT '任务主键ID',
`job_desc` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
`executor_address` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器地址,本次执行的地址',
`executor_handler` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器任务handler',
`executor_param` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器任务参数',
`executor_sharding_param` varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器任务分片参数,格式如 1/2',
`executor_fail_retry_count` int(11) NULL DEFAULT 0 COMMENT '失败重试次数',
`trigger_time` datetime(0) NULL DEFAULT NULL COMMENT '调度-时间',
`trigger_code` int(11) NOT NULL COMMENT '调度-结果',
`trigger_msg` text CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL COMMENT '调度-日志',
`handle_time` datetime(0) NULL DEFAULT NULL COMMENT '执行-时间',
`handle_code` int(11) NOT NULL COMMENT '执行-状态',
`handle_msg` text CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL COMMENT '执行-日志',
`alarm_status` tinyint(4) NOT NULL DEFAULT 0 COMMENT '告警状态0-默认、1-无需告警、2-告警成功、3-告警失败',
`process_id` varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT 'datax进程Id',
`max_id` bigint(20) NULL DEFAULT NULL COMMENT '增量表max id',
PRIMARY KEY (`id`) USING BTREE,
INDEX `I_trigger_time`(`trigger_time`) USING BTREE,
INDEX `I_handle_code`(`handle_code`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 0 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Table structure for job_log_report
-- ----------------------------
DROP TABLE IF EXISTS `job_log_report`;
CREATE TABLE `job_log_report` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`trigger_day` datetime(0) NULL DEFAULT NULL COMMENT '调度-时间',
`running_count` int(11) NOT NULL DEFAULT 0 COMMENT '运行中-日志数量',
`suc_count` int(11) NOT NULL DEFAULT 0 COMMENT '执行成功-日志数量',
`fail_count` int(11) NOT NULL DEFAULT 0 COMMENT '执行失败-日志数量',
PRIMARY KEY (`id`) USING BTREE,
UNIQUE INDEX `i_trigger_day`(`trigger_day`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 28 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Records of job_log_report
-- ----------------------------
INSERT INTO `job_log_report` VALUES (20, '2019-12-07 00:00:00', 0, 0, 0);
INSERT INTO `job_log_report` VALUES (21, '2019-12-10 00:00:00', 77, 52, 23);
INSERT INTO `job_log_report` VALUES (22, '2019-12-11 00:00:00', 9, 2, 11);
INSERT INTO `job_log_report` VALUES (23, '2019-12-13 00:00:00', 9, 48, 74);
INSERT INTO `job_log_report` VALUES (24, '2019-12-12 00:00:00', 10, 8, 30);
INSERT INTO `job_log_report` VALUES (25, '2019-12-14 00:00:00', 78, 45, 66);
INSERT INTO `job_log_report` VALUES (26, '2019-12-15 00:00:00', 24, 76, 9);
INSERT INTO `job_log_report` VALUES (27, '2019-12-16 00:00:00', 23, 85, 10);
-- ----------------------------
-- Table structure for job_logglue
-- ----------------------------
DROP TABLE IF EXISTS `job_logglue`;
CREATE TABLE `job_logglue` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`job_id` int(11) NOT NULL COMMENT '任务主键ID',
`glue_type` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT 'GLUE类型',
`glue_source` mediumtext CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL COMMENT 'GLUE源代码',
`glue_remark` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT 'GLUE备注',
`add_time` datetime(0) NULL DEFAULT NULL,
`update_time` datetime(0) NULL DEFAULT NULL,
PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Table structure for job_registry
-- ----------------------------
DROP TABLE IF EXISTS `job_registry`;
CREATE TABLE `job_registry` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`registry_group` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
`registry_key` varchar(191) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
`registry_value` varchar(191) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
`update_time` datetime(0) NULL DEFAULT NULL,
PRIMARY KEY (`id`) USING BTREE,
INDEX `i_g_k_v`(`registry_group`, `registry_key`, `registry_value`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 26 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Table structure for job_user
-- ----------------------------
DROP TABLE IF EXISTS `job_user`;
CREATE TABLE `job_user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '账号',
`password` varchar(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '密码',
`role` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '角色0-普通用户、1-管理员',
`permission` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '权限执行器ID列表多个逗号分割',
PRIMARY KEY (`id`) USING BTREE,
UNIQUE INDEX `i_username`(`username`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 10 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Records of job_user
-- ----------------------------
INSERT INTO `job_user` VALUES (1, 'admin', '$2a$10$2KCqRbra0Yn2TwvkZxtfLuWuUP5KyCWsljO/ci5pLD27pqR3TV1vy', 'ROLE_ADMIN', NULL);
/**
v2.1.1脚本更新
*/
-- v2.1.1 upgrade: add dynamic-parameter, JVM-parameter and time-offset
-- columns to job_info. (Fixed: the original omitted the space between
-- DEFAULT '0' and COMMENT, which some parsers reject.)
ALTER TABLE `job_info`
ADD COLUMN `replace_param` VARCHAR(100) NULL DEFAULT NULL COMMENT '动态参数' AFTER `job_json`,
ADD COLUMN `jvm_param` VARCHAR(200) NULL DEFAULT NULL COMMENT 'jvm参数' AFTER `replace_param`,
ADD COLUMN `time_offset` INT(11) NULL DEFAULT '0' COMMENT '时间偏移量' AFTER `jvm_param`;
/**
增量改版脚本更新
*/
ALTER TABLE `job_info` DROP COLUMN `time_offset`;
ALTER TABLE `job_info`
ADD COLUMN `inc_start_time` DATETIME NULL DEFAULT NULL COMMENT '增量初始时间' AFTER `jvm_param`;
-- ----------------------------
-- Table structure for job_template
-- ----------------------------
DROP TABLE IF EXISTS `job_template`;
CREATE TABLE `job_template` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`job_group` int(11) NOT NULL COMMENT '执行器主键ID',
`job_cron` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '任务执行CRON',
`job_desc` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
`add_time` datetime(0) NULL DEFAULT NULL,
`update_time` datetime(0) NULL DEFAULT NULL,
`user_id` int(11) NOT NULL COMMENT '修改用户',
`alarm_email` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '报警邮件',
`executor_route_strategy` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器路由策略',
`executor_handler` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器任务handler',
`executor_param` varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '执行器参数',
`executor_block_strategy` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '阻塞处理策略',
`executor_timeout` int(11) NOT NULL DEFAULT 0 COMMENT '任务执行超时时间,单位秒',
`executor_fail_retry_count` int(11) NOT NULL DEFAULT 0 COMMENT '失败重试次数',
`glue_type` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT 'GLUE类型',
`glue_source` mediumtext CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL COMMENT 'GLUE源代码',
`glue_remark` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT 'GLUE备注',
`glue_updatetime` datetime(0) NULL DEFAULT NULL COMMENT 'GLUE更新时间',
`child_jobid` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '子任务ID多个逗号分隔',
`trigger_last_time` bigint(13) NOT NULL DEFAULT 0 COMMENT '上次调度时间',
`trigger_next_time` bigint(13) NOT NULL DEFAULT 0 COMMENT '下次调度时间',
`job_json` text CHARACTER SET utf8 COLLATE utf8_general_ci NULL COMMENT 'datax运行脚本',
`jvm_param` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT 'jvm参数',
`project_id` int(11) NULL DEFAULT NULL COMMENT '所属项目Id',
PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 22 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
/**
添加数据源字段
*/
ALTER TABLE `job_jdbc_datasource`
ADD COLUMN `datasource` VARCHAR(45) NOT NULL COMMENT '数据源' AFTER `datasource_name`;
/**
添加分区字段
*/
ALTER TABLE `job_info`
ADD COLUMN `partition_info` VARCHAR(100) NULL DEFAULT NULL COMMENT '分区信息' AFTER `inc_start_time`;
/**
2.1.1版本新增----------------------------------------------------------------------------------------------
*/
/**
最近一次执行状态
*/
ALTER TABLE `job_info`
ADD COLUMN `last_handle_code` INT(11) NULL DEFAULT '0' COMMENT '最近一次执行状态' AFTER `partition_info`;
/**
zookeeper地址
*/
ALTER TABLE `job_jdbc_datasource`
ADD COLUMN `zk_adress` VARCHAR(200) NULL DEFAULT NULL AFTER `jdbc_driver_class`;
ALTER TABLE `job_info`
CHANGE COLUMN `executor_timeout` `executor_timeout` INT(11) NOT NULL DEFAULT '0' COMMENT '任务执行超时时间,单位分钟' ;
/**
用户名密码改为非必填
*/
ALTER TABLE `job_jdbc_datasource`
CHANGE COLUMN `jdbc_username` `jdbc_username` VARCHAR(100) CHARACTER SET 'utf8mb4' NULL DEFAULT NULL COMMENT '用户名' ,
CHANGE COLUMN `jdbc_password` `jdbc_password` VARCHAR(200) CHARACTER SET 'utf8mb4' NULL DEFAULT NULL COMMENT '密码' ;
/**
添加mongodb数据库名字段
*/
ALTER TABLE `job_jdbc_datasource`
ADD COLUMN `database_name` VARCHAR(45) NULL DEFAULT NULL COMMENT '数据库名' AFTER `datasource_group`;
/**
添加执行器资源字段
*/
ALTER TABLE `job_registry`
ADD COLUMN `cpu_usage` DOUBLE NULL AFTER `registry_value`,
ADD COLUMN `memory_usage` DOUBLE NULL AFTER `cpu_usage`,
ADD COLUMN `load_average` DOUBLE NULL AFTER `memory_usage`;
-- ----------------------------
-- Table structure for job_permission
-- ----------------------------
DROP TABLE IF EXISTS `job_permission`;
CREATE TABLE `job_permission` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`name` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '权限名',
`description` varchar(11) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '权限描述',
`url` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
`pid` int(11) NULL DEFAULT NULL,
PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 3 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
ALTER TABLE `job_info`
ADD COLUMN `replace_param_type` varchar(255) NULL COMMENT '增量时间格式' AFTER `last_handle_code`;
ALTER TABLE `job_info`
ADD COLUMN `project_id` int(11) NULL COMMENT '所属项目id' AFTER `job_desc`;
ALTER TABLE `job_info`
ADD COLUMN `reader_table` VARCHAR(255) NULL COMMENT 'reader表名称' AFTER `replace_param_type`,
ADD COLUMN `primary_key` VARCHAR(50) NULL COMMENT '增量表主键' AFTER `reader_table`,
ADD COLUMN `inc_start_id` VARCHAR(20) NULL COMMENT '增量初始id' AFTER `primary_key`,
ADD COLUMN `increment_type` TINYINT(4) NULL COMMENT '增量类型' AFTER `inc_start_id`,
ADD COLUMN `datasource_id` BIGINT(11) NULL COMMENT '数据源id' AFTER `increment_type`;
CREATE TABLE `job_project` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT 'project name',
`description` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
`user_id` int(11) NULL DEFAULT NULL COMMENT 'creator id',
`flag` tinyint(4) NULL DEFAULT 1 COMMENT '0 not available, 1 available',
`create_time` datetime(0) NULL DEFAULT CURRENT_TIMESTAMP(0) COMMENT 'create time',
`update_time` datetime(0) NULL DEFAULT CURRENT_TIMESTAMP(0) COMMENT 'update time',
PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
ALTER TABLE `job_info`
CHANGE COLUMN `author` `user_id` INT(11) NOT NULL COMMENT '修改用户' ;
ALTER TABLE `job_info`
CHANGE COLUMN `increment_type` `increment_type` TINYINT(4) NULL DEFAULT 0 COMMENT '增量类型' ;

10
fastsdcpu/Dockerfile Normal file
View File

@@ -0,0 +1,10 @@
# Base image for FastSD CPU: Python 3.11 plus ffmpeg for media handling.
FROM python:3.11
LABEL maintainer="Offends <offends4@163.com>"
# Use apt-get consistently (apt's CLI is not stable for scripted use) and
# clean the package lists to keep the layer small.
RUN apt-get update \
    && apt-get install -y ffmpeg \
    && rm -rf /var/lib/apt/lists/*
COPY ./start.sh /start.sh
# The exec-form CMD runs /start.sh directly (no shell), so the script must
# carry the executable bit; the original relied on it surviving the COPY.
RUN chmod +x /start.sh
CMD ["/start.sh"]

View File

@@ -0,0 +1,5 @@
# Minimal reverse-proxy image: nginx with the site config on port 80.
FROM nginx:alpine-slim
COPY ./default.conf /etc/nginx/conf.d/default.conf
EXPOSE 80

38
fastsdcpu/README.md Normal file
View File

@@ -0,0 +1,38 @@
# Fastsdcpu-docker
1. 构建基础镜像
```bash
docker build -t app:v1 .
```
2. 启动容器
```bash
docker run -it --name fastsdcpu --net=host -v /root/fastsdcpu:/fastsdcpu -v /root/.cache:/root/.cache app:v1 bash
```
3. 初始化文件
```bash
cd /fastsdcpu && git init
git remote add origin https://github.com/rupeshs/fastsdcpu.git
git pull origin main
```
4. 初始化数据
```bash
chmod +x install.sh && ./install.sh
```
```bash
./start.sh
```
```bash
./start-webui.sh
```
> 启动成功后访问页面, 生成一张图片以便下载 cache 数据

15
fastsdcpu/default.conf Normal file
View File

@@ -0,0 +1,15 @@
# Reverse proxy exposing the local fastsdcpu web UI (port 7860) on port 80.
server {
    listen 80;
    server_name default;

    location / {
        proxy_pass http://127.0.0.1:7860;

        # HTTP/1.1 plus the Upgrade/Connection headers allow WebSocket
        # upgrade requests from the UI to pass through the proxy.
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        # Forward the original host and client address to the backend.
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}

37
fastsdcpu/start.sh Normal file
View File

@@ -0,0 +1,37 @@
#!/bin/bash
# Bootstrap script: on first run, downloads and unpacks the model cache
# (/root/.cache) and the fastsdcpu application tree (/fastsdcpu), then
# starts the web UI in the foreground.
#
# Fixes over the original: abort on any failure instead of extracting /
# deleting after a failed download, create the probed directories first
# (`ls -A` on a missing directory errors), and deduplicate the
# download/extract sequence into one helper.
set -e

# fetch_and_extract URL DEST_DIR PROGRESS_MSG
#   Download the tarball at URL into /, extract it into DEST_DIR, then
#   remove the archive.
fetch_and_extract() {
    local url="$1" dest="$2" msg="$3"
    local archive="/$(basename "$url")"
    cd /
    wget "$url"
    echo "################################################################"
    echo "$msg"
    echo "################################################################"
    tar -xvf "$archive" -C "$dest" > /dev/null 2>&1
    rm -f "$archive"
    echo "################################################################"
    echo "文件准备完成"
    echo "################################################################"
}

# Ensure both target directories exist before probing them for content.
mkdir -p /root/.cache /fastsdcpu

# Populate the HuggingFace-style cache on first start.
if [ -z "$(ls -A /root/.cache)" ]; then
    fetch_and_extract \
        "https://rainbond-pkg.oss-cn-shanghai.aliyuncs.com/rainstore/cache-nobody.tar" \
        /root/ \
        "正在解压主体文件,文件大小 5.39G, 预计5分钟左右,请稍等..."
fi

# Populate the application tree on first start.
if [ -z "$(ls -A /fastsdcpu)" ]; then
    fetch_and_extract \
        "https://rainbond-pkg.oss-cn-shanghai.aliyuncs.com/rainstore/fastsdcpu-nobody.tar" \
        / \
        "正在解压主体文件,文件大小 2.64G, 预计5分钟左右,请稍等..."
fi

# Start the web UI (foreground process keeps the container alive).
cd /fastsdcpu
./start-webui.sh

20
flarum/Dockerfile Normal file
View File

@@ -0,0 +1,20 @@
FROM mondedie/flarum:latest

LABEL maintainer="Offends <offends4@163.com>"

# Database connection defaults (override at deploy time).
ENV DB_HOST=127.0.0.1 \
    DB_NAME=root \
    DB_PASS=root \
    DB_PORT=3306 \
    DB_PREF=flarum_ \
    DB_USER=flarum

# PHP / opcache memory tuning.
ENV OPCACHE_MEMORY_LIMIT=512 \
    PHP_MEMORY_LIMIT=512M

# Initial admin account, forum title, and external URL.
ENV FLARUM_ADMIN_MAIL=admin@admin.com \
    FLARUM_ADMIN_PASS=admin \
    FLARUM_ADMIN_USER=admin \
    FLARUM_TITLE=社区 \
    FORUM_URL=

# Install the Simplified Chinese language pack, then clear Flarum's caches.
RUN composer require flarum-lang/chinese-simplified \
    && php flarum cache:clear

View File

@@ -0,0 +1,25 @@
# Golang CircleCI 2.0 configuration file
#
# Check https://circleci.com/docs/2.0/language-go/ for more details
version: 2
jobs:
  build:
    docker:
      - image: circleci/golang:1.10
    working_directory: /go/src/github.com/AliyunContainerService/gpushare-device-plugin
    steps:
      - checkout
      # Provide a remote Docker daemon so the `docker build` step below works.
      - setup_remote_docker:
          docker_layer_caching: true
      - run:
          name: run tests
          command: |
            test -z $(go fmt ./...)
            go vet ./...
            go test -race -v ./...
      # Build the plugin image, tagged with the CI build number.
      - run: docker build -t acs/gpushare-device-plugin:$CIRCLE_BUILD_NUM .
      # Collect coverage and upload it to Codecov.
      - run:
          name: codecov
          command: |
            go test -race -coverprofile=coverage.txt -covermode=atomic ./...
            bash <(curl -s https://codecov.io/bash)

View File

@@ -0,0 +1,18 @@
language: go

go:
  - "1.10"

go_import_path: github.com/AliyunContainerService/gpushare-device-plugin

# let us have speedy Docker-based Travis workers
sudo: false

env:
  # Allow cgo to pass the "ignore unresolved symbols" linker flag;
  # presumably the missing symbols come from the NVIDIA driver at
  # runtime — matches the CGO_LDFLAGS_ALLOW export in the Dockerfile.
  - CGO_LDFLAGS_ALLOW='-Wl,--unresolved-symbols=ignore-in-object-files'

script:
  # Build both binaries (device plugin + kubectl inspect plugin), then vet/test.
  - go build -ldflags="-s -w" -o gpushare-device-plugin cmd/nvidia/main.go
  - go build -o kubectl-inspect-gpushare cmd/inspect/*.go
  - go vet ./...
  - go test ./...

View File

@@ -0,0 +1,20 @@
# Build stage: compile the device plugin and the kubectl inspect plugin.
FROM golang:1.10-stretch as build

WORKDIR /go/src/github.com/AliyunContainerService/gpushare-device-plugin
COPY . .

# CGO_LDFLAGS_ALLOW whitelists the "ignore unresolved symbols" linker flag
# for cgo — presumably the unresolved symbols are supplied by the NVIDIA
# driver libraries at runtime (TODO confirm against the nvml bindings).
RUN export CGO_LDFLAGS_ALLOW='-Wl,--unresolved-symbols=ignore-in-object-files' && \
    go build -ldflags="-s -w" -o /go/bin/gpushare-device-plugin-v2 cmd/nvidia/main.go
RUN go build -o /go/bin/kubectl-inspect-gpushare-v2 cmd/inspect/*.go

# Runtime stage: slim base; the NVIDIA_* variables below are read by the
# nvidia container runtime to inject driver utility libraries.
FROM debian:bullseye-slim

ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=utility

COPY --from=build /go/bin/gpushare-device-plugin-v2 /usr/bin/gpushare-device-plugin-v2
COPY --from=build /go/bin/kubectl-inspect-gpushare-v2 /usr/bin/kubectl-inspect-gpushare-v2

CMD ["gpushare-device-plugin-v2","-logtostderr"]

591
gpushare-device-plugin/Gopkg.lock generated Normal file
View File

@@ -0,0 +1,591 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:36d1549972d1ebac59f3b15708485666761d8051f675776193ccb1be3cb6ea84"
name = "github.com/NVIDIA/gpu-monitoring-tools"
packages = ["bindings/go/nvml"]
pruneopts = "UT"
revision = "86f2a9fac6c5b597dc494420005144b8ef7ec9fb"
[[projects]]
digest = "1:179992ae7637e3aa978ab9f27e9a32b1d452dc4ae443037a34d5e08aa4ca23e0"
name = "github.com/NVIDIA/nvidia-docker"
packages = ["src/nvml"]
pruneopts = "UT"
revision = "01d2c9436620d7dde4672e414698afe6da4a282f"
version = "v1.0.1"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
name = "github.com/fsnotify/fsnotify"
packages = ["."]
pruneopts = "UT"
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda"
name = "github.com/ghodss/yaml"
packages = ["."]
pruneopts = "UT"
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
digest = "1:db238461f652ddb7c7057bc6fc503f6003a29987b1485ecbb96d92287db65bc9"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
"proto",
"protoc-gen-gogo/descriptor",
"sortkeys",
]
pruneopts = "UT"
revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1"
[[projects]]
branch = "master"
digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = "UT"
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
]
pruneopts = "UT"
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:0bfbe13936953a98ae3cfe8ed6670d396ad81edf069a806d2f6515d7bb6950df"
name = "github.com/google/btree"
packages = ["."]
pruneopts = "UT"
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
[[projects]]
branch = "master"
digest = "1:3ee90c0d94da31b442dde97c99635aaafec68d0b8a3c12ee2075c6bdabeec6bb"
name = "github.com/google/gofuzz"
packages = ["."]
pruneopts = "UT"
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
digest = "1:65c4414eeb350c47b8de71110150d0ea8a281835b1f386eacaa3ad7325929c21"
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
"extensions",
]
pruneopts = "UT"
revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
version = "v0.2.0"
[[projects]]
digest = "1:878f0defa9b853f9acfaf4a162ba450a89d0050eff084f9fe7f5bd15948f172a"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache",
]
pruneopts = "UT"
revision = "787624de3eb7bd915c329cba748687a3b22666a6"
[[projects]]
digest = "1:8ec8d88c248041a6df5f6574b87bc00e7e0b493881dad2e7ef47b11dc69093b5"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru",
]
pruneopts = "UT"
revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
version = "v0.5.0"
[[projects]]
digest = "1:8eb1de8112c9924d59bf1d3e5c26f5eaa2bfc2a5fcbb92dc1c2e4546d695f277"
name = "github.com/imdario/mergo"
packages = ["."]
pruneopts = "UT"
revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
version = "v0.3.6"
[[projects]]
digest = "1:3e551bbb3a7c0ab2a2bf4660e7fcad16db089fdcfbb44b0199e62838038623ea"
name = "github.com/json-iterator/go"
packages = ["."]
pruneopts = "UT"
revision = "1624edc4454b8682399def8740d46db5e4362ba4"
version = "v1.1.5"
[[projects]]
digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563"
name = "github.com/modern-go/concurrent"
packages = ["."]
pruneopts = "UT"
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855"
name = "github.com/modern-go/reflect2"
packages = ["."]
pruneopts = "UT"
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
version = "1.0.1"
[[projects]]
branch = "master"
digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
pruneopts = "UT"
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
digest = "1:0e7775ebbcf00d8dd28ac663614af924411c868dca3d5aa762af0fae3808d852"
name = "github.com/peterbourgon/diskv"
packages = ["."]
pruneopts = "UT"
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
branch = "release-branch.go1.11"
digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "UT"
revision = "56440b844dfe139a8ac053f4ecac0b20b79058f4"
[[projects]]
branch = "release-branch.go1.10"
digest = "1:677a5d026fcc0c1e764f2d688a363316e3b157cec39b8fbaa0c1a1796aac3643"
name = "golang.org/x/net"
packages = [
"context",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"trace",
]
pruneopts = "UT"
revision = "0ed95abb35c445290478a5348a7b38bb154135fd"
[[projects]]
branch = "release-branch.go1.11"
digest = "1:a60cae5be8993938498243605b120290533a5208fd5cac81c932afbad3642fb0"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "UT"
revision = "98c5dad5d1a0e8a73845ecc8897d0bd56586511d"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "UT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
digest = "1:d37b0ef2944431fe9e8ef35c6fffc8990d9e2ca300588df94a6890f3649ae365"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "UT"
revision = "f51c12702a4d776e4c1fa9b0fabab841babae631"
[[projects]]
branch = "jba-regen"
digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "UT"
revision = "42f80515abfed431577ebeded4ab86390ce0a5cd"
[[projects]]
digest = "1:c3ad9841823db6da420a5625b367913b4ff54bbe60e8e3c98bd20e243e62e2d2"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/transport",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
]
pruneopts = "UT"
revision = "2e463a05d100327ca47ac218281906921038fd95"
version = "v1.16.0"
[[projects]]
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
name = "gopkg.in/inf.v0"
packages = ["."]
pruneopts = "UT"
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
version = "v2.2.2"
[[projects]]
digest = "1:34ffbf9ed5e63a11e4e0aaab597dc36c552da8b5b6bd49d8f73dadd4afd7e677"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"scheduling/v1beta1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1",
]
pruneopts = "UT"
revision = "2d6f90ab1293a1fb871cf149423ebb72aa7423aa"
version = "kubernetes-1.11.2"
[[projects]]
branch = "release-1.10"
digest = "1:b46a162d7c7e9117ae2dd9a73ee4dc2181ad9ea9d505fd7c5eb63c96211dc9dd"
name = "k8s.io/apiextensions-apiserver"
packages = ["pkg/features"]
pruneopts = "UT"
revision = "f584b16eb23bd2a3fd292a027d698d95db427c5d"
[[projects]]
branch = "release-1.11"
digest = "1:33730fb514340e487c72597b579737e896b51a79117aedd6a4e24b59f3e949dc"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/strategicpatch",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/reflect",
]
pruneopts = "UT"
revision = "70adfbae261eebb795b76321790745ad0e3c523f"
[[projects]]
branch = "release-1.11"
digest = "1:c9066b5ef02608a8e66766c658bb3f306ca1880855a53d34252ed0f5fb73ea19"
name = "k8s.io/apiserver"
packages = [
"pkg/features",
"pkg/util/feature",
]
pruneopts = "UT"
revision = "13cfe3978170675900fbed4994382716c5bd293b"
[[projects]]
digest = "1:b4189450e71f1c5b629c8c110e8e6a80d9703047f4fd357f214c0fb2cfb0d398"
name = "k8s.io/client-go"
packages = [
"discovery",
"informers",
"informers/admissionregistration",
"informers/admissionregistration/v1alpha1",
"informers/admissionregistration/v1beta1",
"informers/apps",
"informers/apps/v1",
"informers/apps/v1beta1",
"informers/apps/v1beta2",
"informers/autoscaling",
"informers/autoscaling/v1",
"informers/autoscaling/v2beta1",
"informers/batch",
"informers/batch/v1",
"informers/batch/v1beta1",
"informers/batch/v2alpha1",
"informers/certificates",
"informers/certificates/v1beta1",
"informers/core",
"informers/core/v1",
"informers/events",
"informers/events/v1beta1",
"informers/extensions",
"informers/extensions/v1beta1",
"informers/internalinterfaces",
"informers/networking",
"informers/networking/v1",
"informers/policy",
"informers/policy/v1beta1",
"informers/rbac",
"informers/rbac/v1",
"informers/rbac/v1alpha1",
"informers/rbac/v1beta1",
"informers/scheduling",
"informers/scheduling/v1alpha1",
"informers/scheduling/v1beta1",
"informers/settings",
"informers/settings/v1alpha1",
"informers/storage",
"informers/storage/v1",
"informers/storage/v1alpha1",
"informers/storage/v1beta1",
"kubernetes",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/core/v1",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/networking/v1",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/scheduling/v1beta1",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1beta1",
"listers/admissionregistration/v1alpha1",
"listers/admissionregistration/v1beta1",
"listers/apps/v1",
"listers/apps/v1beta1",
"listers/apps/v1beta2",
"listers/autoscaling/v1",
"listers/autoscaling/v2beta1",
"listers/batch/v1",
"listers/batch/v1beta1",
"listers/batch/v2alpha1",
"listers/certificates/v1beta1",
"listers/core/v1",
"listers/events/v1beta1",
"listers/extensions/v1beta1",
"listers/networking/v1",
"listers/policy/v1beta1",
"listers/rbac/v1",
"listers/rbac/v1alpha1",
"listers/rbac/v1beta1",
"listers/scheduling/v1alpha1",
"listers/scheduling/v1beta1",
"listers/settings/v1alpha1",
"listers/storage/v1",
"listers/storage/v1alpha1",
"listers/storage/v1beta1",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/apis/clientauthentication/v1beta1",
"pkg/version",
"plugin/pkg/client/auth/exec",
"rest",
"rest/watch",
"tools/auth",
"tools/cache",
"tools/clientcmd",
"tools/clientcmd/api",
"tools/clientcmd/api/latest",
"tools/clientcmd/api/v1",
"tools/metrics",
"tools/pager",
"tools/reference",
"transport",
"util/buffer",
"util/cert",
"util/connrotation",
"util/flowcontrol",
"util/homedir",
"util/integer",
"util/retry",
]
pruneopts = "UT"
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
version = "v8.0.0"
[[projects]]
branch = "feature-serverside-apply"
digest = "1:e0d6dcb28c42a53c7243bb6380badd17f92fbd8488a075a07e984f91a07c0d23"
name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"]
pruneopts = "UT"
revision = "f442ecb314a3679150c272e2b9713d8deed5955d"
[[projects]]
digest = "1:b7a71a411d440a0b7b4ecdc7ac31b56b976451492705a2efc8413427410f5397"
name = "k8s.io/kubernetes"
packages = [
"pkg/features",
"pkg/kubelet/apis",
"pkg/kubelet/apis/deviceplugin/v1beta1",
"pkg/util/node",
]
pruneopts = "UT"
revision = "435f92c719f279a3a67808c80521ea17d5715c66"
version = "v1.12.3"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml",
"github.com/NVIDIA/nvidia-docker/src/nvml",
"github.com/fsnotify/fsnotify",
"github.com/golang/glog",
"golang.org/x/net/context",
"google.golang.org/grpc",
"k8s.io/api/core/v1",
"k8s.io/apimachinery/pkg/api/resource",
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/fields",
"k8s.io/apimachinery/pkg/labels",
"k8s.io/apimachinery/pkg/types",
"k8s.io/client-go/informers",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/listers/core/v1",
"k8s.io/client-go/tools/cache",
"k8s.io/client-go/tools/clientcmd",
"k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1",
"k8s.io/kubernetes/pkg/util/node",
]
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -0,0 +1,66 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
revision = "86f2a9fac6c5b597dc494420005144b8ef7ec9fb"
name = "github.com/NVIDIA/gpu-monitoring-tools"
[[constraint]]
name = "k8s.io/kubernetes"
version = "v1.11.2"
[[constraint]]
name = "k8s.io/apimachinery"
branch = "release-1.11"
[[constraint]]
name = "k8s.io/client-go"
version = "~v8.0.0"
[[override]]
name = "k8s.io/api"
version = "kubernetes-1.11.2"
[[override]]
name = "github.com/gregjones/httpcache"
revision = "787624de3eb7bd915c329cba748687a3b22666a6"
[[override]]
name = "golang.org/x/time"
revision = "f51c12702a4d776e4c1fa9b0fabab841babae631"
[[override]]
name = "github.com/docker/docker"
revision = "4f3616fb1c112e206b88cb7a9922bf49067a7756"
[[override]]
name = "github.com/docker/distribution"
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
[prune]
go-tests = true
unused-packages = true

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,15 @@
# GPU Sharing Device Plugin in Kubernetes
[![CircleCI](https://circleci.com/gh/AliyunContainerService/gpushare-device-plugin.svg?style=svg)](https://circleci.com/gh/AliyunContainerService/gpushare-device-plugin)
[![Build Status](https://travis-ci.org/AliyunContainerService/gpushare-device-plugin.svg?branch=master)](https://travis-ci.org/AliyunContainerService/gpushare-device-plugin)
[![Go Report Card](https://goreportcard.com/badge/github.com/AliyunContainerService/gpushare-device-plugin)](https://goreportcard.com/report/github.com/AliyunContainerService/gpushare-device-plugin)
## About
The Nvidia GPU sharing device plugin for Kubernetes is a Daemonset that allows you to automatically:
- Expose the GPU Memory and GPU count on the node of your cluster
- Run GPU sharing enabled containers in your Kubernetes cluster.
For more info, please refer [gpusharing scheduler extender](https://github.com/AliyunContainerService/gpushare-scheduler-extender)

View File

@@ -0,0 +1,255 @@
package main
import (
"bytes"
"fmt"
"os"
"strconv"
"text/tabwriter"
log "github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
// displayDetails prints a per-node report to stdout: node name and IP, a
// table of pods with their per-GPU allocated memory, the node's
// allocated/total summary, and finally a cluster-wide total.
//
// Nodes advertising no GPU memory (gpuTotalMemory <= 0) are skipped.
func displayDetails(nodeInfos []*NodeInfo) {
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)

	var (
		totalGPUMemInCluster int64
		usedGPUMemInCluster  int64
		prtLineLen           int
	)

	for _, nodeInfo := range nodeInfos {
		// Prefer the node's InternalIP; fall back to "unknown".
		address := "unknown"
		if len(nodeInfo.node.Status.Addresses) > 0 {
			for _, addr := range nodeInfo.node.Status.Addresses {
				if addr.Type == v1.NodeInternalIP {
					address = addr.Address
					break
				}
			}
		}

		totalGPUMemInNode := nodeInfo.gpuTotalMemory
		if totalGPUMemInNode <= 0 {
			continue
		}

		fmt.Fprintf(w, "\n")
		fmt.Fprintf(w, "NAME:\t%s\n", nodeInfo.node.Name)
		fmt.Fprintf(w, "IPADDRESS:\t%s\n", address)
		fmt.Fprintf(w, "\n")

		usedGPUMemInNode := 0
		// Table header: one column per GPU, plus a pending column when the
		// node has allocations not yet bound to a concrete device.
		var buf bytes.Buffer
		buf.WriteString("NAME\tNAMESPACE\t")
		for i := 0; i < nodeInfo.gpuCount; i++ {
			buf.WriteString(fmt.Sprintf("GPU%d(Allocated)\t", i))
		}
		if nodeInfo.hasPendingGPUMemory() {
			buf.WriteString("Pending(Allocated)\t")
		}
		buf.WriteString("\n")
		// Fixed: use Fprint, not Fprintf — the buffer is data, not a format
		// string (go vet printf; '%' in data would corrupt output).
		fmt.Fprint(w, buf.String())

		var buffer bytes.Buffer
		exists := map[types.UID]bool{}
		for i, dev := range nodeInfo.devs {
			usedGPUMemInNode += dev.usedGPUMem
			for _, pod := range dev.pods {
				// A pod may be listed under several devices; print it once.
				if _, ok := exists[pod.UID]; ok {
					continue
				}
				buffer.WriteString(fmt.Sprintf("%s\t%s\t", pod.Name, pod.Namespace))
				count := nodeInfo.gpuCount
				if nodeInfo.hasPendingGPUMemory() {
					count += 1
				}
				for k := 0; k < count; k++ {
					allocation := GetAllocation(&pod)
					if len(allocation) != 0 {
						buffer.WriteString(fmt.Sprintf("%d\t", allocation[k]))
						continue
					}
					// Device index -1 marks pending memory; it is printed
					// in the extra trailing column (k == gpuCount).
					if k == i || (i == -1 && k == nodeInfo.gpuCount) {
						buffer.WriteString(fmt.Sprintf("%d\t", getGPUMemoryInPod(pod)))
					} else {
						buffer.WriteString("0\t")
					}
				}
				buffer.WriteString("\n")
				exists[pod.UID] = true
			}
		}
		if prtLineLen == 0 {
			prtLineLen = buffer.Len() + 10
		}
		// Fixed: Fprint for the same reason as above.
		fmt.Fprint(w, buffer.String())

		var gpuUsageInNode float64 = 0
		if totalGPUMemInNode > 0 {
			gpuUsageInNode = float64(usedGPUMemInNode) / float64(totalGPUMemInNode) * 100
		} else {
			fmt.Fprintf(w, "\n")
		}

		fmt.Fprintf(w, "Allocated :\t%d (%d%%)\t\n", usedGPUMemInNode, int64(gpuUsageInNode))
		fmt.Fprintf(w, "Total :\t%d \t\n", nodeInfo.gpuTotalMemory)
		var prtLine bytes.Buffer
		for i := 0; i < prtLineLen; i++ {
			prtLine.WriteString("-")
		}
		prtLine.WriteString("\n")
		fmt.Fprint(w, prtLine.String())
		totalGPUMemInCluster += int64(totalGPUMemInNode)
		usedGPUMemInCluster += int64(usedGPUMemInNode)
	}
	fmt.Fprintf(w, "\n")
	fmt.Fprintf(w, "\n")
	fmt.Fprintf(w, "Allocated/Total GPU Memory In Cluster:\t")
	log.V(2).Infof("gpu: %s, allocated GPU Memory %s", strconv.FormatInt(totalGPUMemInCluster, 10),
		strconv.FormatInt(usedGPUMemInCluster, 10))

	var gpuUsage float64 = 0
	if totalGPUMemInCluster > 0 {
		gpuUsage = float64(usedGPUMemInCluster) / float64(totalGPUMemInCluster) * 100
	}
	fmt.Fprintf(w, "%s/%s (%d%%)\t\n",
		strconv.FormatInt(usedGPUMemInCluster, 10),
		strconv.FormatInt(totalGPUMemInCluster, 10),
		int64(gpuUsage))

	_ = w.Flush()
}
// getMaxGPUCount returns the largest per-node GPU count found in nodeInfos
// (zero for an empty slice).
func getMaxGPUCount(nodeInfos []*NodeInfo) (max int) {
	for _, info := range nodeInfos {
		if c := info.gpuCount; c > max {
			max = c
		}
	}
	return
}
// displaySummary prints one line per GPU node — per-GPU allocated/total
// columns, an optional pending column, and node totals — followed by a
// cluster-wide total. Nodes with no GPU memory are skipped.
func displaySummary(nodeInfos []*NodeInfo) {
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	var (
		maxGPUCount          int
		totalGPUMemInCluster int64
		usedGPUMemInCluster  int64
		prtLineLen           int
	)

	hasPendingGPU := hasPendingGPUMemory(nodeInfos)
	maxGPUCount = getMaxGPUCount(nodeInfos)

	// Header: the widest node determines the number of GPU columns.
	var buffer bytes.Buffer
	buffer.WriteString("NAME\tIPADDRESS\t")
	for i := 0; i < maxGPUCount; i++ {
		buffer.WriteString(fmt.Sprintf("GPU%d(Allocated/Total)\t", i))
	}
	if hasPendingGPU {
		buffer.WriteString("PENDING(Allocated)\t")
	}
	buffer.WriteString(fmt.Sprintf("GPU Memory(%s)\n", memoryUnit))
	// Fixed: Fprint, not Fprintf — the buffer is data, not a format string
	// (go vet printf).
	fmt.Fprint(w, buffer.String())
	for _, nodeInfo := range nodeInfos {
		// Prefer the node's InternalIP; fall back to "unknown".
		address := "unknown"
		if len(nodeInfo.node.Status.Addresses) > 0 {
			for _, addr := range nodeInfo.node.Status.Addresses {
				if addr.Type == v1.NodeInternalIP {
					address = addr.Address
					break
				}
			}
		}
		gpuMemInfos := []string{}
		pendingGPUMemInfo := ""
		usedGPUMemInNode := 0
		totalGPUMemInNode := nodeInfo.gpuTotalMemory
		if totalGPUMemInNode <= 0 {
			continue
		}
		for i := 0; i < maxGPUCount; i++ {
			gpuMemInfo := "0/0"
			if dev, ok := nodeInfo.devs[i]; ok {
				gpuMemInfo = dev.String()
				usedGPUMemInNode += dev.usedGPUMem
			}
			gpuMemInfos = append(gpuMemInfos, gpuMemInfo)
		}
		// check if there is pending dev
		if dev, ok := nodeInfo.devs[-1]; ok {
			pendingGPUMemInfo = fmt.Sprintf("%d", dev.usedGPUMem)
			usedGPUMemInNode += dev.usedGPUMem
		}
		nodeGPUMemInfo := fmt.Sprintf("%d/%d", usedGPUMemInNode, totalGPUMemInNode)
		var buf bytes.Buffer
		buf.WriteString(fmt.Sprintf("%s\t%s\t", nodeInfo.node.Name, address))
		for i := 0; i < maxGPUCount; i++ {
			buf.WriteString(fmt.Sprintf("%s\t", gpuMemInfos[i]))
		}
		if hasPendingGPU {
			buf.WriteString(fmt.Sprintf("%s\t", pendingGPUMemInfo))
		}
		buf.WriteString(fmt.Sprintf("%s\n", nodeGPUMemInfo))
		// Fixed: Fprint for the same reason as the header.
		fmt.Fprint(w, buf.String())
		if prtLineLen == 0 {
			prtLineLen = buf.Len() + 20
		}
		usedGPUMemInCluster += int64(usedGPUMemInNode)
		totalGPUMemInCluster += int64(totalGPUMemInNode)
	}
	var prtLine bytes.Buffer
	for i := 0; i < prtLineLen; i++ {
		prtLine.WriteString("-")
	}
	prtLine.WriteString("\n")
	fmt.Fprint(w, prtLine.String())
	fmt.Fprintf(w, "Allocated/Total GPU Memory In Cluster:\n")
	log.V(2).Infof("gpu: %s, allocated GPU Memory %s", strconv.FormatInt(totalGPUMemInCluster, 10),
		strconv.FormatInt(usedGPUMemInCluster, 10))

	var gpuUsage float64 = 0
	if totalGPUMemInCluster > 0 {
		gpuUsage = float64(usedGPUMemInCluster) / float64(totalGPUMemInCluster) * 100
	}
	fmt.Fprintf(w, "%s/%s (%d%%)\t\n",
		strconv.FormatInt(usedGPUMemInCluster, 10),
		strconv.FormatInt(totalGPUMemInCluster, 10),
		int64(gpuUsage))

	_ = w.Flush()
}
// getGPUMemoryInPod sums the gpu-mem resource limits over every container
// in the pod.
func getGPUMemoryInPod(pod v1.Pod) int {
	total := 0
	for _, c := range pod.Spec.Containers {
		val, ok := c.Resources.Limits[resourceName]
		if !ok {
			continue
		}
		total += int(val.Value())
	}
	return total
}

View File

@@ -0,0 +1,74 @@
package main
import (
"flag"
"fmt"
"os"
v1 "k8s.io/api/core/v1"
)
const (
	// Extended resource names reported by the gpushare device plugin.
	resourceName = "rainbond.com/gpu-mem"
	countName    = "rainbond.com/gpu-count"

	// Aliyun node labels describing the GPU hardware.
	gpuCountKey = "aliyun.accelerator/nvidia_count"
	cardNameKey = "aliyun.accelerator/nvidia_name"
	gpuMemKey   = "aliyun.accelerator/nvidia_mem"

	// Label key/value identifying the device-plugin pod on a node.
	pluginComponentKey   = "component"
	pluginComponentValue = "gpushare-device-plugin"

	// Env-var / annotation keys written by the gpushare scheduler extender.
	envNVGPUID             = "ALIYUN_COM_GPU_MEM_IDX"
	envPodGPUMemory        = "ALIYUN_COM_GPU_MEM_POD"
	envTOTALGPUMEMORY      = "ALIYUN_COM_GPU_MEM_DEV"
	gpushareAllocationFlag = "scheduler.framework.gpushare.allocation"
)
// init establishes the Kubernetes client before main runs.
// NOTE(review): kubeInit may call log.Fatalf, so a bad kubeconfig aborts the
// process during package initialization.
func init() {
	kubeInit()
	// checkpointInit()
}
// main parses flags, optionally takes a node name as the first positional
// argument, collects nodes and their active pods, and prints either a
// detailed (-d) or a summary GPU-sharing report.
func main() {
	details := flag.Bool("d", false, "details")
	flag.Parse()

	var nodeName string
	if args := flag.Args(); len(args) > 0 {
		nodeName = args[0]
	}

	var (
		pods  []v1.Pod
		nodes []v1.Node
		err   error
	)
	if nodeName == "" {
		// No node specified: inspect every GPU-sharing node in the cluster.
		nodes, err = getAllSharedGPUNode()
		if err == nil {
			pods, err = getActivePodsInAllNodes()
		}
	} else {
		nodes, err = getNodes(nodeName)
		if err == nil {
			pods, err = getActivePodsByNode(nodeName)
		}
	}
	if err != nil {
		fmt.Printf("Failed due to %v", err)
		os.Exit(1)
	}

	nodeInfos, err := buildAllNodeInfos(pods, nodes)
	if err != nil {
		fmt.Printf("Failed due to %v", err)
		os.Exit(1)
	}
	if *details {
		displayDetails(nodeInfos)
	} else {
		displaySummary(nodeInfos)
	}
}

View File

@@ -0,0 +1,271 @@
package main
import (
"encoding/json"
"fmt"
"strconv"
log "github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
)
// DeviceInfo aggregates GPU-memory usage for a single device on a node.
// The pseudo index -1 collects "pending" memory not yet bound to a device.
type DeviceInfo struct {
	idx         int      // device index on the node, or -1 for pending
	pods        []v1.Pod // pods with memory allocated on this device
	usedGPUMem  int      // allocated GPU memory (in the display memoryUnit)
	totalGPUMem int      // device capacity (in the display memoryUnit)
	node        v1.Node  // owning node
}
// String renders the device usage as "used/total", or just "used" for the
// pending pseudo-device (idx == -1), which has no capacity of its own.
func (d *DeviceInfo) String() string {
	if d.idx != -1 {
		return fmt.Sprintf("%d/%d", d.usedGPUMem, d.totalGPUMem)
	}
	return fmt.Sprintf("%d", d.usedGPUMem)
}
// addGPUPod records a pod as consuming memory on this device.
// append handles a nil slice, so the original explicit empty-slice
// initialization was redundant and has been removed.
func (d *DeviceInfo) addGPUPod(pod v1.Pod) {
	d.pods = append(d.pods, pod)
}
// NodeInfo caches a node together with its pods and per-device GPU usage.
type NodeInfo struct {
	pods           []v1.Pod            // active pods scheduled on this node
	node           v1.Node             // the node object itself
	devs           map[int]*DeviceInfo // per-GPU usage keyed by device index (-1 = pending)
	gpuCount       int                 // number of physical GPUs on the node
	gpuTotalMemory int                 // total GPU memory across all devices
	pluginPod      v1.Pod              // cached device-plugin pod (see acquirePluginPod)
}
// The key function
// The key function
// buildAllNodeInfos groups pods by node and fills in per-device usage for
// every node that exposes GPU memory. It also derives the global display
// unit (setUnit) from the first GPU node seen. The returned error is
// currently always nil; per-node failures are logged and skipped.
func buildAllNodeInfos(allPods []v1.Pod, nodes []v1.Node) ([]*NodeInfo, error) {
	nodeInfos := buildNodeInfoWithPods(allPods, nodes)
	for _, info := range nodeInfos {
		if info.gpuTotalMemory > 0 {
			setUnit(info.gpuTotalMemory, info.gpuCount)
			err := info.buildDeviceInfo()
			if err != nil {
				log.Warningf("Failed due to %v", err)
				continue
			}
		}
	}
	return nodeInfos, nil
}
// acquirePluginPod lazily locates and caches the gpushare device-plugin pod
// running on this node, identified by its component label. The zero Pod is
// returned when no such pod exists.
func (n *NodeInfo) acquirePluginPod() v1.Pod {
	if n.pluginPod.Name != "" {
		return n.pluginPod
	}
	for _, p := range n.pods {
		if p.Labels[pluginComponentKey] == pluginComponentValue {
			n.pluginPod = p
			break
		}
	}
	return n.pluginPod
}
// getTotalGPUMemory reads the node's allocatable gpu-mem resource,
// returning 0 when the node does not advertise it.
func getTotalGPUMemory(node v1.Node) int {
	if val, ok := node.Status.Allocatable[resourceName]; ok {
		return int(val.Value())
	}
	return 0
}
// getGPUCountInNode reads the node's allocatable gpu-count resource,
// returning 0 when the node does not advertise it.
func getGPUCountInNode(node v1.Node) int {
	if val, ok := node.Status.Allocatable[countName]; ok {
		return int(val.Value())
	}
	return 0
}
// buildNodeInfoWithPods creates one NodeInfo per node and attaches each pod
// to the node it is scheduled on. Device entries are pre-created with an
// even split of the node's total GPU memory.
// NOTE(review): if the same node appears twice in nodes, its pods would be
// appended twice — assumes callers pass a deduplicated node list.
func buildNodeInfoWithPods(pods []v1.Pod, nodes []v1.Node) []*NodeInfo {
	nodeMap := map[string]*NodeInfo{}
	nodeList := []*NodeInfo{}

	for _, node := range nodes {
		var info *NodeInfo = &NodeInfo{}
		if value, ok := nodeMap[node.Name]; ok {
			info = value
		} else {
			nodeMap[node.Name] = info
			info.node = node
			info.pods = []v1.Pod{}
			info.gpuCount = getGPUCountInNode(node)
			info.gpuTotalMemory = getTotalGPUMemory(node)
			info.devs = map[int]*DeviceInfo{}

			// Pre-create a DeviceInfo per physical GPU; capacity is assumed
			// to be evenly divided across devices — TODO confirm.
			for i := 0; i < info.gpuCount; i++ {
				dev := &DeviceInfo{
					pods:        []v1.Pod{},
					idx:         i,
					totalGPUMem: info.gpuTotalMemory / info.gpuCount,
					node:        info.node,
				}
				info.devs[i] = dev
			}
		}

		// Attach the pods scheduled on this node.
		for _, pod := range pods {
			if pod.Spec.NodeName == node.Name {
				info.pods = append(info.pods, pod)
			}
		}
	}

	for _, v := range nodeMap {
		nodeList = append(nodeList, v)
	}
	return nodeList
}
// hasPendingGPUMemory reports whether this node has GPU memory allocated to
// pods that are not yet bound to a concrete device (tracked under index -1).
func (n *NodeInfo) hasPendingGPUMemory() bool {
	if _, ok := n.devs[-1]; ok {
		return true
	}
	return false
}
// Get used GPUs in checkpoint
// buildDeviceInfo walks the node's pods and accumulates each pod's GPU
// memory usage into the per-device entries, creating entries on demand
// (including the pending pseudo-device -1). Always returns nil.
func (n *NodeInfo) buildDeviceInfo() error {
	totalGPUMem := 0
	if n.gpuCount > 0 {
		// Assumes memory is evenly divided across devices — TODO confirm.
		totalGPUMem = n.gpuTotalMemory / n.gpuCount
	}
GPUSearchLoop:
	for _, pod := range n.pods {
		// Skip pods that request no shared GPU memory.
		if gpuMemoryInPod(pod) <= 0 {
			continue GPUSearchLoop
		}
		for devID, usedGPUMem := range n.getDeivceInfo(pod) {
			if n.devs[devID] == nil {
				n.devs[devID] = &DeviceInfo{
					pods:        []v1.Pod{},
					idx:         devID,
					totalGPUMem: totalGPUMem,
					node:        n.node,
				}
			}
			n.devs[devID].usedGPUMem += usedGPUMem
			n.devs[devID].pods = append(n.devs[devID].pods, pod)
		}
	}
	return nil
}
// getDeivceInfo returns the pod's GPU memory usage keyed by device index.
// Precedence: the scheduler's allocation annotation; otherwise the legacy
// single-device annotation (envNVGPUID), falling back to index -1 (pending)
// when that is absent or unparsable.
// NOTE(review): the name keeps the historical "Deivce" typo because other
// code in this package calls it.
func (n *NodeInfo) getDeivceInfo(pod v1.Pod) map[int]int {
	var err error
	id := -1

	// GetAllocation always returns a non-nil map.
	allocation := GetAllocation(&pod)
	if len(allocation) != 0 {
		return allocation
	}

	if len(pod.ObjectMeta.Annotations) > 0 {
		value, found := pod.ObjectMeta.Annotations[envNVGPUID]
		if found {
			id, err = strconv.Atoi(value)
			if err != nil {
				log.Warningf("Failed to parse dev id %s due to %v for pod %s in ns %s",
					value,
					err,
					pod.Name,
					pod.Namespace)
				id = -1
			}
		} else {
			// Fixed: the original format string had three %s verbs but only
			// two arguments (go vet error, garbled log line).
			log.Warningf("Failed to get dev id for pod %s in ns %s",
				pod.Name,
				pod.Namespace)
		}
	}
	allocation[id] = gpuMemoryInPod(pod)
	return allocation
}
// hasPendingGPUMemory reports whether any node in nodeInfos carries pending
// (device-unbound) GPU memory allocations.
func hasPendingGPUMemory(nodeInfos []*NodeInfo) (found bool) {
	for _, info := range nodeInfos {
		if !info.hasPendingGPUMemory() {
			continue
		}
		return true
	}
	return false
}
// getNodes fetches a single node by name and wraps it in a slice.
// Fixed: the error is now checked before dereferencing the result — the
// original dereferenced a nil *Node (panic) whenever the lookup failed.
func getNodes(nodeName string) ([]v1.Node, error) {
	node, err := clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return []v1.Node{*node}, nil
}
// isGPUSharingNode reports whether the node advertises a positive amount of
// the shared gpu-mem resource.
func isGPUSharingNode(node v1.Node) bool {
	value, ok := node.Status.Allocatable[resourceName]
	return ok && int(value.Value()) > 0
}
var (
memoryUnit = ""
)
// setUnit derives the global display unit once, from the first GPU node
// seen: a per-device value above 100 is treated as MiB, otherwise GiB.
// NOTE(review): the 100 threshold is a heuristic — confirm against the
// device plugin's reporting unit.
func setUnit(gpuMemory, gpuCount int) {
	if memoryUnit != "" || gpuCount == 0 {
		return
	}
	if perDevice := gpuMemory / gpuCount; perDevice > 100 {
		memoryUnit = "MiB"
	} else {
		memoryUnit = "GiB"
	}
}
// GetAllocation decodes the scheduler's per-container GPU allocation from
// the pod annotation and returns the total allocated memory keyed by GPU
// index. An empty (non-nil) map is returned when the annotation is absent
// or malformed.
func GetAllocation(pod *v1.Pod) map[int]int {
	result := map[int]int{}

	annotations := pod.ObjectMeta.Annotations
	if annotations == nil {
		return result
	}
	raw, ok := annotations[gpushareAllocationFlag]
	if !ok {
		return result
	}

	// Annotation layout: container ordinal -> (gpu index string -> memory).
	var perContainer map[int]map[string]int
	if err := json.Unmarshal([]byte(raw), &perContainer); err != nil {
		return result
	}
	for _, devices := range perContainer {
		for idxStr, mem := range devices {
			idx, err := strconv.Atoi(idxStr)
			if err != nil {
				log.Errorf("failed to get gpu memory from pod annotation,reason: %v", err)
				return map[int]int{}
			}
			result[idx] += mem
		}
	}
	return result
}

View File

@@ -0,0 +1,134 @@
package main
import (
"fmt"
"os"
"path"
"time"
log "github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
clientConfig clientcmd.ClientConfig
clientset *kubernetes.Clientset
restConfig *rest.Config
retries = 5
)
// kubeInit builds the package-level Kubernetes clientset from $KUBECONFIG
// (or ~/.kube/config). Any failure is fatal: the tool cannot operate
// without an API-server connection.
func kubeInit() {
	kubeconfigFile := os.Getenv("KUBECONFIG")
	if kubeconfigFile == "" {
		kubeconfigFile = path.Join(os.Getenv("HOME"), "/.kube/config")
	}
	// Fail fast with a clear message when the kubeconfig is missing.
	if _, err := os.Stat(kubeconfigFile); err != nil {
		log.Fatalf("kubeconfig %s failed to find due to %v, please set KUBECONFIG env", kubeconfigFile, err)
	}

	var err error
	restConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfigFile)
	if err != nil {
		log.Fatalf("Failed due to %v", err)
	}

	clientset, err = kubernetes.NewForConfig(restConfig)
	if err != nil {
		log.Fatalf("Failed due to %v", err)
	}
}
// podInfo identifies a pod by name and namespace.
type podInfo struct {
	name      string
	namespace string
}

// equal reports whether two podInfo values identify the same pod.
func (p podInfo) equal(p1 podInfo) bool {
	return p.name == p1.name && p.namespace == p1.namespace
}
// getActivePodsByNode lists pods scheduled on nodeName across all
// namespaces, retrying up to `retries` times with a short pause, and
// filters out terminated pods.
func getActivePodsByNode(nodeName string) ([]v1.Pod, error) {
	selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName})
	pods, err := clientset.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
		FieldSelector: selector.String(),
		LabelSelector: labels.Everything().String(),
	})
	for i := 0; i < retries && err != nil; i++ {
		pods, err = clientset.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
			FieldSelector: selector.String(),
			LabelSelector: labels.Everything().String(),
		})
		time.Sleep(100 * time.Millisecond)
	}
	if err != nil {
		// Fixed: include the underlying error instead of discarding it.
		return []v1.Pod{}, fmt.Errorf("failed to get Pods in node %v: %v", nodeName, err)
	}
	return filterActivePods(pods.Items), nil
}
// getActivePodsInAllNodes lists pods in every namespace, retrying up to
// `retries` times with a short pause, and filters out terminated pods.
func getActivePodsInAllNodes() ([]v1.Pod, error) {
	pods, err := clientset.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
		LabelSelector: labels.Everything().String(),
	})
	for i := 0; i < retries && err != nil; i++ {
		pods, err = clientset.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
			LabelSelector: labels.Everything().String(),
		})
		time.Sleep(100 * time.Millisecond)
	}
	if err != nil {
		// Fixed: include the underlying error instead of discarding it.
		return []v1.Pod{}, fmt.Errorf("failed to get Pods: %v", err)
	}
	return filterActivePods(pods.Items), nil
}
// filterActivePods drops pods that have already terminated
// (phase Succeeded or Failed).
func filterActivePods(pods []v1.Pod) (activePods []v1.Pod) {
	activePods = []v1.Pod{}
	for _, p := range pods {
		switch p.Status.Phase {
		case v1.PodSucceeded, v1.PodFailed:
			// terminated — skip
		default:
			activePods = append(activePods, p)
		}
	}
	return activePods
}
// getAllSharedGPUNode lists every node in the cluster that advertises the
// shared GPU memory resource.
func getAllSharedGPUNode() ([]v1.Node, error) {
	nodes := []v1.Node{}
	allNodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nodes, err
	}

	for i := range allNodes.Items {
		if isGPUSharingNode(allNodes.Items[i]) {
			nodes = append(nodes, allNodes.Items[i])
		}
	}
	return nodes, nil
}
// gpuMemoryInPod sums the gpu-mem resource limits across all of the pod's
// containers.
func gpuMemoryInPod(pod v1.Pod) int {
	total := 0
	for i := range pod.Spec.Containers {
		if val, ok := pod.Spec.Containers[i].Resources.Limits[resourceName]; ok {
			total += int(val.Value())
		}
	}
	return total
}

View File

@@ -0,0 +1,78 @@
package main
import (
"flag"
"fmt"
"io/ioutil"
"time"
"github.com/AliyunContainerService/gpushare-device-plugin/pkg/gpu/nvidia"
"github.com/AliyunContainerService/gpushare-device-plugin/pkg/kubelet/client"
log "github.com/golang/glog"
"k8s.io/client-go/rest"
)
// Command-line flags for the gpushare device plugin.
var (
	mps              = flag.Bool("mps", false, "Enable or Disable MPS")
	healthCheck      = flag.Bool("health-check", false, "Enable or disable Health check")
	// Fixed: typo "Memroy" in the user-visible help text.
	memoryUnit       = flag.String("memory-unit", "GiB", "Set memoryUnit of the GPU Memory, support 'GiB' and 'MiB'")
	queryFromKubelet = flag.Bool("query-kubelet", false, "Query pending pods from kubelet instead of kube-apiserver")
	kubeletAddress   = flag.String("kubelet-address", "0.0.0.0", "Kubelet IP Address")
	kubeletPort      = flag.Uint("kubelet-port", 10250, "Kubelet listened Port")
	clientCert       = flag.String("client-cert", "", "Kubelet TLS client certificate")
	clientKey        = flag.String("client-key", "", "Kubelet TLS client key")
	token            = flag.String("token", "", "Kubelet client bearer token")
	timeout          = flag.Int("timeout", 10, "Kubelet client http timeout duration")
)
// buildKubeletClient constructs the kubelet HTTP client from the CLI flags.
// With no cert/key/token flags it assumes in-cluster mode and reads the
// mounted service-account token; a read failure (or client construction
// failure) panics so the plugin fails fast at startup.
func buildKubeletClient() *client.KubeletClient {
	if *clientCert == "" && *clientKey == "" && *token == "" {
		tokenByte, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
		if err != nil {
			panic(fmt.Errorf("in cluster mode, find token failed, error: %v", err))
		}
		tokenStr := string(tokenByte)
		token = &tokenStr
	}
	kubeletClient, err := client.NewKubeletClient(&client.KubeletClientConfig{
		Address: *kubeletAddress,
		Port:    *kubeletPort,
		TLSClientConfig: rest.TLSClientConfig{
			// The kubelet's serving certificate is not verified.
			Insecure:   true,
			ServerName: "gpushare-device-plugin",
			CertFile:   *clientCert,
			KeyFile:    *clientKey,
		},
		BearerToken: *token,
		HTTPTimeout: time.Duration(*timeout) * time.Second,
	})
	if err != nil {
		panic(err)
	}
	return kubeletClient
}
// main starts the shared-GPU device plugin: parse flags, build the kubelet
// client, and run the manager loop (blocks until a fatal error).
func main() {
	flag.Parse()
	log.V(1).Infoln("Start gpushare device plugin")

	kubeletClient := buildKubeletClient()
	ngm := nvidia.NewSharedGPUManager(*mps, *healthCheck, *queryFromKubelet, translatememoryUnits(*memoryUnit), kubeletClient)

	err := ngm.Run()
	if err != nil {
		log.Fatalf("Failed due to %v", err)
	}
}
// translatememoryUnits validates the -memory-unit flag value; any value
// other than "MiB" or "GiB" falls back to GiB with a warning.
func translatememoryUnits(value string) nvidia.MemoryUnit {
	unit := nvidia.MemoryUnit(value)
	if unit != nvidia.MiBPrefix && unit != nvidia.GiBPrefix {
		log.Warningf("Unsupported memory unit: %s, use memoryUnit Gi as default", value)
		unit = nvidia.GiBPrefix
	}
	return unit
}

View File

@@ -0,0 +1,57 @@
package main
import (
"flag"
"fmt"
"github.com/AliyunContainerService/gpushare-device-plugin/pkg/kubelet/client"
"io/ioutil"
"k8s.io/client-go/rest"
"time"
)
var (
clientCert string
clientKey string
token string
timeout int
)
// main is a small diagnostic command: it builds a kubelet client against
// 127.0.0.1:10250 — falling back to the mounted service-account token when
// no cert/key/token flags are supplied — and prints the node's running pods.
func main() {
	flag.StringVar(&clientCert, "client-cert", "", "")
	flag.StringVar(&clientKey, "client-key", "", "")
	flag.StringVar(&token, "token", "", "")
	flag.IntVar(&timeout, "timeout", 10, "")
	flag.Parse()

	if clientCert == "" && clientKey == "" && token == "" {
		// In-cluster mode: read the service-account bearer token.
		tokenByte, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
		if err != nil {
			panic(fmt.Errorf("in cluster mode, find token failed, error: %v", err))
		}
		token = string(tokenByte)
	}

	c, err := client.NewKubeletClient(&client.KubeletClientConfig{
		Address: "127.0.0.1",
		Port:    10250,
		TLSClientConfig: rest.TLSClientConfig{
			// The kubelet serving certificate is not verified.
			Insecure:   true,
			ServerName: "kubelet",
			CertFile:   clientCert,
			KeyFile:    clientKey,
		},
		BearerToken: token,
		HTTPTimeout: time.Duration(timeout) * time.Second,
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	podsList, err := c.GetNodeRunningPods()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(podsList)
}

View File

@@ -0,0 +1,43 @@
apiVersion: v1
kind: Service
metadata:
name: binpack-1
labels:
app: binpack-1
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: binpack-1
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: binpack-1
labels:
app: binpack-1
spec:
replicas: 3
serviceName: "binpack-1"
selector: # define how the deployment finds the pods it manages
matchLabels:
app: binpack-1
template: # define the pods specifications
metadata:
labels:
app: binpack-1
spec:
containers:
- name: binpack-1
image: cheyang/gpu-player:v2
resources:
limits:
# GiB
aliyun.com/gpu-mem: 2

View File

@@ -0,0 +1,17 @@
apiVersion: batch/v1
kind: Job
metadata:
name: gpu-job
spec:
backoffLimit: 0
template:
spec:
containers:
- name: gpu-job
image: alpine:3.6
resources:
limits:
# GiB
aliyun.com/gpu-mem: 2
command: ["sleep","30s"]
restartPolicy: Never

View File

@@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: gpushare-device-plugin-ds
namespace: kube-system
spec:
selector:
matchLabels:
component: gpushare-device-plugin
app: gpushare
name: gpushare-device-plugin-ds
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
component: gpushare-device-plugin
app: gpushare
name: gpushare-device-plugin-ds
spec:
serviceAccount: gpushare-device-plugin
hostNetwork: true
nodeSelector:
gpushare: "true"
containers:
- image: registry.cn-hangzhou.aliyuncs.com/offends/rainbond:gpushare-device-plugin
name: gpushare
# Make this pod as Guaranteed pod which will never be evicted because of node's resource consumption.
command:
- gpushare-device-plugin-v2
- -logtostderr
- --v=5
- --memory-unit=GiB
resources:
limits:
memory: "300Mi"
cpu: "1"
requests:
memory: "300Mi"
cpu: "1"
env:
- name: KUBECONFIG
value: /etc/kubernetes/kubelet.conf
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
volumeMounts:
- name: device-plugin
mountPath: /var/lib/kubelet/device-plugins
volumes:
- name: device-plugin
hostPath:
path: /var/lib/kubelet/device-plugins

View File

@@ -0,0 +1,60 @@
# rbac.yaml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: gpushare-device-plugin
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/proxy
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- update
- patch
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- update
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: gpushare-device-plugin
namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: gpushare-device-plugin
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gpushare-device-plugin
subjects:
- kind: ServiceAccount
name: gpushare-device-plugin
namespace: kube-system

View File

@@ -0,0 +1,198 @@
package nvidia
import (
"fmt"
"time"
log "github.com/golang/glog"
"golang.org/x/net/context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
)
var (
clientTimeout = 30 * time.Second
lastAllocateTime time.Time
)
// create docker client
func init() {
kubeInit()
}
// buildErrResponse builds a per-container AllocateResponse whose env vars
// signal that no GPU had enough free memory to satisfy podReqGPU; the
// kubelet still receives a well-formed response instead of an RPC error.
func buildErrResponse(reqs *pluginapi.AllocateRequest, podReqGPU uint) *pluginapi.AllocateResponse {
	responses := pluginapi.AllocateResponse{}
	for _, req := range reqs.ContainerRequests {
		response := pluginapi.ContainerAllocateResponse{
			Envs: map[string]string{
				envNVGPU: fmt.Sprintf("no-gpu-has-%d%s-to-run", podReqGPU, metric),
				// Fixed: was fmt.Sprintf("-1") — a constant needs no Sprintf.
				EnvResourceIndex:       "-1",
				EnvResourceByPod:       fmt.Sprintf("%d", podReqGPU),
				EnvResourceByContainer: fmt.Sprintf("%d", uint(len(req.DevicesIDs))),
				EnvResourceByDev:       fmt.Sprintf("%d", getGPUMemory()),
			},
		}
		responses.ContainerResponses = append(responses.ContainerResponses, &response)
	}
	return &responses
}
// Allocate which return list of devices.
// Allocate implements the device-plugin Allocate RPC: it matches the
// request against a candidate pending pod (by requested GPU memory), picks
// the device the scheduler assigned, fills the container env vars, and
// patches the pod annotation to mark the assignment as complete. On any
// failure a well-formed error response is returned with a nil error.
func (m *NvidiaDevicePlugin) Allocate(ctx context.Context,
	reqs *pluginapi.AllocateRequest) (*pluginapi.AllocateResponse, error) {
	responses := pluginapi.AllocateResponse{}
	log.Infoln("----Allocating GPU for gpu mem is started----")
	var (
		podReqGPU uint
		found     bool
		assumePod *v1.Pod
	)

	// The number of requested fake device IDs encodes the requested memory.
	for _, req := range reqs.ContainerRequests {
		podReqGPU += uint(len(req.DevicesIDs))
	}
	log.Infof("RequestPodGPUs: %d", podReqGPU)

	m.Lock()
	defer m.Unlock()
	log.Infoln("checking...")
	pods, err := getCandidatePods(m.queryKubelet, m.kubeletClient)
	if err != nil {
		// Fixed: "requst" typo in log message.
		log.Infof("invalid allocation request: Failed to find candidate pods due to %v", err)
		return buildErrResponse(reqs, podReqGPU), nil
	}

	if log.V(4) {
		for _, pod := range pods {
			log.Infof("Pod %s in ns %s request GPU Memory %d with timestamp %v",
				pod.Name,
				pod.Namespace,
				getGPUMemoryFromPodResource(pod),
				getAssumeTimeFromPodAnnotation(pod))
		}
	}

	// Pick the first pending pod whose total request matches this Allocate.
	for _, pod := range pods {
		if getGPUMemoryFromPodResource(pod) == podReqGPU {
			log.Infof("Found Assumed GPU shared Pod %s in ns %s with GPU Memory %d",
				pod.Name,
				pod.Namespace,
				podReqGPU)
			assumePod = pod
			found = true
			break
		}
	}

	if found {
		id := getGPUIDFromPodAnnotation(assumePod)
		if id < 0 {
			// Fixed: the original Warningf had an argument but no verb
			// (go vet printf error).
			log.Warningf("Failed to get the dev for pod %v", assumePod)
		}

		candidateDevID := ""
		if id >= 0 {
			ok := false
			candidateDevID, ok = m.GetDeviceNameByIndex(uint(id))
			if !ok {
				log.Warningf("Failed to find the dev for pod %v because it's not able to find dev with index %d",
					assumePod,
					id)
				id = -1
			}
		}

		if id < 0 {
			return buildErrResponse(reqs, podReqGPU), nil
		}
		log.Infof("gpu index %v,uuid: %v", id, candidateDevID)
		// 1. Create container requests
		for _, req := range reqs.ContainerRequests {
			reqGPU := uint(len(req.DevicesIDs))
			response := pluginapi.ContainerAllocateResponse{
				Envs: map[string]string{
					envNVGPU:               fmt.Sprintf("%v", id),
					EnvResourceIndex:       fmt.Sprintf("%d", id),
					EnvResourceByPod:       fmt.Sprintf("%d", podReqGPU),
					EnvResourceByContainer: fmt.Sprintf("%d", reqGPU),
					EnvResourceByDev:       fmt.Sprintf("%d", getGPUMemory()),
				},
			}
			if m.disableCGPUIsolation {
				response.Envs["CGPU_DISABLE"] = "true"
			}
			responses.ContainerResponses = append(responses.ContainerResponses, &response)
		}

		// 2. Update Pod spec
		patchedAnnotationBytes, err := patchPodAnnotationSpecAssigned()
		if err != nil {
			return buildErrResponse(reqs, podReqGPU), nil
		}
		_, err = clientset.CoreV1().Pods(assumePod.Namespace).Patch(assumePod.Name, types.StrategicMergePatchType, patchedAnnotationBytes)
		if err != nil {
			// the object has been modified; please apply your changes to the latest version and try again
			if err.Error() == OptimisticLockErrorMsg {
				// Retry once on an optimistic-lock conflict.
				_, err = clientset.CoreV1().Pods(assumePod.Namespace).Patch(assumePod.Name, types.StrategicMergePatchType, patchedAnnotationBytes)
				if err != nil {
					log.Warningf("Failed due to %v", err)
					return buildErrResponse(reqs, podReqGPU), nil
				}
			} else {
				log.Warningf("Failed due to %v", err)
				return buildErrResponse(reqs, podReqGPU), nil
			}
		}
	} else if len(m.devNameMap) == 1 {
		// Single-GPU node: skip the pod search and bind the only device.
		var devName string
		var devIndex uint
		for d, index := range m.devNameMap {
			devName = d
			devIndex = index
			break
		}
		log.Infof("this node has only one gpu device,skip to search pod and directly specify the device %v(%v) for container", devIndex, devName)
		for _, req := range reqs.ContainerRequests {
			reqGPU := uint(len(req.DevicesIDs))
			response := pluginapi.ContainerAllocateResponse{
				Envs: map[string]string{
					envNVGPU:               devName,
					EnvResourceIndex:       fmt.Sprintf("%d", devIndex),
					EnvResourceByPod:       fmt.Sprintf("%d", podReqGPU),
					EnvResourceByContainer: fmt.Sprintf("%d", reqGPU),
					EnvResourceByDev:       fmt.Sprintf("%d", getGPUMemory()),
				},
			}
			if m.disableCGPUIsolation {
				response.Envs["CGPU_DISABLE"] = "true"
			}
			responses.ContainerResponses = append(responses.ContainerResponses, &response)
		}
		log.Infof("get allocated GPUs info %v", responses)
		return &responses, nil
	} else {
		// Fixed: "requst" typos in log message.
		log.Warningf("invalid allocation request: request GPU memory %d can't be satisfied.",
			podReqGPU)
		return buildErrResponse(reqs, podReqGPU), nil
	}

	podName := ""
	if assumePod != nil {
		podName = assumePod.Name
	}
	log.Infof("pod %v, new allocated GPUs info %v", podName, &responses)
	log.Infof("----Allocating GPU for gpu mem for %v is ended----", podName)
	return &responses, nil
}

View File

@@ -0,0 +1,36 @@
package nvidia
import (
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
)
// MemoryUnit describes GPU Memory, now only supports Gi, Mi
type MemoryUnit string

const (
	// Extended resource names advertised to the kubelet.
	resourceName  = "rainbond.com/gpu-mem"
	resourceCount = "rainbond.com/gpu-count"

	// Unix socket this plugin registers with the kubelet.
	serverSock = pluginapi.DevicePluginPath + "aliyungpushare.sock"

	// Exact apiserver conflict message used to detect optimistic-lock retries.
	OptimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again"

	// NVML health checks to run.
	allHealthChecks = "xids"

	// Docker container labels used to tell sandboxes from app containers.
	containerTypeLabelKey       = "io.kubernetes.docker.type"
	containerTypeLabelSandbox   = "podsandbox"
	containerTypeLabelContainer = "container"
	containerLogPathLabelKey    = "io.kubernetes.container.logpath"
	sandboxIDLabelKey           = "io.kubernetes.sandbox.id"

	// Environment variables injected into allocated containers.
	envNVGPU               = "NVIDIA_VISIBLE_DEVICES"
	EnvResourceIndex       = "ALIYUN_COM_GPU_MEM_IDX"
	EnvResourceByPod       = "ALIYUN_COM_GPU_MEM_POD"
	EnvResourceByContainer = "ALIYUN_COM_GPU_MEM_CONTAINER"
	EnvResourceByDev       = "ALIYUN_COM_GPU_MEM_DEV"
	EnvAssignedFlag        = "ALIYUN_COM_GPU_MEM_ASSIGNED"
	EnvResourceAssumeTime  = "ALIYUN_COM_GPU_MEM_ASSUME_TIME"
	EnvResourceAssignTime  = "ALIYUN_COM_GPU_MEM_ASSIGN_TIME"

	// Node label that disables cGPU isolation for the node.
	EnvNodeLabelForDisableCGPU = "cgpu.disable.isolation"

	// Supported display units.
	GiBPrefix = MemoryUnit("GiB")
	MiBPrefix = MemoryUnit("MiB")
)

View File

@@ -0,0 +1,30 @@
package nvidia
import (
"io/ioutil"
"runtime"
log "github.com/golang/glog"
)
// StackTrace returns a formatted stack trace of the current goroutine or,
// when all is true, of every goroutine. The buffer is doubled until the
// trace fits. Fixed: the result is now trimmed to the bytes actually
// written — the original returned the whole oversized buffer, so callers
// (e.g. coredump) received trailing NUL padding.
func StackTrace(all bool) string {
	buf := make([]byte, 10240)
	for {
		size := runtime.Stack(buf, all)
		if size < len(buf) {
			return string(buf[:size])
		}
		buf = make([]byte, len(buf)<<1)
	}
}
func coredump(fileName string) {
log.Infoln("Dump stacktrace to ", fileName)
ioutil.WriteFile(fileName, []byte(StackTrace(true)), 0644)
}

View File

@@ -0,0 +1,111 @@
package nvidia
import (
"fmt"
"github.com/AliyunContainerService/gpushare-device-plugin/pkg/kubelet/client"
"syscall"
"os"
"time"
"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml"
"github.com/fsnotify/fsnotify"
log "github.com/golang/glog"
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
)
// sharedGPUManager wires together the device plugin's runtime options.
type sharedGPUManager struct {
	enableMPS     bool                  // expose devices with MPS enabled
	healthCheck   bool                  // run device health checking
	queryKubelet  bool                  // query pending pods from the kubelet instead of the API server
	kubeletClient *client.KubeletClient // client used when queryKubelet is set
}

// NewSharedGPUManager builds a manager and records the global memory unit.
// NOTE(review): assigning the package-level `metric` here is a hidden side
// effect — construct the manager before using any unit conversions.
func NewSharedGPUManager(enableMPS, healthCheck, queryKubelet bool, bp MemoryUnit, client *client.KubeletClient) *sharedGPUManager {
	metric = bp
	return &sharedGPUManager{
		enableMPS:     enableMPS,
		healthCheck:   healthCheck,
		queryKubelet:  queryKubelet,
		kubeletClient: client,
	}
}
// Run initializes NVML and then runs the device plugin in a restart loop:
// the plugin is (re)started on startup, when the kubelet socket is
// re-created, and on SIGHUP; SIGQUIT dumps goroutine stacks to a file;
// any other signal shuts the plugin down cleanly.
func (ngm *sharedGPUManager) Run() error {
	log.V(1).Infoln("Loading NVML")
	if err := nvml.Init(); err != nil {
		log.V(1).Infof("Failed to initialize NVML: %s.", err)
		log.V(1).Infof("If this is a GPU node, did you set the docker default runtime to `nvidia`?")
		// Block forever so the DaemonSet pod stays up (and visible) on
		// nodes without a working NVIDIA runtime instead of crash-looping.
		select {}
	}
	defer func() { log.V(1).Infoln("Shutdown of NVML returned:", nvml.Shutdown()) }()

	log.V(1).Infoln("Fetching devices.")
	if getDeviceCount() == uint(0) {
		log.V(1).Infoln("No devices found. Waiting indefinitely.")
		select {}
	}

	log.V(1).Infoln("Starting FS watcher.")
	watcher, err := newFSWatcher(pluginapi.DevicePluginPath)
	if err != nil {
		log.V(1).Infoln("Failed to created FS watcher.")
		return err
	}
	defer watcher.Close()

	log.V(1).Infoln("Starting OS watcher.")
	sigs := newOSWatcher(syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)

	restart := true
	var devicePlugin *NvidiaDevicePlugin

L:
	for {
		if restart {
			if devicePlugin != nil {
				devicePlugin.Stop()
			}

			devicePlugin, err = NewNvidiaDevicePlugin(ngm.enableMPS, ngm.healthCheck, ngm.queryKubelet, ngm.kubeletClient)
			if err != nil {
				log.Warningf("Failed to get device plugin due to %v", err)
				os.Exit(1)
			} else if err = devicePlugin.Serve(); err != nil {
				log.Warningf("Failed to start device plugin due to %v", err)
				os.Exit(2)
			} else {
				restart = false
			}
		}

		select {
		case event := <-watcher.Events:
			// The kubelet re-created its socket: it restarted, so the plugin
			// must re-register itself.
			if event.Name == pluginapi.KubeletSocket && event.Op&fsnotify.Create == fsnotify.Create {
				log.V(1).Infof("inotify: %s created, restarting.", pluginapi.KubeletSocket)
				restart = true
			}
		case err := <-watcher.Errors:
			log.Warningf("inotify: %s", err)
		case s := <-sigs:
			switch s {
			case syscall.SIGHUP:
				log.V(1).Infoln("Received SIGHUP, restarting.")
				restart = true
			case syscall.SIGQUIT:
				t := time.Now()
				timestamp := fmt.Sprint(t.Format("20060102150405"))
				log.Infoln("generate core dump")
				coredump("/etc/kubernetes/go_" + timestamp + ".txt")
			default:
				log.V(1).Infof("Received signal \"%v\", shutting down.", s)
				devicePlugin.Stop()
				break L
			}
		}
	}
	return nil
}

View File

@@ -0,0 +1,152 @@
package nvidia
import (
"fmt"
"strings"
log "github.com/golang/glog"
"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml"
"golang.org/x/net/context"
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
)
var (
	// gpuMemory is the per-GPU memory size (in the unit selected by metric);
	// set once from the first device seen, 0 until then.
	gpuMemory uint
	// metric selects the memory unit; assigned in NewSharedGPUManager.
	metric MemoryUnit
)
// check aborts the process via log.Fatalln when err is non-nil; helper for
// NVML calls that cannot be meaningfully recovered from.
func check(err error) {
	if err != nil {
		log.Fatalln("Fatal:", err)
	}
}
// generateFakeDeviceID derives the ID of the fakeCounter-th memory-slice
// device of the real GPU realID, joining them with the "-_-" separator
// that extractRealDeviceID splits on.
func generateFakeDeviceID(realID string, fakeCounter uint) string {
	return fmt.Sprint(realID, "-_-", fakeCounter)
}
// extractRealDeviceID recovers the real GPU UUID from a fake device ID by
// taking everything before the first "-_-" separator. An input without the
// separator is returned unchanged.
func extractRealDeviceID(fakeDeviceID string) string {
	parts := strings.SplitN(fakeDeviceID, "-_-", 2)
	return parts[0]
}
// setGPUMemory records the per-GPU memory size in the package-level
// gpuMemory variable. raw is divided by 1024 when the configured metric is
// GiBPrefix (NVML reports memory in MiB per the caller in getDevices).
func setGPUMemory(raw uint) {
	v := raw
	if metric == GiBPrefix {
		v = raw / 1024
	}
	gpuMemory = v
	log.Infof("set gpu memory: %d", gpuMemory)
}
// getGPUMemory returns the per-GPU memory size recorded by setGPUMemory
// (0 until the first device has been inspected).
func getGPUMemory() uint {
	return gpuMemory
}
// getDeviceCount returns the number of GPUs visible to NVML; any NVML
// error is fatal (see check).
func getDeviceCount() uint {
	n, err := nvml.GetDeviceCount()
	check(err)
	return n
}
// getDevices enumerates the physical GPUs and expands each one into one
// fake pluginapi.Device per memory unit, so the kubelet can hand out GPU
// memory at unit granularity. It returns the fake device list plus a map
// from real device UUID to its /dev/nvidia<N> index. NVML errors are fatal.
func getDevices() ([]*pluginapi.Device, map[string]uint) {
	n, err := nvml.GetDeviceCount()
	check(err)

	var devs []*pluginapi.Device
	realDevNames := map[string]uint{}
	for i := uint(0); i < n; i++ {
		d, err := nvml.NewDevice(i)
		check(err)
		// realDevNames = append(realDevNames, d.UUID)
		var id uint
		log.Infof("Deivce %s's Path is %s", d.UUID, d.Path)
		// Derive the device index from its /dev/nvidia<N> node path.
		_, err = fmt.Sscanf(d.Path, "/dev/nvidia%d", &id)
		check(err)
		realDevNames[d.UUID] = id
		// var KiB uint64 = 1024
		log.Infof("# device Memory: %d", uint(*d.Memory))
		// The memory size of the first GPU fixes the slice count used for
		// every GPU on this node.
		if getGPUMemory() == uint(0) {
			setGPUMemory(uint(*d.Memory))
		}
		for j := uint(0); j < getGPUMemory(); j++ {
			fakeID := generateFakeDeviceID(d.UUID, j)
			if j == 0 {
				log.Infoln("# Add first device ID: " + fakeID)
			}
			if j == getGPUMemory()-1 {
				log.Infoln("# Add last device ID: " + fakeID)
			}
			devs = append(devs, &pluginapi.Device{
				ID:     fakeID,
				Health: pluginapi.Healthy,
			})
		}
	}

	return devs, realDevNames
}
// deviceExists reports whether devs contains a device with the given ID.
func deviceExists(devs []*pluginapi.Device, id string) bool {
	for i := range devs {
		if devs[i].ID == id {
			return true
		}
	}
	return false
}
// watchXIDs listens for NVML XID (critical error) events on the real GPUs
// behind devs and sends every fake device that belongs to a failing GPU on
// xids. It returns when ctx is cancelled. Devices too old for health
// checking are reported unhealthy immediately.
func watchXIDs(ctx context.Context, devs []*pluginapi.Device, xids chan<- *pluginapi.Device) {
	eventSet := nvml.NewEventSet()
	defer nvml.DeleteEventSet(eventSet)

	for _, d := range devs {
		realDeviceID := extractRealDeviceID(d.ID)
		err := nvml.RegisterEventForDevice(eventSet, nvml.XidCriticalError, realDeviceID)
		if err != nil && strings.HasSuffix(err.Error(), "Not Supported") {
			log.Infof("Warning: %s (%s) is too old to support healthchecking: %s. Marking it unhealthy.", realDeviceID, d.ID, err)
			xids <- d
			continue
		}

		if err != nil {
			// BUG FIX: the original used log.Fatalf("Fatal error:", err),
			// whose format string has no verb for err (go vet error).
			log.Fatalf("Fatal error: %v", err)
		}
	}

	for {
		// Poll for cancellation between waits.
		select {
		case <-ctx.Done():
			return
		default:
		}

		e, err := nvml.WaitForEvent(eventSet, 5000)
		if err != nil && e.Etype != nvml.XidCriticalError {
			continue
		}

		// FIXME: formalize the full list and document it.
		// http://docs.nvidia.com/deploy/xid-errors/index.html#topic_4
		// Application errors: the GPU should still be healthy
		if e.Edata == 31 || e.Edata == 43 || e.Edata == 45 {
			continue
		}

		if e.UUID == nil || len(*e.UUID) == 0 {
			// All devices are unhealthy
			for _, d := range devs {
				xids <- d
			}
			continue
		}

		// Mark every fake slice of the failing real GPU unhealthy.
		for _, d := range devs {
			if extractRealDeviceID(d.ID) == *e.UUID {
				xids <- d
			}
		}
	}
}

View File

@@ -0,0 +1,262 @@
package nvidia
import (
"encoding/json"
"fmt"
"github.com/AliyunContainerService/gpushare-device-plugin/pkg/kubelet/client"
log "github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"os"
"sort"
"time"
)
var (
	// clientset talks to the Kubernetes API server; initialized by kubeInit.
	clientset *kubernetes.Clientset
	// nodeName is taken from the NODE_NAME environment variable in kubeInit.
	nodeName string
	// retries bounds how often the kubelet /pods endpoint is re-queried.
	retries = 8
)
// kubeInit initializes the package-level clientset and nodeName. It prefers
// the kubeconfig named by the KUBECONFIG env var and falls back to the
// in-cluster configuration; any failure is fatal.
func kubeInit() {
	kubeconfigFile := os.Getenv("KUBECONFIG")
	var err error
	var config *rest.Config

	if _, err = os.Stat(kubeconfigFile); err != nil {
		log.V(5).Infof("kubeconfig %s failed to find due to %v", kubeconfigFile, err)
		config, err = rest.InClusterConfig()
		if err != nil {
			log.Fatalf("Failed due to %v", err)
		}
	} else {
		config, err = clientcmd.BuildConfigFromFlags("", kubeconfigFile)
		if err != nil {
			log.Fatalf("Failed due to %v", err)
		}
	}

	clientset, err = kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatalf("Failed due to %v", err)
	}

	nodeName = os.Getenv("NODE_NAME")
	if nodeName == "" {
		log.Fatalln("Please set env NODE_NAME")
	}
}
// disableCGPUIsolationOrNot reports whether this node carries the
// EnvNodeLabelForDisableCGPU label set to "true", i.e. it should run in
// plain GPU-sharing mode with cgpu isolation disabled.
func disableCGPUIsolationOrNot() (bool, error) {
	disable := false
	node, err := clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return disable, err
	}
	labels := node.ObjectMeta.Labels
	value, ok := labels[EnvNodeLabelForDisableCGPU]
	if ok && value == "true" {
		log.Infof("enable gpusharing mode and disable cgpu mode")
		disable = true
	}
	return disable, nil
}
// patchGPUCount publishes gpuCount as the node's physical-GPU count
// (resourceCount) in both Status.Capacity and Status.Allocatable, skipping
// the patch when the stored value is already current.
func patchGPUCount(gpuCount int) error {
	node, err := clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if val, ok := node.Status.Capacity[resourceCount]; ok {
		if val.Value() == int64(gpuCount) {
			log.Infof("No need to update Capacity %s", resourceCount)
			return nil
		}
	}

	// Patch a deep copy against the original via the node-status helper.
	newNode := node.DeepCopy()
	newNode.Status.Capacity[resourceCount] = *resource.NewQuantity(int64(gpuCount), resource.DecimalSI)
	newNode.Status.Allocatable[resourceCount] = *resource.NewQuantity(int64(gpuCount), resource.DecimalSI)
	// content := fmt.Sprintf(`[{"op": "add", "path": "/status/capacity/aliyun.com~gpu-count", "value": "%d"}]`, gpuCount)
	// _, err = clientset.CoreV1().Nodes().PatchStatus(nodeName, []byte(content))
	_, _, err = nodeutil.PatchNodeStatus(clientset.CoreV1(), types.NodeName(nodeName), node, newNode)
	if err != nil {
		log.Infof("Failed to update Capacity %s.", resourceCount)
	} else {
		log.Infof("Updated Capacity %s successfully.", resourceCount)
	}
	return err
}
// getPodList fetches the pod list from the kubelet and filters it down to
// pods in the Pending phase. It returns an error when no pending pod
// exists so callers can retry.
func getPodList(kubeletClient *client.KubeletClient) (*v1.PodList, error) {
	podList, err := kubeletClient.GetNodeRunningPods()
	if err != nil {
		return nil, err
	}

	// Best-effort debug dump of the raw list (marshal error ignored).
	list, _ := json.Marshal(podList)
	log.V(8).Infof("get pods list %v", string(list))

	resultPodList := &v1.PodList{}
	for _, metaPod := range podList.Items {
		if metaPod.Status.Phase != v1.PodPending {
			continue
		}
		resultPodList.Items = append(resultPodList.Items, metaPod)
	}

	if len(resultPodList.Items) == 0 {
		return nil, fmt.Errorf("not found pending pod")
	}

	return resultPodList, nil
}
// getPodListsByQueryKubelet returns the pending pods on this node, asking
// the kubelet first (with bounded retries and a short backoff) and falling
// back to listing the API server when the kubelet keeps failing.
func getPodListsByQueryKubelet(kubeletClient *client.KubeletClient) (*v1.PodList, error) {
	podList, err := getPodList(kubeletClient)
	for i := 0; i < retries && err != nil; i++ {
		// Warn about the previous failure and back off BEFORE retrying; the
		// original logged "failed ... retry" and slept even after the retry
		// had already succeeded.
		log.Warningf("failed to get pending pod list, retry")
		time.Sleep(100 * time.Millisecond)
		podList, err = getPodList(kubeletClient)
	}
	if err != nil {
		log.Warningf("not found from kubelet /pods api, start to list apiserver")
		podList, err = getPodListsByListAPIServer()
		if err != nil {
			return nil, err
		}
	}
	return podList, nil
}
// getPodListsByListAPIServer lists this node's Pending pods from the API
// server, retrying up to three times with a one-second backoff.
func getPodListsByListAPIServer() (*v1.PodList, error) {
	selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName, "status.phase": "Pending"})
	podList, err := clientset.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
		FieldSelector: selector.String(),
		LabelSelector: labels.Everything().String(),
	})
	for i := 0; i < 3 && err != nil; i++ {
		// Back off before retrying; the original slept even after a
		// successful retry.
		time.Sleep(1 * time.Second)
		podList, err = clientset.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
			FieldSelector: selector.String(),
			LabelSelector: labels.Everything().String(),
		})
	}
	if err != nil {
		// Include the underlying error; the original discarded it.
		return nil, fmt.Errorf("failed to get Pods assigned to node %v: %v", nodeName, err)
	}

	return podList, nil
}
// getPendingPodsInNode returns the Pending pods scheduled to this node,
// deduplicated by pod UID. Depending on queryKubelet the list comes either
// from the kubelet's /pods endpoint or from the API server.
func getPendingPodsInNode(queryKubelet bool, kubeletClient *client.KubeletClient) ([]v1.Pod, error) {
	// pods, err := m.lister.List(labels.Everything())
	// if err != nil {
	// 	return nil, err
	// }
	pods := []v1.Pod{}

	podIDMap := map[types.UID]bool{}

	var podList *v1.PodList
	var err error
	if queryKubelet {
		podList, err = getPodListsByQueryKubelet(kubeletClient)
		if err != nil {
			return nil, err
		}
	} else {
		podList, err = getPodListsByListAPIServer()
		if err != nil {
			return nil, err
		}
	}

	log.V(5).Infof("all pod list %v", podList.Items)

	// if log.V(5) {
	for _, pod := range podList.Items {
		// Skip (but report) pods whose NodeName does not match this node.
		if pod.Spec.NodeName != nodeName {
			log.Warningf("Pod name %s in ns %s is not assigned to node %s as expected, it's placed on node %s ",
				pod.Name,
				pod.Namespace,
				nodeName,
				pod.Spec.NodeName)
		} else {
			log.Infof("list pod %s in ns %s in node %s and status is %s",
				pod.Name,
				pod.Namespace,
				nodeName,
				pod.Status.Phase,
			)
			// Deduplicate by UID: the upstream list may repeat pods.
			if _, ok := podIDMap[pod.UID]; !ok {
				pods = append(pods, pod)
				podIDMap[pod.UID] = true
			}
		}
	}
	// }

	return pods, nil
}
// getCandidatePods returns the GPU-share pods on this node that the
// scheduler has assumed but that have not yet been assigned a device
// (assigned flag still "false"), ordered by ascending assume time.
func getCandidatePods(queryKubelet bool, client *client.KubeletClient) ([]*v1.Pod, error) {
	candidatePods := []*v1.Pod{}
	allPods, err := getPendingPodsInNode(queryKubelet, client)
	if err != nil {
		return candidatePods, err
	}
	for _, pod := range allPods {
		// Copy the loop variable so the taken address is stable.
		current := pod
		if isGPUMemoryAssumedPod(&current) {
			candidatePods = append(candidatePods, &current)
		}
	}

	if log.V(4) {
		for _, pod := range candidatePods {
			log.Infof("candidate pod %s in ns %s with timestamp %d is found.",
				pod.Name,
				pod.Namespace,
				getAssumeTimeFromPodAnnotation(pod))
		}
	}

	return makePodOrderdByAge(candidatePods), nil
}
// makePodOrderdByAge returns a new slice holding pods sorted by their GPU
// assume-time annotation, oldest first. The input slice is not modified.
func makePodOrderdByAge(pods []*v1.Pod) []*v1.Pod {
	ordered := make(orderedPodByAssumeTime, len(pods))
	copy(ordered, pods)
	sort.Sort(ordered)
	return ordered
}
// orderedPodByAssumeTime sorts pods by their GPU assume-time annotation,
// oldest first (sort.Interface implementation).
type orderedPodByAssumeTime []*v1.Pod

func (this orderedPodByAssumeTime) Len() int {
	return len(this)
}

// Less orders by ascending assume time.
// NOTE(review): uses <=, which is not a strict "less"; equal elements get
// an arbitrary relative order under sort.Sort — confirm that is acceptable.
func (this orderedPodByAssumeTime) Less(i, j int) bool {
	return getAssumeTimeFromPodAnnotation(this[i]) <= getAssumeTimeFromPodAnnotation(this[j])
}

func (this orderedPodByAssumeTime) Swap(i, j int) {
	this[i], this[j] = this[j], this[i]
}

View File

@@ -0,0 +1,182 @@
package nvidia
import (
"encoding/json"
"fmt"
"strconv"
"time"
log "github.com/golang/glog"
v1 "k8s.io/api/core/v1"
)
// updatePodAnnotations returns a deep copy of oldPod whose annotations mark
// the pod as assigned (EnvAssignedFlag = "true") and record the assignment
// time in nanoseconds (EnvResourceAssumeTime).
func updatePodAnnotations(oldPod *v1.Pod) (newPod *v1.Pod) {
	newPod = oldPod.DeepCopy()
	annotations := newPod.ObjectMeta.Annotations
	if len(annotations) == 0 {
		annotations = map[string]string{}
		newPod.ObjectMeta.Annotations = annotations
	}
	annotations[EnvAssignedFlag] = "true"
	annotations[EnvResourceAssumeTime] = strconv.FormatInt(time.Now().UnixNano(), 10)
	return newPod
}
// patchPodAnnotationSpecAssigned builds a JSON merge-patch body that sets
// the assigned flag to "true" and records the current time (nanoseconds)
// as the assume timestamp.
func patchPodAnnotationSpecAssigned() ([]byte, error) {
	annotations := map[string]string{
		EnvAssignedFlag:       "true",
		EnvResourceAssumeTime: strconv.FormatInt(time.Now().UnixNano(), 10),
	}
	patch := map[string]interface{}{
		"metadata": map[string]map[string]string{"annotations": annotations},
	}
	return json.Marshal(patch)
}
// getGPUIDFromPodAnnotation reads the allocated GPU index from the pod's
// EnvResourceIndex annotation, returning -1 when the annotation is missing
// or unparsable.
func getGPUIDFromPodAnnotation(pod *v1.Pod) (id int) {
	var err error
	id = -1
	if len(pod.ObjectMeta.Annotations) > 0 {
		value, found := pod.ObjectMeta.Annotations[EnvResourceIndex]
		if found {
			id, err = strconv.Atoi(value)
			if err != nil {
				log.Warningf("Failed to parse dev id %s due to %v for pod %s in ns %s",
					value,
					err,
					pod.Name,
					pod.Namespace)
				id = -1
			}
		} else {
			// BUG FIX: the original format string had three verbs but only
			// two arguments (go vet: missing argument).
			log.Warningf("Failed to get dev id for pod %s in ns %s",
				pod.Name,
				pod.Namespace)
		}
	}

	return id
}
// getAssumeTimeFromPodAnnotation returns the pod's assume timestamp
// (nanoseconds) from the EnvResourceAssumeTime annotation, or 0 when the
// annotation is absent or unparsable (a parse failure is logged).
func getAssumeTimeFromPodAnnotation(pod *v1.Pod) (assumeTime uint64) {
	assumeTimeStr, ok := pod.ObjectMeta.Annotations[EnvResourceAssumeTime]
	if !ok {
		return 0
	}
	parsed, err := strconv.ParseUint(assumeTimeStr, 10, 64)
	if err != nil {
		log.Warningf("Failed to parse assume Timestamp %s due to %v", assumeTimeStr, err)
		return 0
	}
	return parsed
}
// isGPUMemoryAssumedPod determines whether pod is a GPU-share pod that the
// scheduler has assumed (assume-time annotation present) but that has not
// yet been assigned a device (assigned flag still "false").
func isGPUMemoryAssumedPod(pod *v1.Pod) (assumed bool) {
	log.V(6).Infof("Determine if the pod %v is GPUSharedAssumed pod", pod)
	var ok bool

	// 1. Check if it's for GPU share
	if getGPUMemoryFromPodResource(pod) <= 0 {
		log.V(6).Infof("Pod %s in namespace %s has not GPU Memory Request, so it's not GPUSharedAssumed assumed pod.",
			pod.Name,
			pod.Namespace)
		return assumed
	}

	// 2. Check if it already has assume time
	if _, ok = pod.ObjectMeta.Annotations[EnvResourceAssumeTime]; !ok {
		log.V(4).Infof("No assume timestamp for pod %s in namespace %s, so it's not GPUSharedAssumed assumed pod.",
			pod.Name,
			pod.Namespace)
		return assumed
	}

	// 3. Check if it has been assigned already
	if assigned, ok := pod.ObjectMeta.Annotations[EnvAssignedFlag]; ok {
		if assigned == "false" {
			log.V(4).Infof("Found GPUSharedAssumed assumed pod %s in namespace %s.",
				pod.Name,
				pod.Namespace)
			assumed = true
		} else {
			log.Infof("GPU assigned Flag for pod %s exists in namespace %s and its assigned status is %s, so it's not GPUSharedAssumed assumed pod.",
				pod.Name,
				pod.Namespace,
				assigned)
		}
	} else {
		log.Warningf("No GPU assigned Flag for pod %s in namespace %s, so it's not GPUSharedAssumed assumed pod.",
			pod.Name,
			pod.Namespace)
	}

	return assumed
}
// getGPUMemoryFromPodResource sums the resourceName (GPU memory) limits of
// every container in pod.
func getGPUMemoryFromPodResource(pod *v1.Pod) uint {
	var total uint
	for i := range pod.Spec.Containers {
		if limit, ok := pod.Spec.Containers[i].Resources.Limits[resourceName]; ok {
			total += uint(limit.Value())
		}
	}
	return total
}
// podIsNotRunning reports whether pod is terminating, finished, or still
// Pending with only the PodScheduled condition set (scheduled but not yet
// initialized).
func podIsNotRunning(pod v1.Pod) bool {
	// A set deletionTimestamp means the pod is terminating.
	if pod.DeletionTimestamp != nil {
		return true
	}
	status := pod.Status
	// Pod is scheduled but not initialized.
	if status.Phase == v1.PodPending && podConditionTrueOnly(status.Conditions, v1.PodScheduled) {
		log.Infof("Pod %s only has PodScheduled, is not running", pod.Name)
		return true
	}
	// The original final return re-tested the deletion-timestamp and
	// pending/scheduled clauses handled above; they are unreachable here,
	// so only the terminal phases remain.
	return status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded
}
// notRunning reports whether every container status in statuses is either
// terminated or waiting, i.e. no container is currently running. An empty
// list counts as not running.
func notRunning(statuses []v1.ContainerStatus) bool {
	for i := range statuses {
		state := statuses[i].State
		if state.Terminated != nil || state.Waiting != nil {
			continue
		}
		return false
	}
	return true
}
// podConditionTrue reports whether conditions contains the expected
// condition type with status ConditionTrue.
func podConditionTrue(conditions []v1.PodCondition, expect v1.PodConditionType) bool {
	for i := range conditions {
		c := &conditions[i]
		if c.Type == expect && c.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}
// podConditionTrueOnly reports whether the expected condition (with status
// ConditionTrue) is the ONLY condition present in conditions.
func podConditionTrueOnly(conditions []v1.PodCondition, expect v1.PodConditionType) bool {
	if len(conditions) != 1 {
		return false
	}
	only := conditions[0]
	return only.Type == expect && only.Status == v1.ConditionTrue
}

View File

@@ -0,0 +1,241 @@
package nvidia
import (
"github.com/AliyunContainerService/gpushare-device-plugin/pkg/kubelet/client"
"net"
"os"
"path"
"sync"
"time"
log "github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc"
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
)
// NvidiaDevicePlugin implements the Kubernetes device plugin API
// (deviceplugin/v1beta1) for memory-sliced GPU sharing.
type NvidiaDevicePlugin struct {
	devs                 []*pluginapi.Device // fake devices, one per GPU memory unit
	realDevNames         []string            // real GPU UUIDs
	devNameMap           map[string]uint     // real GPU UUID -> /dev/nvidia<N> index
	devIndxMap           map[uint]string     // lazily built inverse of devNameMap
	socket               string              // unix socket path the plugin serves on
	mps                  bool                // expose devices in MPS mode
	healthCheck          bool                // enable Xid-based health checking
	disableCGPUIsolation bool                // node label disables cgpu isolation

	stop   chan struct{}          // closed by Stop to end helper goroutines
	health chan *pluginapi.Device // devices reported unhealthy by the watcher

	queryKubelet  bool                  // read pending pods from the kubelet
	kubeletClient *client.KubeletClient // client for the kubelet /pods endpoint

	server *grpc.Server // nil when the plugin is stopped

	sync.RWMutex
}
// NewNvidiaDevicePlugin returns an initialized NvidiaDevicePlugin. It
// enumerates the (memory-sliced) fake devices, publishes the physical GPU
// count on the node object and reads the per-node cgpu-isolation label.
func NewNvidiaDevicePlugin(mps, healthCheck, queryKubelet bool, client *client.KubeletClient) (*NvidiaDevicePlugin, error) {
	devs, devNameMap := getDevices()
	// Preallocate and drop the redundant blank identifier of the original
	// `for dev, _ := range` loop.
	devList := make([]string, 0, len(devNameMap))
	for dev := range devNameMap {
		devList = append(devList, dev)
	}

	log.Infof("Device Map: %v", devNameMap)
	log.Infof("Device List: %v", devList)

	if err := patchGPUCount(len(devList)); err != nil {
		return nil, err
	}

	disableCGPUIsolation, err := disableCGPUIsolationOrNot()
	if err != nil {
		return nil, err
	}

	return &NvidiaDevicePlugin{
		devs:                 devs,
		realDevNames:         devList,
		devNameMap:           devNameMap,
		socket:               serverSock,
		mps:                  mps,
		healthCheck:          healthCheck,
		disableCGPUIsolation: disableCGPUIsolation,
		stop:                 make(chan struct{}),
		health:               make(chan *pluginapi.Device),
		queryKubelet:         queryKubelet,
		kubeletClient:        client,
	}, nil
}
// GetDeviceNameByIndex maps a /dev/nvidia<N> index back to the real GPU
// UUID, building the inverse lookup map on first use.
// NOTE(review): the lazy initialization of devIndxMap is not guarded by the
// embedded RWMutex — confirm callers are single-threaded or add locking.
func (m *NvidiaDevicePlugin) GetDeviceNameByIndex(index uint) (name string, found bool) {
	if len(m.devIndxMap) == 0 {
		m.devIndxMap = map[uint]string{}
		for k, v := range m.devNameMap {
			m.devIndxMap[v] = k
		}
		log.Infof("Get devIndexMap: %v", m.devIndxMap)
	}

	name, found = m.devIndxMap[index]
	return name, found
}
// GetDevicePluginOptions returns empty options: this plugin does not
// request PreStartContainer or any other optional kubelet callback.
func (m *NvidiaDevicePlugin) GetDevicePluginOptions(context.Context, *pluginapi.Empty) (*pluginapi.DevicePluginOptions, error) {
	return &pluginapi.DevicePluginOptions{}, nil
}
// dial establishes a blocking gRPC connection to the unix socket at
// unixSocketPath, failing after timeout.
func dial(unixSocketPath string, timeout time.Duration) (*grpc.ClientConn, error) {
	unixDialer := func(addr string, d time.Duration) (net.Conn, error) {
		return net.DialTimeout("unix", addr, d)
	}
	return grpc.Dial(unixSocketPath,
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithTimeout(timeout),
		grpc.WithDialer(unixDialer),
	)
}
// Start creates the plugin's unix socket, launches the gRPC server, waits
// for it to accept connections, and starts the health-check goroutine.
func (m *NvidiaDevicePlugin) Start() error {
	// Remove any stale socket left over from a previous run.
	err := m.cleanup()
	if err != nil {
		return err
	}

	sock, err := net.Listen("unix", m.socket)
	if err != nil {
		return err
	}

	m.server = grpc.NewServer([]grpc.ServerOption{}...)
	pluginapi.RegisterDevicePluginServer(m.server, m)

	go m.server.Serve(sock)

	// Wait for server to start by launching a blocking connexion
	conn, err := dial(m.socket, 5*time.Second)
	if err != nil {
		return err
	}
	conn.Close()

	go m.healthcheck()

	// lastAllocateTime is a package-level timestamp declared elsewhere in
	// this package.
	lastAllocateTime = time.Now()
	return nil
}
// Stop shuts the gRPC server down, signals helper goroutines by closing the
// stop channel, and removes the unix socket. It is a no-op (returns nil)
// when the server was never started or has already been stopped.
func (m *NvidiaDevicePlugin) Stop() error {
	if m.server == nil {
		return nil
	}

	m.server.Stop()
	m.server = nil
	close(m.stop)

	return m.cleanup()
}
// Register registers this device plugin for resourceName with the kubelet
// listening at kubeletEndpoint.
func (m *NvidiaDevicePlugin) Register(kubeletEndpoint, resourceName string) error {
	conn, err := dial(kubeletEndpoint, 5*time.Second)
	if err != nil {
		return err
	}
	defer conn.Close()

	request := &pluginapi.RegisterRequest{
		Version:      pluginapi.Version,
		Endpoint:     path.Base(m.socket),
		ResourceName: resourceName,
	}
	_, err = pluginapi.NewRegistrationClient(conn).Register(context.Background(), request)
	return err
}
// ListAndWatch streams the current fake-device list to the kubelet and
// re-sends it whenever a device turns unhealthy. It returns when Stop
// closes the stop channel.
// NOTE(review): Send errors are ignored, and there is no path back to
// Healthy (see FIXME below) — confirm both are acceptable.
func (m *NvidiaDevicePlugin) ListAndWatch(e *pluginapi.Empty, s pluginapi.DevicePlugin_ListAndWatchServer) error {
	s.Send(&pluginapi.ListAndWatchResponse{Devices: m.devs})

	for {
		select {
		case <-m.stop:
			return nil
		case d := <-m.health:
			// FIXME: there is no way to recover from the Unhealthy state.
			d.Health = pluginapi.Unhealthy
			s.Send(&pluginapi.ListAndWatchResponse{Devices: m.devs})
		}
	}
}
// unhealthy hands dev to the ListAndWatch loop to be marked Unhealthy.
func (m *NvidiaDevicePlugin) unhealthy(dev *pluginapi.Device) {
	m.health <- dev
}
// PreStartContainer is a no-op; this plugin does not request the
// PreStartContainer hook (see GetDevicePluginOptions).
func (m *NvidiaDevicePlugin) PreStartContainer(context.Context, *pluginapi.PreStartContainerRequest) (*pluginapi.PreStartContainerResponse, error) {
	return &pluginapi.PreStartContainerResponse{}, nil
}
// cleanup removes the plugin's unix socket file if it exists; a missing
// file is not an error.
func (m *NvidiaDevicePlugin) cleanup() error {
	err := os.Remove(m.socket)
	if err == nil || os.IsNotExist(err) {
		return nil
	}
	return err
}
// healthcheck forwards unhealthy devices from the XID watcher to the
// ListAndWatch loop until Stop closes the stop channel. When health
// checking is disabled, xids stays nil so its receive case blocks forever
// and only the stop case can fire.
func (m *NvidiaDevicePlugin) healthcheck() {
	ctx, cancel := context.WithCancel(context.Background())

	var xids chan *pluginapi.Device
	if m.healthCheck {
		xids = make(chan *pluginapi.Device)
		go watchXIDs(ctx, m.devs, xids)
	}

	for {
		select {
		case <-m.stop:
			cancel()
			return
		case dev := <-xids:
			m.unhealthy(dev)
		}
	}
}
// Serve starts the gRPC server and then registers the plugin with the
// kubelet; on registration failure the server is stopped again.
func (m *NvidiaDevicePlugin) Serve() error {
	if err := m.Start(); err != nil {
		log.Infof("Could not start device plugin: %s", err)
		return err
	}
	log.Infoln("Starting to serve on", m.socket)

	if err := m.Register(pluginapi.KubeletSocket, resourceName); err != nil {
		log.Infof("Could not register device plugin: %s", err)
		m.Stop()
		return err
	}
	log.Infoln("Registered device plugin with Kubelet")

	return nil
}

View File

@@ -0,0 +1,32 @@
package nvidia
import (
"os"
"os/signal"
"github.com/fsnotify/fsnotify"
)
// newFSWatcher creates an fsnotify watcher subscribed to every path in
// files; on any failure the watcher is closed and the error returned.
func newFSWatcher(files ...string) (*fsnotify.Watcher, error) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, err
	}

	for _, file := range files {
		if err := watcher.Add(file); err != nil {
			watcher.Close()
			return nil, err
		}
	}

	return watcher, nil
}
func newOSWatcher(sigs ...os.Signal) chan os.Signal {
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, sigs...)
return sigChan
}

View File

@@ -0,0 +1,134 @@
package client
import (
"encoding/json"
"fmt"
"io"
v1 "k8s.io/api/core/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/transport"
"net/http"
"time"
)
// KubeletClientConfig defines config parameters for the kubelet client.
type KubeletClientConfig struct {
	// Address specifies the kubelet address.
	Address string

	// Port specifies the default port - used if no information about Kubelet port can be found in Node.NodeStatus.DaemonEndpoints.
	Port uint

	// TLSClientConfig contains settings to enable transport layer security.
	restclient.TLSClientConfig

	// BearerToken is sent when the server requires Bearer authentication.
	BearerToken string

	// HTTPTimeout is used by the client to timeout http requests to Kubelet.
	HTTPTimeout time.Duration
}
// KubeletClient is a minimal HTTPS client for a single kubelet's read-only
// API (used here for the /pods endpoint).
type KubeletClient struct {
	defaultPort uint         // port used for every request
	host        string       // kubelet address
	client      *http.Client // transport with TLS/token and timeout applied
}
// NewKubeletClient builds a KubeletClient from config, constructing an
// HTTP client whose transport carries the configured TLS settings and
// bearer token (certificate verification is skipped; see makeTransport).
func NewKubeletClient(config *KubeletClientConfig) (*KubeletClient, error) {
	rt, err := makeTransport(config, true)
	if err != nil {
		return nil, err
	}
	return &KubeletClient{
		host:        config.Address,
		defaultPort: config.Port,
		client: &http.Client{
			Transport: rt,
			Timeout:   config.HTTPTimeout,
		},
	}, nil
}
// transportConfig converts a client config to an appropriate transport
// config. When no CA is configured, certificate verification is disabled
// (TLS.Insecure).
func (c *KubeletClientConfig) transportConfig() *transport.Config {
	cfg := &transport.Config{
		TLS: transport.TLSConfig{
			CAFile:   c.CAFile,
			CAData:   c.CAData,
			CertFile: c.CertFile,
			CertData: c.CertData,
			KeyFile:  c.KeyFile,
			KeyData:  c.KeyData,
		},
		BearerToken: c.BearerToken,
	}
	if !cfg.HasCA() {
		cfg.TLS.Insecure = true
	}
	return cfg
}
// makeTransport creates a RoundTripper for HTTP Transport. When
// insecureSkipTLSVerify is set, certificate verification is disabled and
// any configured CA data is dropped before the TLS config is built.
func makeTransport(config *KubeletClientConfig, insecureSkipTLSVerify bool) (http.RoundTripper, error) {
	// do the insecureSkipTLSVerify on the pre-transport *before* we go get a potentially cached connection.
	// transportConfig always produces a new struct pointer.
	preTLSConfig := config.transportConfig()
	if insecureSkipTLSVerify && preTLSConfig != nil {
		preTLSConfig.TLS.Insecure = true
		preTLSConfig.TLS.CAData = nil
		preTLSConfig.TLS.CAFile = ""
	}

	tlsConfig, err := transport.TLSConfigFor(preTLSConfig)
	if err != nil {
		return nil, err
	}

	rt := http.DefaultTransport
	if tlsConfig != nil {
		// A custom TLS config requires a dedicated transport.
		rt = utilnet.SetOldTransportDefaults(&http.Transport{
			TLSClientConfig: tlsConfig,
		})
	}
	// Wrap with bearer-token / auth round-trippers from the full config.
	return transport.HTTPWrappersForConfig(config.transportConfig(), rt)
}
func ReadAll(r io.Reader) ([]byte, error) {
b := make([]byte, 0, 512)
for {
if len(b) == cap(b) {
// Add more capacity (let append pick how much).
b = append(b, 0)[:len(b)]
}
n, err := r.Read(b[len(b):cap(b)])
b = b[:len(b)+n]
if err != nil {
if err == io.EOF {
err = nil
}
return b, err
}
}
}
// GetNodeRunningPods queries the kubelet's HTTPS /pods endpoint and
// decodes the returned pod list.
func (k *KubeletClient) GetNodeRunningPods() (*v1.PodList, error) {
	resp, err := k.client.Get(fmt.Sprintf("https://%v:%d/pods/", k.host, k.defaultPort))
	if err != nil {
		return nil, err
	}
	// BUG FIX: the original never closed the response body, leaking the
	// connection; close it so the transport can reuse it.
	defer resp.Body.Close()

	body, err := ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	podLists := &v1.PodList{}
	if err = json.Unmarshal(body, &podLists); err != nil {
		return nil, err
	}
	return podLists, nil
}

View File

@@ -0,0 +1,57 @@
package client
import (
"flag"
"fmt"
"io/ioutil"
"k8s.io/client-go/rest"
"testing"
"time"
)
var (
	// clientCert and clientKey select TLS client-certificate auth; set via
	// the -client-cert / -client-key test flags.
	clientCert string
	clientKey  string
	// token selects bearer-token auth (-token flag).
	token string
	// timeout is the HTTP timeout in seconds (-timeout flag).
	timeout int
)
// TestNewKubeletClient is an integration-style smoke test: it builds a
// client against the local kubelet (127.0.0.1:10250) using the flags or
// the in-cluster service-account token and prints the pod list. Connection
// failures are printed, not fatal, so the test passes off-cluster.
func TestNewKubeletClient(t *testing.T) {
	flag.StringVar(&clientCert, "client-cert", "", "")
	flag.StringVar(&clientKey, "client-key", "", "")
	flag.StringVar(&token, "token", "", "")
	flag.IntVar(&timeout, "timeout", 10, "")
	flag.Parse()

	if clientCert == "" && clientKey == "" && token == "" {
		// No flags given: fall back to the in-cluster service-account token.
		tokenByte, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
		if err != nil {
			panic(fmt.Errorf("in cluster mode, find token failed, error: %v", err))
		}
		token = string(tokenByte)
	}

	c, err := NewKubeletClient(&KubeletClientConfig{
		Address: "127.0.0.1",
		Port:    10250,
		TLSClientConfig: rest.TLSClientConfig{
			Insecure:   true,
			ServerName: "kubelet",
			CertFile:   clientCert,
			KeyFile:    clientKey,
		},
		BearerToken: token,
		HTTPTimeout: time.Duration(timeout) * time.Second,
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	podsList, err := c.GetNodeRunningPods()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(podsList)
}

View File

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2018, NVIDIA Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,634 @@
// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
package nvml
// #cgo LDFLAGS: -ldl -Wl,--unresolved-symbols=ignore-in-object-files
// #include "nvml_dl.h"
import "C"
import (
"errors"
"fmt"
"io/ioutil"
"os"
"sort"
"strconv"
"strings"
)
const (
	szDriver   = C.NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE // driver-version buffer size
	szName     = C.NVML_DEVICE_NAME_BUFFER_SIZE           // device-name buffer size
	szUUID     = C.NVML_DEVICE_UUID_BUFFER_SIZE           // device-UUID buffer size
	szProcs    = 32                                       // process-query batch size
	szProcName = 64                                       // process-name buffer size

	XidCriticalError = C.nvmlEventTypeXidCriticalError // critical Xid event mask
)

// handle wraps an NVML device handle.
type handle struct{ dev C.nvmlDevice_t }

// EventSet wraps an NVML event set used to wait for device events.
type EventSet struct{ set C.nvmlEventSet_t }

// Event is a single NVML event: the UUID of the originating device (nil
// when unknown), the event type mask, and the event payload.
type Event struct {
	UUID  *string
	Etype uint64
	Edata uint64
}
// uintPtr copies a C uint into a freshly allocated Go *uint.
func uintPtr(c C.uint) *uint {
	i := uint(c)
	return &i
}

// uint64Ptr copies a C ulonglong into a freshly allocated Go *uint64.
func uint64Ptr(c C.ulonglong) *uint64 {
	i := uint64(c)
	return &i
}

// stringPtr copies a NUL-terminated C string into a Go *string.
func stringPtr(c *C.char) *string {
	s := C.GoString(c)
	return &s
}

// errorString converts an NVML return code into a Go error (nil on success).
func errorString(ret C.nvmlReturn_t) error {
	if ret == C.NVML_SUCCESS {
		return nil
	}
	err := C.GoString(C.nvmlErrorString(ret))
	return fmt.Errorf("nvml: %v", err)
}
// init_ loads and initializes the NVML shared library via the dl shim.
func init_() error {
	r := C.nvmlInit_dl()
	if r == C.NVML_ERROR_LIBRARY_NOT_FOUND {
		return errors.New("could not load NVML library")
	}
	return errorString(r)
}

// NewEventSet creates an NVML event set.
// NOTE(review): the nvmlEventSetCreate return code is discarded, so a
// failure yields a zero-valued set — confirm callers can tolerate that.
func NewEventSet() EventSet {
	var set C.nvmlEventSet_t
	C.nvmlEventSetCreate(&set)

	return EventSet{set}
}
// RegisterEvent subscribes every GPU on the system to the given event type
// in the event set es, stopping at the first failure.
func RegisterEvent(es EventSet, event int) error {
	n, err := deviceGetCount()
	if err != nil {
		return err
	}

	var i uint
	for i = 0; i < n; i++ {
		h, err := deviceGetHandleByIndex(i)
		if err != nil {
			return err
		}

		r := C.nvmlDeviceRegisterEvents(h.dev, C.ulonglong(event), es.set)
		if r != C.NVML_SUCCESS {
			return errorString(r)
		}
	}

	return nil
}
// RegisterEventForDevice subscribes the single GPU identified by uuid to
// the given event type, scanning all devices for the matching handle. It
// returns an error when no device with that UUID exists.
func RegisterEventForDevice(es EventSet, event int, uuid string) error {
	n, err := deviceGetCount()
	if err != nil {
		return err
	}

	var i uint
	for i = 0; i < n; i++ {
		h, err := deviceGetHandleByIndex(i)
		if err != nil {
			return err
		}

		duuid, err := h.deviceGetUUID()
		if err != nil {
			return err
		}

		if *duuid != uuid {
			continue
		}

		r := C.nvmlDeviceRegisterEvents(h.dev, C.ulonglong(event), es.set)
		if r != C.NVML_SUCCESS {
			return errorString(r)
		}

		return nil
	}

	return fmt.Errorf("nvml: device not found")
}
// DeleteEventSet frees the NVML event set.
func DeleteEventSet(es EventSet) {
	C.nvmlEventSetFree(es.set)
}

// WaitForEvent blocks for up to timeout milliseconds for the next event in
// es and returns it together with the NVML status of the wait. The device
// UUID lookup error is ignored; UUID may be nil.
func WaitForEvent(es EventSet, timeout uint) (Event, error) {
	var data C.nvmlEventData_t

	r := C.nvmlEventSetWait(es.set, &data, C.uint(timeout))
	uuid, _ := handle{data.device}.deviceGetUUID()

	return Event{
			UUID:  uuid,
			Etype: uint64(data.eventType),
			Edata: uint64(data.eventData),
		},
		errorString(r)
}
// shutdown unloads NVML via the dl shim.
func shutdown() error {
	return errorString(C.nvmlShutdown_dl())
}

// systemGetDriverVersion returns the installed NVIDIA driver version.
func systemGetDriverVersion() (string, error) {
	var driver [szDriver]C.char

	r := C.nvmlSystemGetDriverVersion(&driver[0], szDriver)
	return C.GoString(&driver[0]), errorString(r)
}

// systemGetProcessName returns the name of the process with the given pid.
func systemGetProcessName(pid uint) (string, error) {
	var proc [szProcName]C.char

	r := C.nvmlSystemGetProcessName(C.uint(pid), &proc[0], szProcName)
	return C.GoString(&proc[0]), errorString(r)
}

// deviceGetCount returns the number of GPUs NVML can see.
func deviceGetCount() (uint, error) {
	var n C.uint

	r := C.nvmlDeviceGetCount(&n)
	return uint(n), errorString(r)
}
// deviceGetHandleByIndex returns the NVML handle for device index idx.
func deviceGetHandleByIndex(idx uint) (handle, error) {
	var dev C.nvmlDevice_t

	r := C.nvmlDeviceGetHandleByIndex(C.uint(idx), &dev)
	return handle{dev}, errorString(r)
}

// deviceGetTopologyCommonAncestor returns the topology level of the common
// ancestor of two devices, or nil when the query is unavailable or
// unsupported on this driver.
func deviceGetTopologyCommonAncestor(h1, h2 handle) (*uint, error) {
	var level C.nvmlGpuTopologyLevel_t

	r := C.nvmlDeviceGetTopologyCommonAncestor_dl(h1.dev, h2.dev, &level)
	if r == C.NVML_ERROR_FUNCTION_NOT_FOUND || r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil
	}
	return uintPtr(C.uint(level)), errorString(r)
}
// deviceGetName returns the device's product name (nil when unsupported).
func (h handle) deviceGetName() (*string, error) {
	var name [szName]C.char

	r := C.nvmlDeviceGetName(h.dev, &name[0], szName)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil
	}
	return stringPtr(&name[0]), errorString(r)
}

// deviceGetUUID returns the device's UUID (nil when unsupported).
func (h handle) deviceGetUUID() (*string, error) {
	var uuid [szUUID]C.char

	r := C.nvmlDeviceGetUUID(h.dev, &uuid[0], szUUID)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil
	}
	return stringPtr(&uuid[0]), errorString(r)
}

// deviceGetPciInfo returns the device's PCI bus ID (nil when unsupported).
func (h handle) deviceGetPciInfo() (*string, error) {
	var pci C.nvmlPciInfo_t

	r := C.nvmlDeviceGetPciInfo(h.dev, &pci)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil
	}
	return stringPtr(&pci.busId[0]), errorString(r)
}

// deviceGetMinorNumber returns N for the /dev/nvidia<N> node
// (nil when unsupported).
func (h handle) deviceGetMinorNumber() (*uint, error) {
	var minor C.uint

	r := C.nvmlDeviceGetMinorNumber(h.dev, &minor)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil
	}
	return uintPtr(minor), errorString(r)
}
// deviceGetBAR1MemoryInfo returns total and used BAR1 memory
// (nils when unsupported).
func (h handle) deviceGetBAR1MemoryInfo() (*uint64, *uint64, error) {
	var bar1 C.nvmlBAR1Memory_t

	r := C.nvmlDeviceGetBAR1MemoryInfo(h.dev, &bar1)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil, nil
	}
	return uint64Ptr(bar1.bar1Total), uint64Ptr(bar1.bar1Used), errorString(r)
}

// deviceGetPowerManagementLimit returns the device's power-management
// limit (nil when unsupported).
func (h handle) deviceGetPowerManagementLimit() (*uint, error) {
	var power C.uint

	r := C.nvmlDeviceGetPowerManagementLimit(h.dev, &power)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil
	}
	return uintPtr(power), errorString(r)
}

// deviceGetMaxClockInfo returns the max SM and memory clocks; the memory
// clock is only queried when the SM query succeeded (nils when unsupported).
func (h handle) deviceGetMaxClockInfo() (*uint, *uint, error) {
	var sm, mem C.uint

	r := C.nvmlDeviceGetMaxClockInfo(h.dev, C.NVML_CLOCK_SM, &sm)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil, nil
	}
	if r == C.NVML_SUCCESS {
		r = C.nvmlDeviceGetMaxClockInfo(h.dev, C.NVML_CLOCK_MEM, &mem)
	}
	return uintPtr(sm), uintPtr(mem), errorString(r)
}
func (h handle) deviceGetMaxPcieLinkGeneration() (*uint, error) {
var link C.uint
r := C.nvmlDeviceGetMaxPcieLinkGeneration(h.dev, &link)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(link), errorString(r)
}
func (h handle) deviceGetMaxPcieLinkWidth() (*uint, error) {
var width C.uint
r := C.nvmlDeviceGetMaxPcieLinkWidth(h.dev, &width)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(width), errorString(r)
}
func (h handle) deviceGetPowerUsage() (*uint, error) {
var power C.uint
r := C.nvmlDeviceGetPowerUsage(h.dev, &power)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(power), errorString(r)
}
func (h handle) deviceGetTemperature() (*uint, error) {
var temp C.uint
r := C.nvmlDeviceGetTemperature(h.dev, C.NVML_TEMPERATURE_GPU, &temp)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(temp), errorString(r)
}
func (h handle) deviceGetUtilizationRates() (*uint, *uint, error) {
var usage C.nvmlUtilization_t
r := C.nvmlDeviceGetUtilizationRates(h.dev, &usage)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil, nil
}
return uintPtr(usage.gpu), uintPtr(usage.memory), errorString(r)
}
func (h handle) deviceGetEncoderUtilization() (*uint, error) {
var usage, sampling C.uint
r := C.nvmlDeviceGetEncoderUtilization(h.dev, &usage, &sampling)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(usage), errorString(r)
}
func (h handle) deviceGetDecoderUtilization() (*uint, error) {
var usage, sampling C.uint
r := C.nvmlDeviceGetDecoderUtilization(h.dev, &usage, &sampling)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(usage), errorString(r)
}
// deviceGetMemoryInfo returns the total device memory and a
// used/free breakdown, all converted from bytes to MiB. When the
// query is unsupported, the named results are returned at their zero
// values (nil pointers) with a nil error.
func (h handle) deviceGetMemoryInfo() (totalMem *uint64, devMem DeviceMemory, err error) {
	var mem C.nvmlMemory_t
	r := C.nvmlDeviceGetMemoryInfo(h.dev, &mem)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return
	}
	err = errorString(r)
	// On any other failure, return the error without partially
	// populated values.
	if r != C.NVML_SUCCESS {
		return
	}
	totalMem = uint64Ptr(mem.total)
	if totalMem != nil {
		*totalMem /= 1024 * 1024 // MiB
	}
	devMem = DeviceMemory{
		Used: uint64Ptr(mem.used),
		Free: uint64Ptr(mem.free),
	}
	if devMem.Used != nil {
		*devMem.Used /= 1024 * 1024 // MiB
	}
	if devMem.Free != nil {
		*devMem.Free /= 1024 * 1024 // MiB
	}
	return
}
// deviceGetClockInfo returns the current SM and memory clocks (MHz
// per the callers' comments). Nils when unsupported; if the SM query
// succeeds but the memory query fails, the error reflects the latter.
func (h handle) deviceGetClockInfo() (*uint, *uint, error) {
	var sm, mem C.uint
	r := C.nvmlDeviceGetClockInfo(h.dev, C.NVML_CLOCK_SM, &sm)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil, nil
	}
	if r == C.NVML_SUCCESS {
		r = C.nvmlDeviceGetClockInfo(h.dev, C.NVML_CLOCK_MEM, &mem)
	}
	return uintPtr(sm), uintPtr(mem), errorString(r)
}

// deviceGetMemoryErrorCounter returns the volatile uncorrected ECC
// error counts for the L1 cache, L2 cache and device memory. All
// nils when the first query is unsupported; later failures leave the
// remaining counters at zero and are reported via the error.
func (h handle) deviceGetMemoryErrorCounter() (*uint64, *uint64, *uint64, error) {
	var l1, l2, mem C.ulonglong
	r := C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
		C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_L1_CACHE, &l1)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil, nil, nil
	}
	if r == C.NVML_SUCCESS {
		r = C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
			C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_L2_CACHE, &l2)
	}
	if r == C.NVML_SUCCESS {
		r = C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
			C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_DEVICE_MEMORY, &mem)
	}
	return uint64Ptr(l1), uint64Ptr(l2), uint64Ptr(mem), errorString(r)
}

// deviceGetPcieThroughput returns the PCIe receive and transmit
// throughput (raw NVML units; Status divides by 1000 for MB/s).
// Nils when unsupported.
func (h handle) deviceGetPcieThroughput() (*uint, *uint, error) {
	var rx, tx C.uint
	r := C.nvmlDeviceGetPcieThroughput(h.dev, C.NVML_PCIE_UTIL_RX_BYTES, &rx)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil, nil
	}
	if r == C.NVML_SUCCESS {
		r = C.nvmlDeviceGetPcieThroughput(h.dev, C.NVML_PCIE_UTIL_TX_BYTES, &tx)
	}
	return uintPtr(rx), uintPtr(tx), errorString(r)
}
// deviceGetComputeRunningProcesses returns the pids and per-process
// GPU memory usage (bytes, as reported by NVML) of compute processes
// on the device. The buffer is fixed at szProcs entries; with more
// processes than that, NVML is expected to report an error through r
// (the truncated data is still returned) — TODO confirm against the
// NVML docs. Nils when unsupported.
func (h handle) deviceGetComputeRunningProcesses() ([]uint, []uint64, error) {
	var procs [szProcs]C.nvmlProcessInfo_t
	var count = C.uint(szProcs)
	r := C.nvmlDeviceGetComputeRunningProcesses(h.dev, &count, &procs[0])
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil, nil
	}
	n := int(count)
	pids := make([]uint, n)
	mems := make([]uint64, n)
	for i := 0; i < n; i++ {
		pids[i] = uint(procs[i].pid)
		mems[i] = uint64(procs[i].usedGpuMemory)
	}
	return pids, mems, errorString(r)
}

// deviceGetGraphicsRunningProcesses is the graphics-context
// counterpart of deviceGetComputeRunningProcesses, with the same
// buffer-size caveat and return conventions.
func (h handle) deviceGetGraphicsRunningProcesses() ([]uint, []uint64, error) {
	var procs [szProcs]C.nvmlProcessInfo_t
	var count = C.uint(szProcs)
	r := C.nvmlDeviceGetGraphicsRunningProcesses(h.dev, &count, &procs[0])
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil, nil
	}
	n := int(count)
	pids := make([]uint, n)
	mems := make([]uint64, n)
	for i := 0; i < n; i++ {
		pids[i] = uint(procs[i].pid)
		mems[i] = uint64(procs[i].usedGpuMemory)
	}
	return pids, mems, errorString(r)
}
// deviceGetAllRunningProcesses merges the compute and graphics
// process lists of the device into a single slice sorted by pid.
// A process present in both lists is reported once with type
// ComputeAndGraphics; memory usage is converted to MiB. Process
// names are resolved from /proc via processName.
func (h handle) deviceGetAllRunningProcesses() ([]ProcessInfo, error) {
	cPids, cpMems, err := h.deviceGetComputeRunningProcesses()
	if err != nil {
		return nil, err
	}
	gPids, gpMems, err := h.deviceGetGraphicsRunningProcesses()
	if err != nil {
		return nil, err
	}
	allPids := make(map[uint]ProcessInfo)
	for i, pid := range cPids {
		name, err := processName(pid)
		if err != nil {
			return nil, err
		}
		allPids[pid] = ProcessInfo{
			PID:        pid,
			Name:       name,
			MemoryUsed: cpMems[i] / (1024 * 1024), // MiB
			Type:       Compute,
		}
	}
	for i, pid := range gPids {
		pInfo, exists := allPids[pid]
		if exists {
			// Already seen as a compute process: upgrade the type but
			// keep the compute-side memory figure.
			pInfo.Type = ComputeAndGraphics
			allPids[pid] = pInfo
		} else {
			name, err := processName(pid)
			if err != nil {
				return nil, err
			}
			allPids[pid] = ProcessInfo{
				PID:        pid,
				Name:       name,
				MemoryUsed: gpMems[i] / (1024 * 1024), // MiB
				Type:       Graphics,
			}
		}
	}
	var processInfo []ProcessInfo
	for _, v := range allPids {
		processInfo = append(processInfo, v)
	}
	// Map iteration order is random; sort for a deterministic result.
	sort.Slice(processInfo, func(i, j int) bool {
		return processInfo[i].PID < processInfo[j].PID
	})
	return processInfo, nil
}
// getClocksThrottleReasons returns why the device clocks are
// currently throttled. Unsupported queries yield
// (ThrottleReasonUnknown, nil); other NVML failures yield
// (ThrottleReasonUnknown, err).
//
// The NVML value is a bitmask, so more than one reason can be set at
// once. Only exact single-reason values are mapped below; previously
// any unmatched (combined or unrecognized) value fell through the
// switch and returned the zero value of ThrottleReason
// (ThrottleReasonGpuIdle), which is misleading — such values are now
// reported as ThrottleReasonUnknown.
func (h handle) getClocksThrottleReasons() (reason ThrottleReason, err error) {
	var clocksThrottleReasons C.ulonglong
	r := C.nvmlDeviceGetCurrentClocksThrottleReasons(h.dev, &clocksThrottleReasons)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return ThrottleReasonUnknown, nil
	}
	if r != C.NVML_SUCCESS {
		return ThrottleReasonUnknown, errorString(r)
	}
	switch clocksThrottleReasons {
	case C.nvmlClocksThrottleReasonGpuIdle:
		reason = ThrottleReasonGpuIdle
	case C.nvmlClocksThrottleReasonApplicationsClocksSetting:
		reason = ThrottleReasonApplicationsClocksSetting
	case C.nvmlClocksThrottleReasonSwPowerCap:
		reason = ThrottleReasonSwPowerCap
	case C.nvmlClocksThrottleReasonHwSlowdown:
		reason = ThrottleReasonHwSlowdown
	case C.nvmlClocksThrottleReasonSyncBoost:
		reason = ThrottleReasonSyncBoost
	case C.nvmlClocksThrottleReasonSwThermalSlowdown:
		reason = ThrottleReasonSwThermalSlowdown
	case C.nvmlClocksThrottleReasonHwThermalSlowdown:
		reason = ThrottleReasonHwThermalSlowdown
	case C.nvmlClocksThrottleReasonHwPowerBrakeSlowdown:
		reason = ThrottleReasonHwPowerBrakeSlowdown
	case C.nvmlClocksThrottleReasonDisplayClockSetting:
		reason = ThrottleReasonDisplayClockSetting
	case C.nvmlClocksThrottleReasonNone:
		reason = ThrottleReasonNone
	default:
		// Combined bitmask or a reason this binding does not know.
		reason = ThrottleReasonUnknown
	}
	return
}
// getPerformanceState returns the current performance state (P-state)
// of the device. Unsupported queries yield (PerfStateUnknown, nil);
// other failures yield (PerfStateUnknown, err).
func (h handle) getPerformanceState() (PerfState, error) {
	var pstate C.nvmlPstates_t
	r := C.nvmlDeviceGetPerformanceState(h.dev, &pstate)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return PerfStateUnknown, nil
	}
	if r != C.NVML_SUCCESS {
		return PerfStateUnknown, errorString(r)
	}
	return PerfState(pstate), nil
}
// processName returns the command name of the process with the given
// pid, read from /proc/<pid>/comm with the trailing newline removed.
// A pid whose /proc entry no longer exists (the process exited
// between enumeration and lookup — a TOCTOU race) yields an empty
// name and a nil error; any other read failure is returned as-is.
func processName(pid uint) (string, error) {
	commPath := `/proc/` + strconv.FormatUint(uint64(pid), 10) + `/comm`
	raw, err := ioutil.ReadFile(commPath)
	if os.IsNotExist(err) {
		// Process already gone; not an error for our callers.
		return "", nil
	}
	if err != nil {
		return "", err
	}
	return strings.TrimSuffix(string(raw), "\n"), nil
}
// getAccountingInfo returns the accounting mode and accounting buffer
// size of the device. If either query is unsupported, the zero-value
// Accounting is returned with a nil error.
func (h handle) getAccountingInfo() (accountingInfo Accounting, err error) {
	var mode C.nvmlEnableState_t
	var buffer C.uint
	r := C.nvmlDeviceGetAccountingMode(h.dev, &mode)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return
	}
	if r != C.NVML_SUCCESS {
		return accountingInfo, errorString(r)
	}
	r = C.nvmlDeviceGetAccountingBufferSize(h.dev, &buffer)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return
	}
	if r != C.NVML_SUCCESS {
		return accountingInfo, errorString(r)
	}
	accountingInfo = Accounting{
		Mode:       ModeState(mode),
		BufferSize: uintPtr(buffer),
	}
	return
}

// getDisplayInfo returns the display mode and display-active state of
// the device. If either query is unsupported, the zero-value Display
// is returned with a nil error.
//
// NOTE(review): the first value is read via nvmlDeviceGetDisplayActive
// into `mode` and the second via nvmlDeviceGetDisplayMode into
// `isActive` — the call/field pairing looks swapped; confirm against
// the NVML documentation before relying on Mode vs Active.
func (h handle) getDisplayInfo() (display Display, err error) {
	var mode, isActive C.nvmlEnableState_t
	r := C.nvmlDeviceGetDisplayActive(h.dev, &mode)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return
	}
	if r != C.NVML_SUCCESS {
		return display, errorString(r)
	}
	r = C.nvmlDeviceGetDisplayMode(h.dev, &isActive)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return
	}
	if r != C.NVML_SUCCESS {
		return display, errorString(r)
	}
	display = Display{
		Mode:   ModeState(mode),
		Active: ModeState(isActive),
	}
	return
}

// getPeristenceMode returns the persistence mode of the device (the
// method name misspells "Persistence"; kept as-is because callers use
// it). Unsupported queries yield the zero ModeState with a nil error.
func (h handle) getPeristenceMode() (state ModeState, err error) {
	var mode C.nvmlEnableState_t
	r := C.nvmlDeviceGetPersistenceMode(h.dev, &mode)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return
	}
	return ModeState(mode), errorString(r)
}

View File

@@ -0,0 +1,533 @@
// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
package nvml
// #include "nvml_dl.h"
import "C"
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"strconv"
"strings"
)
// Sentinel errors returned by this package.
var (
	// ErrCPUAffinity wraps failures to parse the NUMA node of a device.
	ErrCPUAffinity = errors.New("failed to retrieve CPU affinity")
	// ErrUnsupportedP2PLink reports an unrecognized topology level.
	ErrUnsupportedP2PLink = errors.New("unsupported P2P link type")
	// ErrUnsupportedGPU reports a device lacking a minor number, bus id or UUID.
	ErrUnsupportedGPU = errors.New("unsupported GPU device")
)
// ModeState represents the enabled/disabled state of a device feature.
type ModeState uint

const (
	Enabled ModeState = iota
	Disabled
)

// String returns "Enabled" or "Disabled" for the two known states,
// and "N/A" for any other value.
func (m ModeState) String() string {
	if m == Enabled {
		return "Enabled"
	}
	if m == Disabled {
		return "Disabled"
	}
	return "N/A"
}
// Display holds the display mode and display-active state of a device.
type Display struct {
	Mode   ModeState
	Active ModeState
}

// Accounting holds the accounting mode and, when available, the
// accounting buffer size of a device.
type Accounting struct {
	Mode       ModeState
	BufferSize *uint
}

// DeviceMode aggregates the display, persistence and accounting modes
// returned by Device.GetDeviceMode.
type DeviceMode struct {
	DisplayInfo    Display
	Persistence    ModeState
	AccountingInfo Accounting
}
// ThrottleReason identifies why device clocks are being throttled.
type ThrottleReason uint

const (
	ThrottleReasonGpuIdle ThrottleReason = iota
	ThrottleReasonApplicationsClocksSetting
	ThrottleReasonSwPowerCap
	ThrottleReasonHwSlowdown
	ThrottleReasonSyncBoost
	ThrottleReasonSwThermalSlowdown
	ThrottleReasonHwThermalSlowdown
	ThrottleReasonHwPowerBrakeSlowdown
	ThrottleReasonDisplayClockSetting
	ThrottleReasonNone
	ThrottleReasonUnknown
)

// throttleReasonDescriptions maps each displayable reason to its text.
// ThrottleReasonUnknown is deliberately absent and falls back to "N/A".
var throttleReasonDescriptions = map[ThrottleReason]string{
	ThrottleReasonGpuIdle:                   "Gpu Idle",
	ThrottleReasonApplicationsClocksSetting: "Applications Clocks Setting",
	ThrottleReasonSwPowerCap:                "SW Power Cap",
	ThrottleReasonHwSlowdown:                "HW Slowdown",
	ThrottleReasonSyncBoost:                 "Sync Boost",
	ThrottleReasonSwThermalSlowdown:         "SW Thermal Slowdown",
	ThrottleReasonHwThermalSlowdown:         "HW Thermal Slowdown",
	ThrottleReasonHwPowerBrakeSlowdown:      "HW Power Brake Slowdown",
	ThrottleReasonDisplayClockSetting:       "Display Clock Setting",
	ThrottleReasonNone:                      "No clocks throttling",
}

// String returns the display text for r, or "N/A" for unknown values.
func (r ThrottleReason) String() string {
	if s, ok := throttleReasonDescriptions[r]; ok {
		return s
	}
	return "N/A"
}
// PerfState represents an NVML performance state (P-state), where P0
// is maximum performance and P15 is minimum.
type PerfState uint

const (
	PerfStateMax     = 0
	PerfStateMin     = 15
	PerfStateUnknown = 32
)

// String formats the state as "P0".."P15", or "Unknown" for any
// value outside that range (including PerfStateUnknown).
func (p PerfState) String() string {
	// p is unsigned, so only the upper bound needs checking.
	if p > PerfStateMin {
		return "Unknown"
	}
	return fmt.Sprintf("P%d", p)
}
// ProcessType classifies a GPU process as compute, graphics, or both.
type ProcessType uint

const (
	Compute ProcessType = iota
	Graphics
	ComputeAndGraphics
)

// String returns "C" for compute, "G" for graphics, and "C+G" for
// everything else (including ComputeAndGraphics).
func (t ProcessType) String() string {
	switch t {
	case Compute:
		return "C"
	case Graphics:
		return "G"
	}
	return "C+G"
}
// P2PLinkType classifies the peer-to-peer connection between two GPUs.
type P2PLinkType uint

const (
	P2PLinkUnknown P2PLinkType = iota
	P2PLinkCrossCPU
	P2PLinkSameCPU
	P2PLinkHostBridge
	P2PLinkMultiSwitch
	P2PLinkSingleSwitch
	P2PLinkSameBoard
)

// P2PLink describes the P2P topology between a device and the peer
// identified by BusID.
type P2PLink struct {
	BusID string
	Link  P2PLinkType
}

// String returns a human-readable description of the link type.
// P2PLinkUnknown and any out-of-range value yield "N/A".
func (t P2PLinkType) String() string {
	descriptions := [...]string{
		P2PLinkCrossCPU:     "Cross CPU socket",
		P2PLinkSameCPU:      "Same CPU socket",
		P2PLinkHostBridge:   "Host PCI bridge",
		P2PLinkMultiSwitch:  "Multiple PCI switches",
		P2PLinkSingleSwitch: "Single PCI switch",
		P2PLinkSameBoard:    "Same board",
	}
	if int(t) < len(descriptions) && descriptions[t] != "" {
		return descriptions[t]
	}
	return "N/A"
}
// ClockInfo holds core (SM) and memory clocks; MHz per the comments
// at the construction sites (NewDevice, Status).
type ClockInfo struct {
	Cores  *uint
	Memory *uint
}

// PCIInfo holds static PCI properties of a device. BAR1 is in MiB and
// Bandwidth in MB/s after the conversions applied in NewDevice.
type PCIInfo struct {
	BusID     string
	BAR1      *uint64
	Bandwidth *uint
}

// Device is a handle to one GPU plus its static properties, as
// gathered by NewDevice / NewDeviceLite. Pointer fields are nil when
// the corresponding NVML query is unsupported (or not gathered by
// NewDeviceLite).
type Device struct {
	handle
	UUID        string
	Path        string
	Model       *string
	Power       *uint
	Memory      *uint64
	CPUAffinity *uint
	PCI         PCIInfo
	Clocks      ClockInfo
	Topology    []P2PLink
}

// UtilizationInfo holds GPU, memory, encoder and decoder utilization
// percentages.
type UtilizationInfo struct {
	GPU     *uint
	Memory  *uint
	Encoder *uint
	Decoder *uint
}

// PCIThroughputInfo holds PCIe receive/transmit throughput (MB/s
// after the conversion in Status).
type PCIThroughputInfo struct {
	RX *uint
	TX *uint
}

// PCIStatusInfo holds the dynamic PCI state reported by Status.
type PCIStatusInfo struct {
	BAR1Used   *uint64
	Throughput PCIThroughputInfo
}

// ECCErrorsInfo holds volatile uncorrected ECC error counters per
// memory location.
type ECCErrorsInfo struct {
	L1Cache *uint64
	L2Cache *uint64
	Device  *uint64
}

// DeviceMemory holds used/free device memory in MiB.
type DeviceMemory struct {
	Used *uint64
	Free *uint64
}

// MemoryInfo combines global memory usage with ECC error counters.
type MemoryInfo struct {
	Global    DeviceMemory
	ECCErrors ECCErrorsInfo
}

// ProcessInfo describes one GPU process; MemoryUsed is in MiB.
type ProcessInfo struct {
	PID        uint
	Name       string
	MemoryUsed uint64
	Type       ProcessType
}

// DeviceStatus is the dynamic snapshot returned by Device.Status.
type DeviceStatus struct {
	Power       *uint
	Temperature *uint
	Utilization UtilizationInfo
	Memory      MemoryInfo
	Clocks      ClockInfo
	PCI         PCIStatusInfo
	Processes   []ProcessInfo
	Throttle    ThrottleReason
	Performance PerfState
}
// assert panics on a non-nil error. NewDevice, NewDeviceLite, Status
// and GetDeviceMode use it together with a deferred recover to turn a
// chain of NVML failures back into a returned error.
func assert(err error) {
	if err != nil {
		panic(err)
	}
}

// Init loads and initializes the NVML library.
func Init() error {
	return init_()
}

// Shutdown shuts NVML down and releases the library.
func Shutdown() error {
	return shutdown()
}

// GetDeviceCount returns the number of NVIDIA devices visible to NVML.
func GetDeviceCount() (uint, error) {
	return deviceGetCount()
}

// GetDriverVersion returns the installed NVIDIA driver version string.
func GetDriverVersion() (string, error) {
	return systemGetDriverVersion()
}
// numaNode returns the NUMA node of the PCI device with the given
// NVML bus id, read from sysfs. The first four characters of the bus
// id are discarded (leading zeros of the domain) to obtain the
// lowercase form sysfs uses. A missing sysfs entry is treated as
// "NUMA not enabled" and reported as node 0 with no error; likewise a
// negative value (NUMA_NO_NODE) is clamped to 0.
//
// Previously a bus id shorter than 5 characters made busid[4:] panic;
// such input is now rejected with ErrCPUAffinity context instead.
func numaNode(busid string) (uint, error) {
	if len(busid) < 5 {
		return 0, fmt.Errorf("%v: invalid PCI bus id %q", ErrCPUAffinity, busid)
	}
	// discard leading zeros of busid
	b, err := ioutil.ReadFile(fmt.Sprintf("/sys/bus/pci/devices/%s/numa_node", strings.ToLower(busid[4:])))
	if err != nil {
		// XXX report node 0 if NUMA support isn't enabled
		return 0, nil
	}
	node, err := strconv.ParseInt(string(bytes.TrimSpace(b)), 10, 8)
	if err != nil {
		return 0, fmt.Errorf("%v: %v", ErrCPUAffinity, err)
	}
	if node < 0 {
		node = 0 // XXX report node 0 instead of NUMA_NO_NODE
	}
	return uint(node), nil
}
// pciBandwidth computes the PCIe bandwidth in MB/s from the link
// generation and lane width. It returns nil if either input is nil;
// an unrecognized generation yields a bandwidth of 0 (per-lane rate
// defaults to zero), matching the original map-lookup behavior.
func pciBandwidth(gen, width *uint) *uint {
	if gen == nil || width == nil {
		return nil
	}
	var perLane uint // MB/s for a single lane of this generation
	switch *gen {
	case 1:
		perLane = 250
	case 2:
		perLane = 500
	case 3:
		perLane = 985
	case 4:
		perLane = 1969
	}
	total := perLane * *width
	return &total
}
// NewDevice gathers the static properties of the device at the given
// index and returns a Device. Internal NVML failures are funneled
// through assert/panic and converted back to an error by the deferred
// recover. Devices missing a minor number, bus id or UUID are
// rejected with ErrUnsupportedGPU. Power is converted to watts and
// BAR1 to MiB before returning.
func NewDevice(idx uint) (device *Device, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = r.(error)
		}
	}()
	h, err := deviceGetHandleByIndex(idx)
	assert(err)
	model, err := h.deviceGetName()
	assert(err)
	uuid, err := h.deviceGetUUID()
	assert(err)
	minor, err := h.deviceGetMinorNumber()
	assert(err)
	power, err := h.deviceGetPowerManagementLimit()
	assert(err)
	totalMem, _, err := h.deviceGetMemoryInfo()
	assert(err)
	busid, err := h.deviceGetPciInfo()
	assert(err)
	bar1, _, err := h.deviceGetBAR1MemoryInfo()
	assert(err)
	pcig, err := h.deviceGetMaxPcieLinkGeneration()
	assert(err)
	pciw, err := h.deviceGetMaxPcieLinkWidth()
	assert(err)
	ccore, cmem, err := h.deviceGetMaxClockInfo()
	assert(err)
	// These three are required to address the device at all.
	if minor == nil || busid == nil || uuid == nil {
		return nil, ErrUnsupportedGPU
	}
	path := fmt.Sprintf("/dev/nvidia%d", *minor)
	node, err := numaNode(*busid)
	assert(err)
	device = &Device{
		handle:      h,
		UUID:        *uuid,
		Path:        path,
		Model:       model,
		Power:       power,
		Memory:      totalMem,
		CPUAffinity: &node,
		PCI: PCIInfo{
			BusID:     *busid,
			BAR1:      bar1,
			Bandwidth: pciBandwidth(pcig, pciw), // MB/s
		},
		Clocks: ClockInfo{
			Cores:  ccore, // MHz
			Memory: cmem,  // MHz
		},
	}
	if power != nil {
		*device.Power /= 1000 // W
	}
	if bar1 != nil {
		*device.PCI.BAR1 /= 1024 * 1024 // MiB
	}
	return
}
// NewDeviceLite is a cheaper variant of NewDevice: it resolves only
// the UUID, device path and PCI bus id, leaving every other field at
// its zero value. Devices missing any of those are rejected with
// ErrUnsupportedGPU.
func NewDeviceLite(idx uint) (device *Device, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = r.(error)
		}
	}()
	h, err := deviceGetHandleByIndex(idx)
	assert(err)
	uuid, err := h.deviceGetUUID()
	assert(err)
	minor, err := h.deviceGetMinorNumber()
	assert(err)
	busid, err := h.deviceGetPciInfo()
	assert(err)
	if minor == nil || busid == nil || uuid == nil {
		return nil, ErrUnsupportedGPU
	}
	path := fmt.Sprintf("/dev/nvidia%d", *minor)
	device = &Device{
		handle: h,
		UUID:   *uuid,
		Path:   path,
		PCI: PCIInfo{
			BusID: *busid,
		},
	}
	return
}
// Status takes a dynamic snapshot of the device: power draw,
// temperature, utilization, memory usage and ECC errors, clocks,
// PCIe state, throttle reason, performance state and the running
// process list. NVML failures are funneled through assert/panic and
// converted back to an error by the deferred recover. Units are
// normalized before returning: power to W, BAR1 to MiB, PCIe
// throughput to MB/s.
func (d *Device) Status() (status *DeviceStatus, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = r.(error)
		}
	}()
	power, err := d.deviceGetPowerUsage()
	assert(err)
	temp, err := d.deviceGetTemperature()
	assert(err)
	ugpu, umem, err := d.deviceGetUtilizationRates()
	assert(err)
	uenc, err := d.deviceGetEncoderUtilization()
	assert(err)
	udec, err := d.deviceGetDecoderUtilization()
	assert(err)
	_, devMem, err := d.deviceGetMemoryInfo()
	assert(err)
	ccore, cmem, err := d.deviceGetClockInfo()
	assert(err)
	_, bar1, err := d.deviceGetBAR1MemoryInfo()
	assert(err)
	el1, el2, emem, err := d.deviceGetMemoryErrorCounter()
	assert(err)
	pcirx, pcitx, err := d.deviceGetPcieThroughput()
	assert(err)
	throttle, err := d.getClocksThrottleReasons()
	assert(err)
	perfState, err := d.getPerformanceState()
	assert(err)
	processInfo, err := d.deviceGetAllRunningProcesses()
	assert(err)
	status = &DeviceStatus{
		Power:       power,
		Temperature: temp, // °C
		Utilization: UtilizationInfo{
			GPU:     ugpu, // %
			Memory:  umem, // %
			Encoder: uenc, // %
			Decoder: udec, // %
		},
		Memory: MemoryInfo{
			Global: devMem,
			ECCErrors: ECCErrorsInfo{
				L1Cache: el1,
				L2Cache: el2,
				Device:  emem,
			},
		},
		Clocks: ClockInfo{
			Cores:  ccore, // MHz
			Memory: cmem,  // MHz
		},
		PCI: PCIStatusInfo{
			BAR1Used: bar1,
			Throughput: PCIThroughputInfo{
				RX: pcirx,
				TX: pcitx,
			},
		},
		Throttle:    throttle,
		Performance: perfState,
		Processes:   processInfo,
	}
	if power != nil {
		*status.Power /= 1000 // W
	}
	if bar1 != nil {
		*status.PCI.BAR1Used /= 1024 * 1024 // MiB
	}
	if pcirx != nil {
		*status.PCI.Throughput.RX /= 1000 // MB/s
	}
	if pcitx != nil {
		*status.PCI.Throughput.TX /= 1000 // MB/s
	}
	return
}
// GetP2PLink maps the NVML topology level of the common ancestor of
// two devices onto a P2PLinkType. If the topology query is
// unavailable (nil level) or fails, P2PLinkUnknown is returned; an
// unrecognized level yields ErrUnsupportedP2PLink.
func GetP2PLink(dev1, dev2 *Device) (link P2PLinkType, err error) {
	level, err := deviceGetTopologyCommonAncestor(dev1.handle, dev2.handle)
	if err != nil || level == nil {
		return P2PLinkUnknown, err
	}
	switch *level {
	case C.NVML_TOPOLOGY_INTERNAL:
		link = P2PLinkSameBoard
	case C.NVML_TOPOLOGY_SINGLE:
		link = P2PLinkSingleSwitch
	case C.NVML_TOPOLOGY_MULTIPLE:
		link = P2PLinkMultiSwitch
	case C.NVML_TOPOLOGY_HOSTBRIDGE:
		link = P2PLinkHostBridge
	case C.NVML_TOPOLOGY_CPU:
		link = P2PLinkSameCPU
	case C.NVML_TOPOLOGY_SYSTEM:
		link = P2PLinkCrossCPU
	default:
		err = ErrUnsupportedP2PLink
	}
	return
}
// GetComputeRunningProcesses exposes the compute process list
// (pids and GPU memory in bytes) of the device.
func (d *Device) GetComputeRunningProcesses() ([]uint, []uint64, error) {
	return d.handle.deviceGetComputeRunningProcesses()
}

// GetGraphicsRunningProcesses exposes the graphics process list
// (pids and GPU memory in bytes) of the device.
func (d *Device) GetGraphicsRunningProcesses() ([]uint, []uint64, error) {
	return d.handle.deviceGetGraphicsRunningProcesses()
}

// GetAllRunningProcesses exposes the merged, pid-sorted process list
// of the device (memory in MiB).
func (d *Device) GetAllRunningProcesses() ([]ProcessInfo, error) {
	return d.handle.deviceGetAllRunningProcesses()
}

// GetDeviceMode gathers the display, persistence and accounting modes
// of the device, using the same assert/recover error funneling as
// NewDevice and Status.
func (d *Device) GetDeviceMode() (mode *DeviceMode, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = r.(error)
		}
	}()
	display, err := d.getDisplayInfo()
	assert(err)
	p, err := d.getPeristenceMode()
	assert(err)
	accounting, err := d.getAccountingInfo()
	assert(err)
	mode = &DeviceMode{
		DisplayInfo:    display,
		Persistence:    p,
		AccountingInfo: accounting,
	}
	return
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,46 @@
// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
#include <stddef.h>
#include <dlfcn.h>
#include "nvml_dl.h"
#define DLSYM(x, sym) \
do { \
dlerror(); \
x = dlsym(handle, #sym); \
if (dlerror() != NULL) { \
return (NVML_ERROR_FUNCTION_NOT_FOUND); \
} \
} while (0)
typedef nvmlReturn_t (*nvmlSym_t)();
static void *handle;
/* nvmlInit_dl loads libnvidia-ml.so.1 at runtime (RTLD_GLOBAL so its
 * symbols satisfy the lazily-bound references elsewhere) and then
 * initializes NVML. Returns NVML_ERROR_LIBRARY_NOT_FOUND if the
 * library cannot be opened. */
nvmlReturn_t NVML_DL(nvmlInit)(void)
{
	handle = dlopen("libnvidia-ml.so.1", RTLD_LAZY | RTLD_GLOBAL);
	if (handle == NULL) {
		return (NVML_ERROR_LIBRARY_NOT_FOUND);
	}
	return (nvmlInit());
}

/* nvmlShutdown_dl shuts NVML down and, on success, unloads the
 * library handle opened by nvmlInit_dl. A dlclose failure is mapped
 * to NVML_ERROR_UNKNOWN. */
nvmlReturn_t NVML_DL(nvmlShutdown)(void)
{
	nvmlReturn_t r = nvmlShutdown();
	if (r != NVML_SUCCESS) {
		return (r);
	}
	return (dlclose(handle) ? NVML_ERROR_UNKNOWN : NVML_SUCCESS);
}

/* nvmlDeviceGetTopologyCommonAncestor_dl resolves the symbol at call
 * time via DLSYM so drivers that predate this API return
 * NVML_ERROR_FUNCTION_NOT_FOUND instead of failing to load. */
nvmlReturn_t NVML_DL(nvmlDeviceGetTopologyCommonAncestor)(
    nvmlDevice_t dev1, nvmlDevice_t dev2, nvmlGpuTopologyLevel_t *info)
{
	nvmlSym_t sym;
	DLSYM(sym, nvmlDeviceGetTopologyCommonAncestor);
	return ((*sym)(dev1, dev2, info));
}

View File

@@ -0,0 +1,15 @@
// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
#ifndef _NVML_DL_H_
#define _NVML_DL_H_
#include "nvml.h"
#define NVML_DL(x) x##_dl
extern nvmlReturn_t NVML_DL(nvmlInit)(void);
extern nvmlReturn_t NVML_DL(nvmlShutdown)(void);
extern nvmlReturn_t NVML_DL(nvmlDeviceGetTopologyCommonAncestor)(
nvmlDevice_t, nvmlDevice_t, nvmlGpuTopologyLevel_t *);
#endif // _NVML_DL_H_

View File

@@ -0,0 +1,25 @@
Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of NVIDIA CORPORATION nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,36 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: #PACKAGE#
Source: https://github.com/NVIDIA/nvidia-docker
Files: *
Copyright: #YEAR# #USERNAME# <#EMAIL#>
License: BSD-3-Clause
Files: debian/*
Copyright: #YEAR# #USERNAME# <#EMAIL#>
License: BSD-3-Clause
License: BSD-3-Clause
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of #USERNAME# nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1 @@
../common/nvidia-docker.service

View File

@@ -0,0 +1,25 @@
Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of NVIDIA CORPORATION nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1 @@
../../common/nvidia-docker.service

View File

@@ -0,0 +1,311 @@
// Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
package nvml
// #cgo LDFLAGS: -ldl -Wl,--unresolved-symbols=ignore-in-object-files
// #include "nvml_dl.h"
import "C"
import (
"errors"
"fmt"
)
const (
szDriver = C.NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE
szName = C.NVML_DEVICE_NAME_BUFFER_SIZE
szUUID = C.NVML_DEVICE_UUID_BUFFER_SIZE
szProcs = 32
szProcName = 64
)
type handle struct{ dev C.nvmlDevice_t }
func uintPtr(c C.uint) *uint {
i := uint(c)
return &i
}
func uint64Ptr(c C.ulonglong) *uint64 {
i := uint64(c)
return &i
}
func stringPtr(c *C.char) *string {
s := C.GoString(c)
return &s
}
func errorString(ret C.nvmlReturn_t) error {
if ret == C.NVML_SUCCESS {
return nil
}
err := C.GoString(C.nvmlErrorString(ret))
return fmt.Errorf("nvml: %v", err)
}
func init_() error {
r := C.nvmlInit_dl()
if r == C.NVML_ERROR_LIBRARY_NOT_FOUND {
return errors.New("could not load NVML library")
}
return errorString(r)
}
func shutdown() error {
return errorString(C.nvmlShutdown_dl())
}
func systemGetDriverVersion() (string, error) {
var driver [szDriver]C.char
r := C.nvmlSystemGetDriverVersion(&driver[0], szDriver)
return C.GoString(&driver[0]), errorString(r)
}
func systemGetProcessName(pid uint) (string, error) {
var proc [szProcName]C.char
r := C.nvmlSystemGetProcessName(C.uint(pid), &proc[0], szProcName)
return C.GoString(&proc[0]), errorString(r)
}
func deviceGetCount() (uint, error) {
var n C.uint
r := C.nvmlDeviceGetCount(&n)
return uint(n), errorString(r)
}
func deviceGetHandleByIndex(idx uint) (handle, error) {
var dev C.nvmlDevice_t
r := C.nvmlDeviceGetHandleByIndex(C.uint(idx), &dev)
return handle{dev}, errorString(r)
}
func deviceGetTopologyCommonAncestor(h1, h2 handle) (*uint, error) {
var level C.nvmlGpuTopologyLevel_t
r := C.nvmlDeviceGetTopologyCommonAncestor_dl(h1.dev, h2.dev, &level)
if r == C.NVML_ERROR_FUNCTION_NOT_FOUND || r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(C.uint(level)), errorString(r)
}
func (h handle) deviceGetName() (*string, error) {
var name [szName]C.char
r := C.nvmlDeviceGetName(h.dev, &name[0], szName)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return stringPtr(&name[0]), errorString(r)
}
func (h handle) deviceGetUUID() (*string, error) {
var uuid [szUUID]C.char
r := C.nvmlDeviceGetUUID(h.dev, &uuid[0], szUUID)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return stringPtr(&uuid[0]), errorString(r)
}
func (h handle) deviceGetPciInfo() (*string, error) {
var pci C.nvmlPciInfo_t
r := C.nvmlDeviceGetPciInfo(h.dev, &pci)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return stringPtr(&pci.busId[0]), errorString(r)
}
func (h handle) deviceGetMinorNumber() (*uint, error) {
var minor C.uint
r := C.nvmlDeviceGetMinorNumber(h.dev, &minor)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(minor), errorString(r)
}
func (h handle) deviceGetBAR1MemoryInfo() (*uint64, *uint64, error) {
var bar1 C.nvmlBAR1Memory_t
r := C.nvmlDeviceGetBAR1MemoryInfo(h.dev, &bar1)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil, nil
}
return uint64Ptr(bar1.bar1Total), uint64Ptr(bar1.bar1Used), errorString(r)
}
func (h handle) deviceGetPowerManagementLimit() (*uint, error) {
var power C.uint
r := C.nvmlDeviceGetPowerManagementLimit(h.dev, &power)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(power), errorString(r)
}
func (h handle) deviceGetMaxClockInfo() (*uint, *uint, error) {
var sm, mem C.uint
r := C.nvmlDeviceGetMaxClockInfo(h.dev, C.NVML_CLOCK_SM, &sm)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil, nil
}
if r == C.NVML_SUCCESS {
r = C.nvmlDeviceGetMaxClockInfo(h.dev, C.NVML_CLOCK_MEM, &mem)
}
return uintPtr(sm), uintPtr(mem), errorString(r)
}
func (h handle) deviceGetMaxPcieLinkGeneration() (*uint, error) {
var link C.uint
r := C.nvmlDeviceGetMaxPcieLinkGeneration(h.dev, &link)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(link), errorString(r)
}
func (h handle) deviceGetMaxPcieLinkWidth() (*uint, error) {
var width C.uint
r := C.nvmlDeviceGetMaxPcieLinkWidth(h.dev, &width)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(width), errorString(r)
}
func (h handle) deviceGetPowerUsage() (*uint, error) {
var power C.uint
r := C.nvmlDeviceGetPowerUsage(h.dev, &power)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(power), errorString(r)
}
func (h handle) deviceGetTemperature() (*uint, error) {
var temp C.uint
r := C.nvmlDeviceGetTemperature(h.dev, C.NVML_TEMPERATURE_GPU, &temp)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(temp), errorString(r)
}
func (h handle) deviceGetUtilizationRates() (*uint, *uint, error) {
var usage C.nvmlUtilization_t
r := C.nvmlDeviceGetUtilizationRates(h.dev, &usage)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil, nil
}
return uintPtr(usage.gpu), uintPtr(usage.memory), errorString(r)
}
func (h handle) deviceGetEncoderUtilization() (*uint, error) {
var usage, sampling C.uint
r := C.nvmlDeviceGetEncoderUtilization(h.dev, &usage, &sampling)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(usage), errorString(r)
}
func (h handle) deviceGetDecoderUtilization() (*uint, error) {
var usage, sampling C.uint
r := C.nvmlDeviceGetDecoderUtilization(h.dev, &usage, &sampling)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil
}
return uintPtr(usage), errorString(r)
}
// deviceGetMemoryInfo returns the device memory currently in use, in
// the raw units NVML reports (bytes; note this older binding performs
// no MiB conversion, unlike the newer variant of this function).
// Returns nil with a nil error when the query is unsupported.
func (h handle) deviceGetMemoryInfo() (*uint64, error) {
	var mem C.nvmlMemory_t
	r := C.nvmlDeviceGetMemoryInfo(h.dev, &mem)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil
	}
	return uint64Ptr(mem.used), errorString(r)
}
func (h handle) deviceGetClockInfo() (*uint, *uint, error) {
var sm, mem C.uint
r := C.nvmlDeviceGetClockInfo(h.dev, C.NVML_CLOCK_SM, &sm)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil, nil
}
if r == C.NVML_SUCCESS {
r = C.nvmlDeviceGetClockInfo(h.dev, C.NVML_CLOCK_MEM, &mem)
}
return uintPtr(sm), uintPtr(mem), errorString(r)
}
func (h handle) deviceGetMemoryErrorCounter() (*uint64, *uint64, *uint64, error) {
var l1, l2, mem C.ulonglong
r := C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_L1_CACHE, &l1)
if r == C.NVML_ERROR_NOT_SUPPORTED {
return nil, nil, nil, nil
}
if r == C.NVML_SUCCESS {
r = C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_L2_CACHE, &l2)
}
if r == C.NVML_SUCCESS {
r = C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_DEVICE_MEMORY, &mem)
}
return uint64Ptr(l1), uint64Ptr(l2), uint64Ptr(mem), errorString(r)
}
// deviceGetPcieThroughput returns the current PCIe RX and TX throughput
// (raw NVML units; Device.Status divides by 1000 to get MB/s). Returns
// all nils when the RX query is unsupported; TX is queried only if RX
// succeeded so the first failing status is reported.
func (h handle) deviceGetPcieThroughput() (*uint, *uint, error) {
	var rx, tx C.uint
	r := C.nvmlDeviceGetPcieThroughput(h.dev, C.NVML_PCIE_UTIL_RX_BYTES, &rx)
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil, nil
	}
	if r == C.NVML_SUCCESS {
		r = C.nvmlDeviceGetPcieThroughput(h.dev, C.NVML_PCIE_UTIL_TX_BYTES, &tx)
	}
	return uintPtr(rx), uintPtr(tx), errorString(r)
}
// deviceGetComputeRunningProcesses returns the PIDs and per-process GPU
// memory usage (bytes) of compute processes running on the device.
// Returns (nil, nil, nil) when the query is unsupported.
//
// At most szProcs entries can be retrieved; NOTE(review): if the device
// hosts more processes, NVML's status for an undersized buffer is
// surfaced via errorString — confirm szProcs is sized generously.
func (h handle) deviceGetComputeRunningProcesses() ([]uint, []uint64, error) {
	var procs [szProcs]C.nvmlProcessInfo_t
	// count is in/out: capacity on entry, actual process count on exit.
	var count = C.uint(szProcs)
	r := C.nvmlDeviceGetComputeRunningProcesses(h.dev, &count, &procs[0])
	if r == C.NVML_ERROR_NOT_SUPPORTED {
		return nil, nil, nil
	}
	n := int(count)
	pids := make([]uint, n)
	mems := make([]uint64, n)
	for i := 0; i < n; i++ {
		pids[i] = uint(procs[i].pid)
		mems[i] = uint64(procs[i].usedGpuMemory)
	}
	return pids, mems, errorString(r)
}

View File

@@ -0,0 +1,381 @@
// Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
package nvml
// #include "nvml_dl.h"
import "C"
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"strconv"
"strings"
)
// Sentinel errors returned by the high-level device helpers.
var (
	// ErrCPUAffinity wraps failures to parse a device's NUMA node.
	ErrCPUAffinity = errors.New("failed to retrieve CPU affinity")
	// ErrUnsupportedP2PLink is returned by GetP2PLink for topology
	// levels this package does not recognize.
	ErrUnsupportedP2PLink = errors.New("unsupported P2P link type")
	// ErrUnsupportedGPU is returned when a device lacks a minor
	// number, PCI bus ID or UUID.
	ErrUnsupportedGPU = errors.New("unsupported GPU device")
)
// P2PLinkType classifies the peer-to-peer connectivity between two GPUs.
type P2PLinkType uint

const (
	P2PLinkUnknown P2PLinkType = iota
	P2PLinkCrossCPU
	P2PLinkSameCPU
	P2PLinkHostBridge
	P2PLinkMultiSwitch
	P2PLinkSingleSwitch
	P2PLinkSameBoard
)

// P2PLink describes connectivity to a peer GPU identified by its PCI
// bus ID.
type P2PLink struct {
	BusID string
	Link  P2PLinkType
}

// String returns a human-readable description of the link type. Any
// unrecognized value — including P2PLinkUnknown — renders as "N/A".
func (t P2PLinkType) String() string {
	switch t {
	case P2PLinkSameBoard:
		return "Same board"
	case P2PLinkSingleSwitch:
		return "Single PCI switch"
	case P2PLinkMultiSwitch:
		return "Multiple PCI switches"
	case P2PLinkHostBridge:
		return "Host PCI bridge"
	case P2PLinkSameCPU:
		return "Same CPU socket"
	case P2PLinkCrossCPU:
		return "Cross CPU socket"
	default:
		return "N/A"
	}
}
// ClockInfo holds core (SM) and memory clock frequencies in MHz (see
// the annotations in NewDevice/Status). Fields are nil when the
// underlying NVML query is unsupported.
type ClockInfo struct {
	Cores  *uint
	Memory *uint
}

// PCIInfo describes a device's PCI identity and link capabilities.
type PCIInfo struct {
	BusID     string
	BAR1      *uint64 // BAR1 size, MiB (normalized in NewDevice)
	Bandwidth *uint   // max PCIe bandwidth, MB/s
}

// Device is a static description of one GPU, populated by NewDevice or
// NewDeviceLite. It embeds the low-level NVML handle used for queries.
type Device struct {
	handle
	UUID        string
	Path        string // device node, e.g. /dev/nvidia0
	Model       *string
	Power       *uint // power management limit, W
	CPUAffinity *uint // NUMA node of the device
	PCI         PCIInfo
	Clocks      ClockInfo // max clocks, MHz
	Topology    []P2PLink // presumably filled by callers via GetP2PLink — not populated in this file
}

// UtilizationInfo holds utilization percentages; nil when unsupported.
type UtilizationInfo struct {
	GPU     *uint
	Memory  *uint
	Encoder *uint
	Decoder *uint
}

// PCIThroughputInfo holds current PCIe throughput in MB/s.
type PCIThroughputInfo struct {
	RX *uint
	TX *uint
}

// PCIStatusInfo holds dynamic PCI state.
type PCIStatusInfo struct {
	BAR1Used   *uint64 // MiB
	Throughput PCIThroughputInfo
}

// ECCErrorsInfo holds volatile uncorrected ECC error counts.
type ECCErrorsInfo struct {
	L1Cache *uint64
	L2Cache *uint64
	Global  *uint64 // device memory
}

// MemoryInfo holds dynamic memory state.
type MemoryInfo struct {
	GlobalUsed *uint64 // MiB
	ECCErrors  ECCErrorsInfo
}

// ProcessInfo describes one compute process running on the device.
type ProcessInfo struct {
	PID        uint
	Name       string
	MemoryUsed uint64 // MiB
}

// DeviceStatus is a point-in-time snapshot returned by Device.Status.
type DeviceStatus struct {
	Power       *uint // W
	Temperature *uint // °C
	Utilization UtilizationInfo
	Memory      MemoryInfo
	Clocks      ClockInfo // current clocks, MHz
	PCI         PCIStatusInfo
	Processes   []ProcessInfo
}
func assert(err error) {
if err != nil {
panic(err)
}
}
// Init initializes NVML via the dynamic-loading shim (init_). Pair
// every successful Init with a Shutdown.
func Init() error {
	return init_()
}
// Shutdown tears down NVML; the counterpart of Init.
func Shutdown() error {
	return shutdown()
}
// GetDeviceCount returns the number of GPUs visible to NVML.
func GetDeviceCount() (uint, error) {
	return deviceGetCount()
}
// GetDriverVersion returns the installed NVIDIA driver version string.
func GetDriverVersion() (string, error) {
	return systemGetDriverVersion()
}
// numaNode resolves the NUMA node of the PCI device with the given bus
// ID by reading sysfs. A read failure is treated as "NUMA support not
// enabled" and reported as node 0 with no error, as is a negative node
// value (NUMA_NO_NODE). A value that fails to parse is wrapped in
// ErrCPUAffinity.
func numaNode(busid string) (uint, error) {
	sysfsPath := fmt.Sprintf("/sys/bus/pci/devices/%s/numa_node", strings.ToLower(busid))
	raw, err := ioutil.ReadFile(sysfsPath)
	if err != nil {
		// XXX report node 0 if NUMA support isn't enabled
		return 0, nil
	}
	parsed, err := strconv.ParseInt(string(bytes.TrimSpace(raw)), 10, 8)
	if err != nil {
		return 0, fmt.Errorf("%v: %v", ErrCPUAffinity, err)
	}
	if parsed >= 0 {
		return uint(parsed), nil
	}
	// XXX report node 0 instead of NUMA_NO_NODE
	return 0, nil
}
// pciBandwidth derives the maximum PCIe bandwidth in MB/s from the link
// generation and lane width. Returns nil when either input is nil; an
// unrecognized generation contributes 0 MB/s per lane.
func pciBandwidth(gen, width *uint) *uint {
	if gen == nil || width == nil {
		return nil
	}
	// Per-lane bandwidth in MB/s for PCIe generations 1-4.
	var perLane uint
	switch *gen {
	case 1:
		perLane = 250
	case 2:
		perLane = 500
	case 3:
		perLane = 985
	case 4:
		perLane = 1969
	}
	bw := perLane * *width
	return &bw
}
// NewDevice queries NVML for the device at index idx and assembles a
// fully-populated Device: model, power limit, PCI info, max clocks and
// NUMA affinity. Pointer fields are nil where the corresponding NVML
// query is unsupported. Returns ErrUnsupportedGPU when the minor
// number, PCI bus ID or UUID is unavailable.
//
// Errors from individual NVML calls are raised via assert/panic and
// converted back into the named return by the deferred recover.
func NewDevice(idx uint) (device *Device, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = r.(error)
		}
	}()
	h, err := deviceGetHandleByIndex(idx)
	assert(err)
	model, err := h.deviceGetName()
	assert(err)
	uuid, err := h.deviceGetUUID()
	assert(err)
	minor, err := h.deviceGetMinorNumber()
	assert(err)
	power, err := h.deviceGetPowerManagementLimit()
	assert(err)
	busid, err := h.deviceGetPciInfo()
	assert(err)
	bar1, _, err := h.deviceGetBAR1MemoryInfo()
	assert(err)
	pcig, err := h.deviceGetMaxPcieLinkGeneration()
	assert(err)
	pciw, err := h.deviceGetMaxPcieLinkWidth()
	assert(err)
	ccore, cmem, err := h.deviceGetMaxClockInfo()
	assert(err)
	// These three identify the device; without them it cannot be used.
	if minor == nil || busid == nil || uuid == nil {
		return nil, ErrUnsupportedGPU
	}
	// The minor number maps directly to the device node name.
	path := fmt.Sprintf("/dev/nvidia%d", *minor)
	node, err := numaNode(*busid)
	assert(err)
	device = &Device{
		handle:      h,
		UUID:        *uuid,
		Path:        path,
		Model:       model,
		Power:       power,
		CPUAffinity: &node,
		PCI: PCIInfo{
			BusID:     *busid,
			BAR1:      bar1,
			Bandwidth: pciBandwidth(pcig, pciw), // MB/s
		},
		Clocks: ClockInfo{
			Cores:  ccore, // MHz
			Memory: cmem,  // MHz
		},
	}
	// Normalize raw NVML units in place: power mW -> W, BAR1 bytes -> MiB.
	if power != nil {
		*device.Power /= 1000 // W
	}
	if bar1 != nil {
		*device.PCI.BAR1 /= 1024 * 1024 // MiB
	}
	return
}
// NewDeviceLite is a cheaper variant of NewDevice that fills in only
// the UUID, device node path and PCI bus ID. Returns ErrUnsupportedGPU
// when the minor number, PCI bus ID or UUID is unavailable. Errors from
// NVML calls are raised via assert/panic and converted back by the
// deferred recover.
func NewDeviceLite(idx uint) (device *Device, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = r.(error)
		}
	}()
	h, err := deviceGetHandleByIndex(idx)
	assert(err)
	uuid, err := h.deviceGetUUID()
	assert(err)
	minor, err := h.deviceGetMinorNumber()
	assert(err)
	busid, err := h.deviceGetPciInfo()
	assert(err)
	if minor == nil || busid == nil || uuid == nil {
		return nil, ErrUnsupportedGPU
	}
	path := fmt.Sprintf("/dev/nvidia%d", *minor)
	device = &Device{
		handle: h,
		UUID:   *uuid,
		Path:   path,
		PCI: PCIInfo{
			BusID: *busid,
		},
	}
	return
}
// Status polls NVML for the device's dynamic state: power draw,
// temperature, utilization, memory use, ECC error counts, current
// clocks, PCIe throughput and running compute processes. Pointer
// fields in the result are nil where the query is unsupported on this
// device. Errors from individual NVML calls are raised via
// assert/panic and converted back by the deferred recover.
func (d *Device) Status() (status *DeviceStatus, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = r.(error)
		}
	}()
	power, err := d.deviceGetPowerUsage()
	assert(err)
	temp, err := d.deviceGetTemperature()
	assert(err)
	ugpu, umem, err := d.deviceGetUtilizationRates()
	assert(err)
	uenc, err := d.deviceGetEncoderUtilization()
	assert(err)
	udec, err := d.deviceGetDecoderUtilization()
	assert(err)
	mem, err := d.deviceGetMemoryInfo()
	assert(err)
	ccore, cmem, err := d.deviceGetClockInfo()
	assert(err)
	// Only the *used* portion of BAR1 is relevant here (the size is
	// captured once in NewDevice).
	_, bar1, err := d.deviceGetBAR1MemoryInfo()
	assert(err)
	pids, pmems, err := d.deviceGetComputeRunningProcesses()
	assert(err)
	el1, el2, emem, err := d.deviceGetMemoryErrorCounter()
	assert(err)
	pcirx, pcitx, err := d.deviceGetPcieThroughput()
	assert(err)
	status = &DeviceStatus{
		Power:       power,
		Temperature: temp, // °C
		Utilization: UtilizationInfo{
			GPU:     ugpu, // %
			Memory:  umem, // %
			Encoder: uenc, // %
			Decoder: udec, // %
		},
		Memory: MemoryInfo{
			GlobalUsed: mem,
			ECCErrors: ECCErrorsInfo{
				L1Cache: el1,
				L2Cache: el2,
				Global:  emem,
			},
		},
		Clocks: ClockInfo{
			Cores:  ccore, // MHz
			Memory: cmem,  // MHz
		},
		PCI: PCIStatusInfo{
			BAR1Used: bar1,
			Throughput: PCIThroughputInfo{
				RX: pcirx,
				TX: pcitx,
			},
		},
	}
	// Normalize raw NVML units in place: power mW -> W, memory and BAR1
	// bytes -> MiB, PCIe throughput /1000 -> MB/s.
	if power != nil {
		*status.Power /= 1000 // W
	}
	if mem != nil {
		*status.Memory.GlobalUsed /= 1024 * 1024 // MiB
	}
	if bar1 != nil {
		*status.PCI.BAR1Used /= 1024 * 1024 // MiB
	}
	if pcirx != nil {
		*status.PCI.Throughput.RX /= 1000 // MB/s
	}
	if pcitx != nil {
		*status.PCI.Throughput.TX /= 1000 // MB/s
	}
	// Resolve each process name; per-process memory is bytes -> MiB.
	for i := range pids {
		name, err := systemGetProcessName(pids[i])
		assert(err)
		status.Processes = append(status.Processes, ProcessInfo{
			PID:        pids[i],
			Name:       name,
			MemoryUsed: pmems[i] / (1024 * 1024), // MiB
		})
	}
	return
}
// GetP2PLink reports the most specific topology level connecting two
// devices, as returned by NVML's common-ancestor query. A nil level
// (query unsupported) yields P2PLinkUnknown with a nil error; an
// unrecognized level yields ErrUnsupportedP2PLink.
func GetP2PLink(dev1, dev2 *Device) (link P2PLinkType, err error) {
	level, err := deviceGetTopologyCommonAncestor(dev1.handle, dev2.handle)
	if err != nil || level == nil {
		return P2PLinkUnknown, err
	}
	// Map NVML topology levels onto this package's P2PLinkType values.
	switch *level {
	case C.NVML_TOPOLOGY_INTERNAL:
		link = P2PLinkSameBoard
	case C.NVML_TOPOLOGY_SINGLE:
		link = P2PLinkSingleSwitch
	case C.NVML_TOPOLOGY_MULTIPLE:
		link = P2PLinkMultiSwitch
	case C.NVML_TOPOLOGY_HOSTBRIDGE:
		link = P2PLinkHostBridge
	case C.NVML_TOPOLOGY_CPU:
		link = P2PLinkSameCPU
	case C.NVML_TOPOLOGY_SYSTEM:
		link = P2PLinkCrossCPU
	default:
		err = ErrUnsupportedP2PLink
	}
	return
}

View File

@@ -0,0 +1,46 @@
// Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#include <stddef.h>
#include <dlfcn.h>
#include "nvml_dl.h"
/*
 * DLSYM resolves NVML symbol `sym` from the dlopen()ed library into x.
 * dlerror() is called first to clear any stale error state, then
 * checked afterwards to distinguish a real lookup failure from a
 * symbol that legitimately resolved to NULL.
 */
#define DLSYM(x, sym) \
do { \
dlerror(); \
x = dlsym(handle, #sym); \
if (dlerror() != NULL) { \
return (NVML_ERROR_FUNCTION_NOT_FOUND); \
} \
} while (0)

/* Generic NVML function pointer type used with DLSYM. */
typedef nvmlReturn_t (*nvmlSym_t)();

/* Handle returned by dlopen() for libnvidia-ml; set in nvmlInit_dl. */
static void *handle;
/*
 * nvmlInit_dl loads libnvidia-ml.so.1 at runtime and initializes NVML.
 * RTLD_GLOBAL presumably makes the library's symbols visible to the
 * link-time NVML references elsewhere in the bindings — confirm.
 * NOTE(review): if nvmlInit() fails, the dlopen handle is not closed.
 */
nvmlReturn_t NVML_DL(nvmlInit)(void)
{
	handle = dlopen("libnvidia-ml.so.1", RTLD_LAZY | RTLD_GLOBAL);
	if (handle == NULL) {
		return (NVML_ERROR_LIBRARY_NOT_FOUND);
	}
	return (nvmlInit());
}
/*
 * nvmlShutdown_dl shuts NVML down and, only on success, unloads the
 * library. A failed dlclose() is reported as NVML_ERROR_UNKNOWN.
 */
nvmlReturn_t NVML_DL(nvmlShutdown)(void)
{
	nvmlReturn_t r = nvmlShutdown();
	if (r != NVML_SUCCESS) {
		return (r);
	}
	return (dlclose(handle) ? NVML_ERROR_UNKNOWN : NVML_SUCCESS);
}
/*
 * nvmlDeviceGetTopologyCommonAncestor_dl resolves the symbol at call
 * time via DLSYM and forwards to it — presumably because the symbol is
 * absent from older NVML versions (confirm); returns
 * NVML_ERROR_FUNCTION_NOT_FOUND when it cannot be resolved.
 */
nvmlReturn_t NVML_DL(nvmlDeviceGetTopologyCommonAncestor)(
nvmlDevice_t dev1, nvmlDevice_t dev2, nvmlGpuTopologyLevel_t *info)
{
	nvmlSym_t sym;

	DLSYM(sym, nvmlDeviceGetTopologyCommonAncestor);
	return ((*sym)(dev1, dev2, info));
}

View File

@@ -0,0 +1,15 @@
// Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#ifndef _NVML_DL_H_
#define _NVML_DL_H_

#include <nvml.h>

/* NVML_DL(x) names the dynamically-loading wrapper for NVML symbol x. */
#define NVML_DL(x) x##_dl

/* Load libnvidia-ml.so.1 and initialize NVML. */
extern nvmlReturn_t NVML_DL(nvmlInit)(void);
/* Shut NVML down and unload the library. */
extern nvmlReturn_t NVML_DL(nvmlShutdown)(void);
/* Resolved at call time; see nvml_dl.c. */
extern nvmlReturn_t NVML_DL(nvmlDeviceGetTopologyCommonAncestor)(
nvmlDevice_t, nvmlDevice_t, nvmlGpuTopologyLevel_t *);

#endif // _NVML_DL_H_

View File

@@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

View File

@@ -0,0 +1,145 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
import (
"reflect"
"unsafe"
)
const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = false

	// ptrSize is the size of a pointer on the current arch.
	ptrSize = unsafe.Sizeof((*byte)(nil))
)

// flag mirrors the unexported flag field of reflect.Value; its bit
// layout is probed at init time below.
type flag uintptr

var (
	// flagRO indicates whether the value field of a reflect.Value
	// is read-only.
	flagRO flag

	// flagAddr indicates whether the address of the reflect.Value's
	// value may be taken.
	flagAddr flag
)

// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)

// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations; init() verifies the probed layout
// matches one of them.
var okFlags = []struct {
	ro, addr flag
}{{
	// From Go 1.4 to 1.5
	ro:   1 << 5,
	addr: 1 << 7,
}, {
	// Up to Go tip.
	ro:   1<<5 | 1<<6,
	addr: 1 << 8,
}}

// flagValOffset is the byte offset of the flag field within
// reflect.Value, discovered via reflection over reflect itself.
var flagValOffset = func() uintptr {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	return field.Offset
}()
// flagField returns a pointer to the flag field of a reflect.Value.
// It relies on flagValOffset being the correct offset for this Go
// version (verified by init below); the pointer arithmetic reaches
// directly into the reflect.Value's memory.
func flagField(v *reflect.Value) *flag {
	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	// Already fully accessible — nothing to strip.
	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
		return v
	}
	// Clear the read-only bits and set the addressable bit directly in
	// the value's flag field.
	flagFieldPtr := flagField(&v)
	*flagFieldPtr &^= flagRO
	*flagFieldPtr |= flagAddr
	return v
}
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field. The flagRO and
// flagAddr bit masks are inferred empirically here rather than
// hard-coded, then validated against the known layouts in okFlags.
func init() {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
		panic("reflect.Value flag field has changed kind")
	}
	type t0 int
	var t struct {
		A t0
		// t0 will have flagEmbedRO set.
		t0
		// a will have flagStickyRO set
		a t0
	}
	vA := reflect.ValueOf(t).FieldByName("A")
	va := reflect.ValueOf(t).FieldByName("a")
	vt0 := reflect.ValueOf(t).FieldByName("t0")
	// Infer flagRO from the difference between the flags
	// for the (otherwise identical) fields in t.
	flagPublic := *flagField(&vA)
	flagWithRO := *flagField(&va) | *flagField(&vt0)
	flagRO = flagPublic ^ flagWithRO
	// Infer flagAddr from the difference between a value
	// taken from a pointer and not.
	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
	flagNoPtr := *flagField(&vA)
	flagPtr := *flagField(&vPtrA)
	flagAddr = flagNoPtr ^ flagPtr
	// Check that the inferred flags tally with one of the known versions.
	for _, f := range okFlags {
		if flagRO == f.ro && flagAddr == f.addr {
			return
		}
	}
	panic("reflect.Value read-only flag has changed semantics")
}

View File

@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe !go1.4
package spew
import "reflect"
const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available. This file is the
	// safe variant, so it is always true here.
	UnsafeDisabled = true
)

// unsafeReflectValue typically converts the passed reflect.Value into a one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	return v
}

View File

@@ -0,0 +1,341 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package: each fragment is converted to
// []byte once, here, instead of on every Write call.
var (
	panicBytes            = []byte("(PANIC=")
	plusBytes             = []byte("+")
	iBytes                = []byte("i")
	trueBytes             = []byte("true")
	falseBytes            = []byte("false")
	interfaceBytes        = []byte("(interface {})")
	commaNewlineBytes     = []byte(",\n")
	newlineBytes          = []byte("\n")
	openBraceBytes        = []byte("{")
	openBraceNewlineBytes = []byte("{\n")
	closeBraceBytes       = []byte("}")
	asteriskBytes         = []byte("*")
	colonBytes            = []byte(":")
	colonSpaceBytes       = []byte(": ")
	openParenBytes        = []byte("(")
	closeParenBytes       = []byte(")")
	spaceBytes            = []byte(" ")
	pointerChainBytes     = []byte("->")
	nilAngleBytes         = []byte("<nil>")
	maxNewlineBytes       = []byte("<max depth reached>\n")
	maxShortBytes         = []byte("<max>")
	circularBytes         = []byte("<already shown>")
	circularShortBytes    = []byte("<shown>")
	invalidAngleBytes     = []byte("<invalid>")
	openBracketBytes      = []byte("[")
	closeBracketBytes     = []byte("]")
	percentBytes          = []byte("%")
	precisionBytes        = []byte(".")
	openAngleBytes        = []byte("<")
	closeAngleBytes       = []byte(">")
	openMapBytes          = []byte("map[")
	closeMapBytes         = []byte("]")
	lenEqualsBytes        = []byte("len=")
	capEqualsBytes        = []byte("cap=")
)

// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during handleMethods
// calls. It must be invoked via defer so that recover() intercepts the
// in-flight panic; the recovered value is rendered as "(PANIC=...)".
func catchPanic(w io.Writer, v reflect.Value) {
	r := recover()
	if r == nil {
		return
	}
	w.Write(panicBytes)
	fmt.Fprintf(w, "%v", r)
	w.Write(closeParenBytes)
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value. Returns true when a method was invoked and
// recursion should stop (unless ContinueOnMethod is set).
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
	// We need an interface to check if the type implements the error or
	// Stringer interface. However, the reflect package won't give us an
	// interface on certain things like unexported struct fields in order
	// to enforce visibility rules. We use unsafe, when it's available,
	// to bypass these restrictions since this package does not mutate the
	// values.
	if !v.CanInterface() {
		if UnsafeDisabled {
			return false
		}
		v = unsafeReflectValue(v)
	}
	// Choose whether or not to do error and Stringer interface lookups against
	// the base type or a pointer to the base type depending on settings.
	// Technically calling one of these methods with a pointer receiver can
	// mutate the value, however, types which choose to satisfy an error or
	// Stringer interface with a pointer receiver should not be mutating their
	// state inside these interface methods.
	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
		v = unsafeReflectValue(v)
	}
	if v.CanAddr() {
		v = v.Addr()
	}
	// Is it an error or Stringer?
	switch iface := v.Interface().(type) {
	case error:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			w.Write(openParenBytes)
			w.Write([]byte(iface.Error()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}
		w.Write([]byte(iface.Error()))
		return true
	case fmt.Stringer:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			w.Write(openParenBytes)
			w.Write([]byte(iface.String()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}
		w.Write([]byte(iface.String()))
		return true
	}
	return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value as "(real+imagi)" using the
// specified float precision for both parts; the '+' appears only for a
// non-negative imaginary part.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
	w.Write(openParenBytes)
	w.Write([]byte(strconv.FormatFloat(real(c), 'g', -1, floatPrecision)))
	im := imag(c)
	if im >= 0 {
		w.Write(plusBytes)
	}
	w.Write([]byte(strconv.FormatFloat(im, 'g', -1, floatPrecision)))
	w.Write(iBytes)
	w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr as lowercase hexadecimal with a leading
// "0x" prefix to Writer w; a zero (nil) pointer is rendered as "<nil>".
func printHexPtr(w io.Writer, p uintptr) {
	num := uint64(p)
	if num == 0 {
		w.Write(nilAngleBytes)
		return
	}
	// A 64-bit value needs at most 16 hex digits plus 2 for "0x"; build
	// the digits right to left.
	buf := make([]byte, 18)
	pos := len(buf)
	for {
		pos--
		buf[pos] = hexDigits[num%16]
		num /= 16
		if num == 0 {
			break
		}
	}
	pos--
	buf[pos] = 'x'
	pos--
	buf[pos] = '0'
	// Emit only the bytes actually written.
	w.Write(buf[pos:])
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
	values  []reflect.Value
	strings []string // either nil, or surrogate sort keys with the same length as values
	cs      *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
//
// Precondition: values must be non-empty (guaranteed by sortValues);
// values[0] is inspected to choose the sort strategy.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
	vs := &valuesSorter{values: values, cs: cs}
	// Primitive kinds compare directly via valueSortLess — no keys needed.
	if canSortSimply(vs.values[0].Kind()) {
		return vs
	}
	// Otherwise try error/Stringer output as the surrogate key; a single
	// value lacking such a method abandons this strategy entirely.
	if !cs.DisableMethods {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			b := bytes.Buffer{}
			if !handleMethods(cs, &b, vs.values[i]) {
				vs.strings = nil
				break
			}
			vs.strings[i] = b.String()
		}
	}
	// Last resort: spew each value itself into a sortable string.
	if vs.strings == nil && cs.SpewKeys {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
		}
	}
	return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
	return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation. Surrogate keys, when present, are
// swapped in lockstep so they stay aligned with their values.
func (s *valuesSorter) Swap(i, j int) {
	s.values[i], s.values[j] = s.values[j], s.values[i]
	if s.strings != nil {
		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
	}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
// When surrogate string keys were built (see newValuesSorter), they are
// compared instead of the raw values.
func (s *valuesSorter) Less(i, j int) bool {
	if s.strings == nil {
		return valueSortLess(s.values[i], s.values[j])
	}
	return s.strings[i] < s.strings[j]
}
// sortValues sorts values in place, handling both native types and any
// type convertible to error or Stringer. Other inputs are ordered by
// their Value.String() output to keep the display stable. An empty
// slice is a no-op (and keeps newValuesSorter's non-empty precondition).
func sortValues(values []reflect.Value, cs *ConfigState) {
	if len(values) > 0 {
		sort.Sort(newValuesSorter(values, cs))
	}
}

View File

@@ -0,0 +1,306 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
	// Indent specifies the string to use for each indentation level. The
	// global config instance that all top-level functions use set this to a
	// single space by default. If you would like more indentation, you might
	// set this to a tab with "\t" or perhaps two spaces with "  ".
	Indent string

	// MaxDepth controls the maximum number of levels to descend into nested
	// data structures. The default, 0, means there is no limit.
	//
	// NOTE: Circular data structures are properly detected, so it is not
	// necessary to set this value unless you specifically want to limit deeply
	// nested data structures.
	MaxDepth int

	// DisableMethods specifies whether or not error and Stringer interfaces are
	// invoked for types that implement them.
	DisableMethods bool

	// DisablePointerMethods specifies whether or not to check for and invoke
	// error and Stringer interfaces on types which only accept a pointer
	// receiver when the current type is not a pointer.
	//
	// NOTE: This might be an unsafe action since calling one of these methods
	// with a pointer receiver could technically mutate the value, however,
	// in practice, types which choose to satisfy an error or Stringer
	// interface with a pointer receiver should not be mutating their state
	// inside these interface methods. As a result, this option relies on
	// access to the unsafe package, so it will not have any effect when
	// running in environments without access to the unsafe package such as
	// Google App Engine or with the "safe" build tag specified.
	DisablePointerMethods bool

	// DisablePointerAddresses specifies whether to disable the printing of
	// pointer addresses. This is useful when diffing data structures in tests.
	DisablePointerAddresses bool

	// DisableCapacities specifies whether to disable the printing of capacities
	// for arrays, slices, maps and channels. This is useful when diffing
	// data structures in tests.
	DisableCapacities bool

	// ContinueOnMethod specifies whether or not recursion should continue once
	// a custom error or Stringer interface is invoked. The default, false,
	// means it will print the results of invoking the custom error or Stringer
	// interface and return immediately instead of continuing to recurse into
	// the internals of the data type.
	//
	// NOTE: This flag does not have any effect if method invocation is disabled
	// via the DisableMethods or DisablePointerMethods options.
	ContinueOnMethod bool

	// SortKeys specifies map keys should be sorted before being printed. Use
	// this to have a more deterministic, diffable output. Note that only
	// native types (bool, int, uint, floats, uintptr and string) and types
	// that support the error or Stringer interfaces (if methods are
	// enabled) are supported, with other types sorted according to the
	// reflect.Value.String() output which guarantees display stability.
	SortKeys bool

	// SpewKeys specifies that, as a last resort attempt, map keys should
	// be spewed to strings and sorted by those strings. This is only
	// considered if SortKeys is true.
	SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
// It defaults to a single-space indent.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details. Argument wrapping is done by c.convertArgs.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details. Argument wrapping is done by
// c.convertArgs.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details. Argument wrapping is done by
// c.convertArgs.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	// Wrap every argument in a spew formatter before delegating to fmt.
	wrapped := c.convertArgs(a)
	n, err = fmt.Fprintln(w, wrapped...)
	return n, err
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
	// Wrap every argument in a spew formatter before delegating to fmt.
	wrapped := c.convertArgs(a)
	n, err = fmt.Print(wrapped...)
	return n, err
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
	// Wrap every argument in a spew formatter before delegating to fmt.
	wrapped := c.convertArgs(a)
	n, err = fmt.Printf(format, wrapped...)
	return n, err
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
	// Wrap every argument in a spew formatter before delegating to fmt.
	wrapped := c.convertArgs(a)
	n, err = fmt.Println(wrapped...)
	return n, err
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
	// Wrap every argument in a spew formatter before delegating to fmt.
	wrapped := c.convertArgs(a)
	return fmt.Sprint(wrapped...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
	// Wrap every argument in a spew formatter before delegating to fmt.
	wrapped := c.convertArgs(a)
	return fmt.Sprintf(format, wrapped...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
	// Wrap every argument in a spew formatter before delegating to fmt.
	wrapped := c.convertArgs(a)
	return fmt.Sprintln(wrapped...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Sprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
// The output is governed by the options set on this ConfigState.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
	fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
	// Dump is simply Fdump targeted at standard output.
	c.Fdump(os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
	// Capture the dump output in an in-memory buffer and return it.
	buf := new(bytes.Buffer)
	fdump(c, buf, a...)
	return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with c.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
	// Build the result in order so positional format verbs keep working.
	formatters = make([]interface{}, 0, len(args))
	for _, arg := range args {
		formatters = append(formatters, newFormatter(c, arg))
	}
	return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
//	Indent: " "
//	MaxDepth: 0
//	DisableMethods: false
//	DisablePointerMethods: false
//	ContinueOnMethod: false
//	SortKeys: false
func NewDefaultConfig() *ConfigState {
	// Only the indent differs from the zero value; everything else defaults.
	cs := ConfigState{Indent: " "}
	return &cs
}

View File

@@ -0,0 +1,211 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew

View File

@@ -0,0 +1,509 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
	// uint8Type is a reflect.Type representing a uint8. It is used to
	// convert cgo types to uint8 slices for hexdumping.
	uint8Type = reflect.TypeOf(uint8(0))

	// cCharRE is a regular expression that matches a cgo char.
	// It is used to detect character arrays to hexdump them.
	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)

	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
	// char. It is used to detect unsigned character arrays to hexdump
	// them.
	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)

	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
	// It is used to detect uint8_t arrays to hexdump them.
	// Note: cgo mangles C type names into the Go form matched here.
	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
	w                io.Writer       // destination for all dump output
	depth            int             // current nesting depth, drives indentation
	pointers         map[uintptr]int // visited pointer addrs -> depth, for cycle detection
	ignoreNextType   bool            // suppress type info for the next value (set by dumpPtr)
	ignoreNextIndent bool            // suppress indent for the next value (map/struct values)
	cs               *ConfigState    // configuration options governing the dump
}
// indent writes the indentation prefix for the current depth using the
// configured cs.Indent string. When the next indent has been flagged to be
// skipped (e.g. for values printed inline after a key), it only clears the
// flag and writes nothing.
func (d *dumpState) indent() {
	if !d.ignoreNextIndent {
		d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
		return
	}
	d.ignoreNextIndent = false
}
// unpackValue returns the value stored inside a non-nil interface when
// possible. This is useful for data types like structs, arrays, slices, and
// maps which can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() != reflect.Interface || v.IsNil() {
		return v
	}
	return v.Elem()
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
//
// It walks the full chain of indirection (e.g. ***T), recording each address
// so circular references are detected, then prints the type prefixed with one
// asterisk per indirection level, optionally the address chain, and finally
// the ultimate dereferenced value (or a <nil>/circular marker).
func (d *dumpState) dumpPtr(v reflect.Value) {
	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range d.pointers {
		if depth >= d.depth {
			delete(d.pointers, k)
		}
	}
	// Keep list of all dereferenced pointers to show later.
	pointerChain := make([]uintptr, 0)
	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		// An address already recorded at a shallower depth means the chain
		// has looped back onto itself; stop and flag the cycle.
		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
			cycleFound = true
			indirects--
			break
		}
		d.pointers[addr] = d.depth
		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}
	// Display type information, e.g. "(**main.Foo".
	d.w.Write(openParenBytes)
	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
	d.w.Write([]byte(ve.Type().String()))
	d.w.Write(closeParenBytes)
	// Display pointer information (the full address chain), unless disabled.
	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
		d.w.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				d.w.Write(pointerChainBytes)
			}
			printHexPtr(d.w, addr)
		}
		d.w.Write(closeParenBytes)
	}
	// Display dereferenced value.
	d.w.Write(openParenBytes)
	switch {
	case nilFound:
		d.w.Write(nilAngleBytes)
	case cycleFound:
		d.w.Write(circularBytes)
	default:
		// Type was already printed above; suppress it in the nested dump.
		d.ignoreNextType = true
		d.dump(ve)
	}
	d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
	// Determine whether this type should be hex dumped or not. Also,
	// for types which should be hexdumped, try to use the underlying data
	// first, then fall back to trying to convert them to a uint8 slice.
	var buf []uint8
	doConvert := false
	doHexDump := false
	numEntries := v.Len()
	if numEntries > 0 {
		vt := v.Index(0).Type()
		vts := vt.String()
		switch {
		// C types that need to be converted.
		case cCharRE.MatchString(vts):
			fallthrough
		case cUnsignedCharRE.MatchString(vts):
			fallthrough
		case cUint8tCharRE.MatchString(vts):
			doConvert = true
		// Try to use existing uint8 slices and fall back to converting
		// and copying if that fails.
		case vt.Kind() == reflect.Uint8:
			// We need an addressable interface to convert the type
			// to a byte slice. However, the reflect package won't
			// give us an interface on certain things like
			// unexported struct fields in order to enforce
			// visibility rules. We use unsafe, when available, to
			// bypass these restrictions since this package does not
			// mutate the values.
			vs := v
			if !vs.CanInterface() || !vs.CanAddr() {
				vs = unsafeReflectValue(vs)
			}
			if !UnsafeDisabled {
				vs = vs.Slice(0, numEntries)
				// Use the existing uint8 slice if it can be
				// type asserted.
				iface := vs.Interface()
				if slice, ok := iface.([]uint8); ok {
					buf = slice
					doHexDump = true
					break
				}
			}
			// The underlying data needs to be converted if it can't
			// be type asserted to a uint8 slice.
			doConvert = true
		}
		// Copy and convert the underlying type if needed.
		if doConvert && vt.ConvertibleTo(uint8Type) {
			// Convert and copy each element into a uint8 byte
			// slice.
			buf = make([]uint8, numEntries)
			for i := 0; i < numEntries; i++ {
				vv := v.Index(i)
				buf[i] = uint8(vv.Convert(uint8Type).Uint())
			}
			doHexDump = true
		}
	}
	// Hexdump the entire slice as needed, re-indenting hex.Dump's output to
	// the current depth and trimming the trailing indent it leaves behind.
	if doHexDump {
		indent := strings.Repeat(d.cs.Indent, d.depth)
		str := indent + hex.Dump(buf)
		str = strings.Replace(str, "\n", "\n"+indent, -1)
		str = strings.TrimRight(str, d.cs.Indent)
		d.w.Write([]byte(str))
		return
	}
	// Recursively call dump for each item, comma-separating all but the last.
	for i := 0; i < numEntries; i++ {
		d.dump(d.unpackValue(v.Index(i)))
		if i < (numEntries - 1) {
			d.w.Write(commaNewlineBytes)
		} else {
			d.w.Write(newlineBytes)
		}
	}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		d.w.Write(invalidAngleBytes)
		return
	}
	// Handle pointers specially (dumpPtr prints its own type information).
	if kind == reflect.Ptr {
		d.indent()
		d.dumpPtr(v)
		return
	}
	// Print type information unless already handled elsewhere.
	if !d.ignoreNextType {
		d.indent()
		d.w.Write(openParenBytes)
		d.w.Write([]byte(v.Type().String()))
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}
	d.ignoreNextType = false
	// Display length and capacity if the built-in len and cap functions
	// work with the value's kind and the len/cap itself is non-zero.
	valueLen, valueCap := 0, 0
	switch v.Kind() {
	case reflect.Array, reflect.Slice, reflect.Chan:
		valueLen, valueCap = v.Len(), v.Cap()
	case reflect.Map, reflect.String:
		valueLen = v.Len()
	}
	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
		d.w.Write(openParenBytes)
		if valueLen != 0 {
			d.w.Write(lenEqualsBytes)
			printInt(d.w, int64(valueLen), 10)
		}
		if !d.cs.DisableCapacities && valueCap != 0 {
			if valueLen != 0 {
				d.w.Write(spaceBytes)
			}
			d.w.Write(capEqualsBytes)
			printInt(d.w, int64(valueCap), 10)
		}
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}
	// Call Stringer/error interfaces if they exist and the handle methods flag
	// is enabled
	if !d.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(d.cs, d.w, v); handled {
				return
			}
		}
	}
	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.
	case reflect.Bool:
		printBool(d.w, v.Bool())
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(d.w, v.Int(), 10)
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(d.w, v.Uint(), 10)
	case reflect.Float32:
		printFloat(d.w, v.Float(), 32)
	case reflect.Float64:
		printFloat(d.w, v.Float(), 64)
	case reflect.Complex64:
		printComplex(d.w, v.Complex(), 32)
	case reflect.Complex128:
		printComplex(d.w, v.Complex(), 64)
	case reflect.Slice:
		// nil slices are indicated as different than empty slices.
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}
		fallthrough
	case reflect.Array:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			d.dumpSlice(v)
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)
	case reflect.String:
		d.w.Write([]byte(strconv.Quote(v.String())))
	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
		}
	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.
	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			numEntries := v.Len()
			keys := v.MapKeys()
			// Sort keys when requested for deterministic output.
			if d.cs.SortKeys {
				sortValues(keys, d.cs)
			}
			for i, key := range keys {
				d.dump(d.unpackValue(key))
				d.w.Write(colonSpaceBytes)
				// The value follows the key on the same line.
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.MapIndex(key)))
				if i < (numEntries - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)
	case reflect.Struct:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			vt := v.Type()
			numFields := v.NumField()
			for i := 0; i < numFields; i++ {
				d.indent()
				vtf := vt.Field(i)
				d.w.Write([]byte(vtf.Name))
				d.w.Write(colonSpaceBytes)
				// The field value follows its name on the same line.
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.Field(i)))
				if i < (numFields - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)
	case reflect.Uintptr:
		printHexPtr(d.w, uintptr(v.Uint()))
	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(d.w, v.Pointer())
	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it in case any new
	// types are added.
	default:
		if v.CanInterface() {
			fmt.Fprintf(d.w, "%v", v.Interface())
		} else {
			fmt.Fprintf(d.w, "%v", v.String())
		}
	}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states. Each argument is
// dumped on its own line(s); a fresh dumpState is used per argument so
// pointer/cycle tracking never leaks between arguments.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
	for _, arg := range a {
		// A nil interface carries no type information, so emit the
		// canonical "(interface {}) <nil>" line directly.
		if arg == nil {
			w.Write(interfaceBytes)
			w.Write(spaceBytes)
			w.Write(nilAngleBytes)
			w.Write(newlineBytes)
			continue
		}
		d := dumpState{
			w:        w,
			cs:       cs,
			pointers: make(map[uintptr]int),
		}
		d.dump(reflect.ValueOf(arg))
		d.w.Write(newlineBytes)
	}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
// The output is governed by the package-level spew.Config options.
func Fdump(w io.Writer, a ...interface{}) {
	fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
	// Capture the dump output in an in-memory buffer and return it.
	buf := new(bytes.Buffer)
	fdump(&Config, buf, a...)
	return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}

View File

@@ -0,0 +1,419 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by the fmt
// package: zero padding ('0'), left justify ('-'), always show sign ('+'),
// alternate format ('#'), and space for elided sign (' ').
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
	value          interface{}     // the value being formatted
	fs             fmt.State       // underlying fmt state (flags, width, precision, writer)
	depth          int             // current nesting depth
	pointers       map[uintptr]int // visited pointer addrs -> depth, for cycle detection
	ignoreNextType bool            // suppress type info for the next value (set by formatPtr)
	cs             *ConfigState    // configuration options governing formatting
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
	// Start with "%" and re-apply whichever supported flags were set,
	// always finishing with the 'v' verb.
	var b bytes.Buffer
	b.Write(percentBytes)
	for _, flag := range supportedFlags {
		if f.fs.Flag(int(flag)) {
			b.WriteRune(flag)
		}
	}
	b.WriteRune('v')
	return b.String()
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
	// Start with "%", re-apply set flags, then width, ".precision", and
	// finally the original verb.
	var b bytes.Buffer
	b.Write(percentBytes)
	for _, flag := range supportedFlags {
		if f.fs.Flag(int(flag)) {
			b.WriteRune(flag)
		}
	}
	if width, ok := f.fs.Width(); ok {
		b.WriteString(strconv.Itoa(width))
	}
	if precision, ok := f.fs.Precision(); ok {
		b.Write(precisionBytes)
		b.WriteString(strconv.Itoa(precision))
	}
	b.WriteRune(verb)
	return b.String()
}
// unpackValue returns the value stored inside a non-nil interface when
// possible and ensures that types for values which have been unpacked from an
// interface are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() != reflect.Interface {
		return v
	}
	// Re-enable type printing for the unpacked value.
	f.ignoreNextType = false
	if v.IsNil() {
		return v
	}
	return v.Elem()
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
//
// Depending on the active flags it prints either the full type with one
// asterisk per indirection level (%#v) or a compact "<**>" style prefix,
// optionally followed by the address chain (%+v), and finally the
// dereferenced value (or a <nil>/<shown> marker for nil pointers and cycles).
func (f *formatState) formatPtr(v reflect.Value) {
	// Display nil if top level pointer is nil.
	showTypes := f.fs.Flag('#')
	if v.IsNil() && (!showTypes || f.ignoreNextType) {
		f.fs.Write(nilAngleBytes)
		return
	}
	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range f.pointers {
		if depth >= f.depth {
			delete(f.pointers, k)
		}
	}
	// Keep list of all dereferenced pointers to possibly show later.
	pointerChain := make([]uintptr, 0)
	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		// An address already recorded at a shallower depth means the chain
		// has looped back onto itself; stop and flag the cycle.
		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
			cycleFound = true
			indirects--
			break
		}
		f.pointers[addr] = f.depth
		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}
	// Display type or indirection level depending on flags.
	if showTypes && !f.ignoreNextType {
		f.fs.Write(openParenBytes)
		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
		f.fs.Write([]byte(ve.Type().String()))
		f.fs.Write(closeParenBytes)
	} else {
		// When the walk stopped early, the remaining levels are still part
		// of the type string; count them so the indirection marker is right.
		if nilFound || cycleFound {
			indirects += strings.Count(ve.Type().String(), "*")
		}
		f.fs.Write(openAngleBytes)
		f.fs.Write([]byte(strings.Repeat("*", indirects)))
		f.fs.Write(closeAngleBytes)
	}
	// Display pointer information depending on flags.
	if f.fs.Flag('+') && (len(pointerChain) > 0) {
		f.fs.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				f.fs.Write(pointerChainBytes)
			}
			printHexPtr(f.fs, addr)
		}
		f.fs.Write(closeParenBytes)
	}
	// Display dereferenced value.
	switch {
	case nilFound:
		f.fs.Write(nilAngleBytes)
	case cycleFound:
		f.fs.Write(circularShortBytes)
	default:
		// Type was already printed above; suppress it in the nested format.
		f.ignoreNextType = true
		f.format(ve)
	}
}
// format is the main workhorse for providing the Formatter interface.  It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately.  It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		f.fs.Write(invalidAngleBytes)
		return
	}
	// Handle pointers specially.
	if kind == reflect.Ptr {
		f.formatPtr(v)
		return
	}
	// Print type information unless already handled elsewhere.
	if !f.ignoreNextType && f.fs.Flag('#') {
		f.fs.Write(openParenBytes)
		f.fs.Write([]byte(v.Type().String()))
		f.fs.Write(closeParenBytes)
	}
	f.ignoreNextType = false
	// Call Stringer/error interfaces if they exist and the handle methods
	// flag is enabled.
	if !f.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(f.cs, f.fs, v); handled {
				return
			}
		}
	}
	switch kind {
	case reflect.Invalid:
		// Do nothing.  We should never get here since invalid has already
		// been handled above.
	case reflect.Bool:
		printBool(f.fs, v.Bool())
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(f.fs, v.Int(), 10)
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(f.fs, v.Uint(), 10)
	case reflect.Float32:
		printFloat(f.fs, v.Float(), 32)
	case reflect.Float64:
		printFloat(f.fs, v.Float(), 64)
	case reflect.Complex64:
		printComplex(f.fs, v.Complex(), 32)
	case reflect.Complex128:
		printComplex(f.fs, v.Complex(), 64)
	case reflect.Slice:
		// nil slices should be indicated as different than empty slices.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		fallthrough
	case reflect.Array:
		f.fs.Write(openBracketBytes)
		f.depth++
		// Honor the configured maximum depth, if any, by truncating.
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			numEntries := v.Len()
			for i := 0; i < numEntries; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(v.Index(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBracketBytes)
	case reflect.String:
		f.fs.Write([]byte(v.String()))
	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
		}
	case reflect.Ptr:
		// Do nothing.  We should never get here since pointers have already
		// been handled above.
	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		f.fs.Write(openMapBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			keys := v.MapKeys()
			// Optionally sort keys for deterministic output.
			if f.cs.SortKeys {
				sortValues(keys, f.cs)
			}
			for i, key := range keys {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(key))
				f.fs.Write(colonBytes)
				f.ignoreNextType = true
				f.format(f.unpackValue(v.MapIndex(key)))
			}
		}
		f.depth--
		f.fs.Write(closeMapBytes)
	case reflect.Struct:
		numFields := v.NumField()
		f.fs.Write(openBraceBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			vt := v.Type()
			for i := 0; i < numFields; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				vtf := vt.Field(i)
				// Field names are only shown for the + and # flag variants.
				if f.fs.Flag('+') || f.fs.Flag('#') {
					f.fs.Write([]byte(vtf.Name))
					f.fs.Write(colonBytes)
				}
				f.format(f.unpackValue(v.Field(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBraceBytes)
	case reflect.Uintptr:
		printHexPtr(f.fs, uintptr(v.Uint()))
	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(f.fs, v.Pointer())
	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it if any get added.
	default:
		format := f.buildDefaultFormat()
		if v.CanInterface() {
			fmt.Fprintf(f.fs, format, v.Interface())
		} else {
			fmt.Fprintf(f.fs, format, v.String())
		}
	}
}
// Format satisfies the fmt.Formatter interface.  See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
	f.fs = fs

	switch {
	case verb != 'v':
		// Delegate every verb other than v to the standard fmt package.
		fmt.Fprintf(fs, f.constructOrigFormat(verb), f.value)
	case f.value == nil:
		// A nil value renders as <nil>, prefixed with the interface marker
		// when types were requested via the # flag.
		if fs.Flag('#') {
			fs.Write(interfaceBytes)
		}
		fs.Write(nilAngleBytes)
	default:
		f.format(reflect.ValueOf(f.value))
	}
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
	return &formatState{
		value:    v,
		cs:       cs,
		pointers: make(map[uintptr]int),
	}
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(&Config, v)
}

View File

@@ -0,0 +1,148 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter.  It
// returns the formatted string as a value that satisfies error.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
	wrapped := convertArgs(a)
	return fmt.Errorf(format, wrapped...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter.  It
// returns the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	wrapped := convertArgs(a)
	return fmt.Fprint(w, wrapped...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter.  It
// returns the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	wrapped := convertArgs(a)
	return fmt.Fprintf(w, format, wrapped...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter.
// See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	wrapped := convertArgs(a)
	return fmt.Fprintln(w, wrapped...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter.  It
// returns the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
	wrapped := convertArgs(a)
	return fmt.Print(wrapped...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter.  It
// returns the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
	wrapped := convertArgs(a)
	return fmt.Printf(format, wrapped...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter.  It
// returns the number of bytes written and any write error encountered.  See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
	wrapped := convertArgs(a)
	return fmt.Println(wrapped...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter.  It
// returns the resulting string.  See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
	wrapped := convertArgs(a)
	return fmt.Sprint(wrapped...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter.  It
// returns the resulting string.  See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
	wrapped := convertArgs(a)
	return fmt.Sprintf(format, wrapped...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter.
// It returns the resulting string.  See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
	wrapped := convertArgs(a)
	return fmt.Sprintln(wrapped...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length in which each argument has been wrapped in a default spew Formatter
// interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, 0, len(args))
	for _, arg := range args {
		formatters = append(formatters, NewFormatter(arg))
	}
	return formatters
}

View File

@@ -0,0 +1,5 @@
root = true
[*]
indent_style = tab
indent_size = 4

View File

@@ -0,0 +1,6 @@
# Setup a Global .gitignore for OS and editor generated files:
# https://help.github.com/articles/ignoring-files
# git config --global core.excludesfile ~/.gitignore_global
.vagrant
*.sublime-project

View File

@@ -0,0 +1,30 @@
sudo: false
language: go
go:
- 1.8.x
- 1.9.x
- tip
matrix:
allow_failures:
- go: tip
fast_finish: true
before_script:
- go get -u github.com/golang/lint/golint
script:
- go test -v --race ./...
after_script:
- test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
- test -z "$(golint ./... | tee /dev/stderr)"
- go vet ./...
os:
- linux
- osx
notifications:
email: false

View File

@@ -0,0 +1,52 @@
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# You can update this list using the following command:
#
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
# Please keep the list sorted.
Aaron L <aaron@bettercoder.net>
Adrien Bustany <adrien@bustany.org>
Amit Krishnan <amit.krishnan@oracle.com>
Anmol Sethi <me@anmol.io>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Case Nelson <case@teammating.com>
Chris Howey <chris@howey.me> <howeyc@gmail.com>
Christoffer Buchholz <christoffer.buchholz@gmail.com>
Daniel Wagner-Hall <dawagner@gmail.com>
Dave Cheney <dave@cheney.net>
Evan Phoenix <evan@fallingsnow.net>
Francisco Souza <f@souza.cc>
Hari haran <hariharan.uno@gmail.com>
John C Barstow
Kelvin Fo <vmirage@gmail.com>
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
Matt Layher <mdlayher@gmail.com>
Nathan Youngman <git@nathany.com>
Nickolai Zeldovich <nickolai@csail.mit.edu>
Patrick <patrick@dropbox.com>
Paul Hammond <paul@paulhammond.org>
Pawel Knap <pawelknap88@gmail.com>
Pieter Droogendijk <pieter@binky.org.uk>
Pursuit92 <JoshChase@techpursuit.net>
Riku Voipio <riku.voipio@linaro.org>
Rob Figueiredo <robfig@gmail.com>
Rodrigo Chiossi <rodrigochiossi@gmail.com>
Slawek Ligus <root@ooz.ie>
Soge Zhang <zhssoge@gmail.com>
Tiffany Jernigan <tiffany.jernigan@intel.com>
Tilak Sharma <tilaks@google.com>
Tom Payne <twpayne@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>
Vahe Khachikyan <vahe@live.ca>
Yukang <moorekang@gmail.com>
bronze1man <bronze1man@gmail.com>
debrando <denis.brandolini@gmail.com>
henrikedwards <henrik.edwards@gmail.com>
铁哥 <guotie.9@gmail.com>

View File

@@ -0,0 +1,317 @@
# Changelog
## v1.4.7 / 2018-01-09
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
* Tests: Fix missing verb on format string (thanks @rchiossi)
* Linux: Fix deadlock in Remove (thanks @aarondl)
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
* Docs: Moved FAQ into the README (thanks @vahe)
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
* Docs: replace references to OS X with macOS
## v1.4.2 / 2016-10-10
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
## v1.4.1 / 2016-10-04
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
## v1.4.0 / 2016-10-01
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
## v1.3.1 / 2016-06-28
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
## v1.3.0 / 2016-04-19
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
## v1.2.10 / 2016-03-02
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
## v1.2.9 / 2016-01-13
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
## v1.2.8 / 2015-12-17
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
* inotify: fix race in test
* enable race detection for continuous integration (Linux, Mac, Windows)
## v1.2.5 / 2015-10-17
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
## v1.2.1 / 2015-10-14
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
## v1.2.0 / 2015-02-08
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
## v1.1.1 / 2015-02-05
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
## v1.1.0 / 2014-12-12
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
* add low-level functions
* only need to store flags on directories
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
* done can be an unbuffered channel
* remove calls to os.NewSyscallError
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## v1.0.4 / 2014-09-07
* kqueue: add dragonfly to the build tags.
* Rename source code files, rearrange code so exported APIs are at the top.
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
## v1.0.3 / 2014-08-19
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
## v1.0.2 / 2014-08-17
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
## v1.0.0 / 2014-08-15
* [API] Remove AddWatch on Windows, use Add.
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
* Minor updates based on feedback from golint.
## dev / 2014-07-09
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
## dev / 2014-07-04
* kqueue: fix incorrect mutex used in Close()
* Update example to demonstrate usage of Op.
## dev / 2014-06-28
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
* Fix for String() method on Event (thanks Alex Brainman)
* Don't build on Plan 9 or Solaris (thanks @4ad)
## dev / 2014-06-21
* Events channel of type Event rather than *Event.
* [internal] use syscall constants directly for inotify and kqueue.
* [internal] kqueue: rename events to kevents and fileEvent to event.
## dev / 2014-06-19
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
* [internal] remove cookie from Event struct (unused).
* [internal] Event struct has the same definition across every OS.
* [internal] remove internal watch and removeWatch methods.
## dev / 2014-06-12
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
* [API] Pluralized channel names: Events and Errors.
* [API] Renamed FileEvent struct to Event.
* [API] Op constants replace methods like IsCreate().
## dev / 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## dev / 2014-05-23
* [API] Remove current implementation of WatchFlags.
* current implementation doesn't take advantage of OS for efficiency
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
* no tests for the current implementation
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
## v0.9.3 / 2014-12-31
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## v0.9.2 / 2014-08-17
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
## v0.9.1 / 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## v0.9.0 / 2014-01-17
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
## v0.8.12 / 2013-11-13
* [API] Remove FD_SET and friends from Linux adapter
## v0.8.11 / 2013-11-02
* [Doc] Add Changelog [#72][] (thanks @nathany)
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
## v0.8.10 / 2013-10-19
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
* [Doc] specify OS-specific limits in README (thanks @debrando)
## v0.8.9 / 2013-09-08
* [Doc] Contributing (thanks @nathany)
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
* [Doc] GoCI badge in README (Linux only) [#60][]
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
## v0.8.8 / 2013-06-17
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
## v0.8.7 / 2013-06-03
* [API] Make syscall flags internal
* [Fix] inotify: ignore event changes
* [Fix] race in symlink test [#45][] (reported by @srid)
* [Fix] tests on Windows
* lower case error messages
## v0.8.6 / 2013-05-23
* kqueue: Use EVT_ONLY flag on Darwin
* [Doc] Update README with full example
## v0.8.5 / 2013-05-09
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
## v0.8.4 / 2013-04-07
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
## v0.8.3 / 2013-03-13
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
## v0.8.2 / 2013-02-07
* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
## v0.8.1 / 2013-01-09
* [Fix] Windows path separators
* [Doc] BSD License
## v0.8.0 / 2012-11-09
* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
## v0.7.4 / 2012-10-09
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file
## v0.7.3 / 2012-09-27
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events
## v0.7.2 / 2012-09-01
* kqueue: events for created directories
## v0.7.1 / 2012-07-14
* [Fix] for renaming files
## v0.7.0 / 2012-07-02
* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path
## v0.6.0 / 2012-06-06
* kqueue: watch files after directory created (thanks @tmc)
## v0.5.1 / 2012-05-22
* [Fix] inotify: remove all watches before Close()
## v0.5.0 / 2012-05-03
* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
* inotify: add `DELETE_SELF` (requested by @taralx)
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)
## v0.4.0 / 2012-03-30
* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
* Roll attribute notifications into IsModify
## v0.3.0 / 2012-02-19
* kqueue: add files when watch directory
## v0.2.0 / 2011-12-30
* update to latest Go weekly code
## v0.1.0 / 2011-10-19
* kqueue: add watch on file creation to match inotify
* kqueue: create file event
* inotify: ignore `IN_IGNORED` events
* event String()
* linux: common FileEvent functions
* initial commit
[#79]: https://github.com/howeyc/fsnotify/pull/79
[#77]: https://github.com/howeyc/fsnotify/pull/77
[#72]: https://github.com/howeyc/fsnotify/issues/72
[#71]: https://github.com/howeyc/fsnotify/issues/71
[#70]: https://github.com/howeyc/fsnotify/issues/70
[#63]: https://github.com/howeyc/fsnotify/issues/63
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#60]: https://github.com/howeyc/fsnotify/issues/60
[#59]: https://github.com/howeyc/fsnotify/issues/59
[#49]: https://github.com/howeyc/fsnotify/issues/49
[#45]: https://github.com/howeyc/fsnotify/issues/45
[#40]: https://github.com/howeyc/fsnotify/issues/40
[#36]: https://github.com/howeyc/fsnotify/issues/36
[#33]: https://github.com/howeyc/fsnotify/issues/33
[#29]: https://github.com/howeyc/fsnotify/issues/29
[#25]: https://github.com/howeyc/fsnotify/issues/25
[#24]: https://github.com/howeyc/fsnotify/issues/24
[#21]: https://github.com/howeyc/fsnotify/issues/21

View File

@@ -0,0 +1,77 @@
# Contributing
## Issues
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
* Please indicate the platform you are using fsnotify on.
* A code example to reproduce the problem is appreciated.
## Pull Requests
### Contributor License Agreement
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
Please indicate that you have signed the CLA in your pull request.
### How fsnotify is Developed
* Development is done on feature branches.
* Tests are run on BSD, Linux, macOS and Windows.
* Pull requests are reviewed and [applied to master][am] using [hub][].
* Maintainers may modify or squash commits rather than asking contributors to.
* To issue a new release, the maintainers will:
* Update the CHANGELOG
* Tag a version, which will become available through gopkg.in.
### How to Fork
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Ensure everything works and the tests pass (see below)
4. Commit your changes (`git commit -am 'Add some feature'`)
Contribute upstream:
1. Fork fsnotify on GitHub
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
3. Push to the branch (`git push fork my-new-feature`)
4. Create a new Pull Request on GitHub
This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
### Testing
fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
* When you're done, you will want to halt or destroy the Vagrant boxes.
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
### Maintainers
Help maintaining fsnotify is welcome. To be a maintainer:
* Submit a pull request and sign the CLA as above.
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
All code changes should be internal pull requests.
Releases are tagged using [Semantic Versioning](http://semver.org/).
[hub]: https://github.com/github/hub
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs

View File

@@ -0,0 +1,28 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,79 @@
# File system notifications for Go
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
```console
go get -u golang.org/x/sys/...
```
Cross platform: Windows, Linux, BSD and macOS.
|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
\* Android and iOS are untested.
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
## Contributing
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
## Example
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
## FAQ
**When a file is moved to another directory is it still being watched?**
No (it shouldn't be, unless you are watching where it was moved to).
**When I watch a directory, are all subdirectories watched as well?**
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
**Do I have to watch the Error and Event channels in a separate goroutine?**
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]).
**Why am I receiving multiple events for the same file on OS X?**
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
**How many files can be watched at once?**
There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#18]: https://github.com/fsnotify/fsnotify/issues/18
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#7]: https://github.com/howeyc/fsnotify/issues/7
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
## Related Projects
* [notify](https://github.com/rjeczalik/notify)
* [fsevents](https://github.com/fsnotify/fsevents)

View File

@@ -0,0 +1,37 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build solaris
package fsnotify
import (
"errors"
)
// Watcher watches a set of files, delivering events to a channel.
//
// On Solaris this is only a stub: FEN support is not implemented yet,
// so the channels are never written to.
type Watcher struct {
	Events chan Event
	Errors chan error
}

// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
//
// FEN-based watching is not yet supported, so this always returns a nil
// Watcher and a descriptive error.
func NewWatcher() (*Watcher, error) {
	// Error strings must not end in a newline (flagged by go vet / staticcheck ST1005).
	return nil, errors.New("FEN based watcher not yet supported for fsnotify")
}

// Close removes all watches and closes the events channel.
// It is a no-op on this stub implementation.
func (w *Watcher) Close() error {
	return nil
}

// Add starts watching the named file or directory (non-recursively).
// It is a no-op on this stub implementation.
func (w *Watcher) Add(name string) error {
	return nil
}

// Remove stops watching the named file or directory (non-recursively).
// It is a no-op on this stub implementation.
func (w *Watcher) Remove(name string) error {
	return nil
}

View File

@@ -0,0 +1,66 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
// Package fsnotify provides a platform-independent interface for file system notifications.
package fsnotify
import (
"bytes"
"errors"
"fmt"
)
// Event represents a single file system notification.
type Event struct {
	Name string // Relative path to the file or directory.
	Op   Op     // File operation that triggered the event.
}

// Op describes a set of file operations.
type Op uint32

// These are the generalized file operations that can trigger a notification.
const (
	Create Op = 1 << iota
	Write
	Remove
	Rename
	Chmod
)

// opNames maps each Op bit to its display name, in the fixed order
// used by Op.String.
var opNames = []struct {
	bit  Op
	name string
}{
	{Create, "CREATE"},
	{Remove, "REMOVE"},
	{Write, "WRITE"},
	{Rename, "RENAME"},
	{Chmod, "CHMOD"},
}

// String returns a "|"-separated list of the operations set in op,
// e.g. "CREATE|WRITE". It returns the empty string when no bits are set.
func (op Op) String() string {
	var b bytes.Buffer
	for _, entry := range opNames {
		if op&entry.bit == 0 {
			continue
		}
		if b.Len() > 0 {
			b.WriteByte('|')
		}
		b.WriteString(entry.name)
	}
	return b.String()
}

// String returns a string representation of the event in the form
// "file: REMOVE|WRITE|..."
func (e Event) String() string {
	// %s on e.Op invokes Op.String above.
	return fmt.Sprintf("%q: %s", e.Name, e.Op)
}

// Common errors that can be reported by a watcher
var ErrEventOverflow = errors.New("fsnotify queue overflow")

View File

@@ -0,0 +1,337 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"unsafe"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
	Events   chan Event
	Errors   chan error
	mu       sync.Mutex        // Map access
	fd       int               // inotify file descriptor (from inotify_init1)
	poller   *fdPoller         // epoll-based poller over fd plus a wakeup pipe
	watches  map[string]*watch // Map of inotify watches (key: path)
	paths    map[int]string    // Map of watched paths (key: watch descriptor)
	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
	doneResp chan struct{}     // Channel to respond to Close
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
	// Create inotify fd; IN_CLOEXEC keeps it from leaking into exec'd children.
	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
	if fd == -1 {
		return nil, errno
	}
	// Create epoll poller; on failure the inotify fd must be released here.
	poller, err := newFdPoller(fd)
	if err != nil {
		unix.Close(fd)
		return nil, err
	}
	w := &Watcher{
		fd:       fd,
		poller:   poller,
		watches:  make(map[string]*watch),
		paths:    make(map[int]string),
		Events:   make(chan Event),
		Errors:   make(chan error),
		done:     make(chan struct{}),
		doneResp: make(chan struct{}),
	}

	go w.readEvents()
	return w, nil
}
// isClosed reports whether Close has been called (i.e. w.done is closed).
func (w *Watcher) isClosed() bool {
	select {
	case <-w.done:
		return true
	default:
		return false
	}
}

// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
	if w.isClosed() {
		return nil
	}

	// Send 'close' signal to goroutine, and set the Watcher to closed.
	close(w.done)

	// Wake up goroutine
	w.poller.wake()

	// Wait for goroutine to close
	<-w.doneResp

	return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
	name = filepath.Clean(name)
	if w.isClosed() {
		return errors.New("inotify instance already closed")
	}

	// The inotify mask bits that map onto the cross-platform Op bits (see newEvent).
	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF

	var flags uint32 = agnosticEvents

	w.mu.Lock()
	defer w.mu.Unlock()
	watchEntry := w.watches[name]
	if watchEntry != nil {
		// Already watching: IN_MASK_ADD merges with the existing kernel mask
		// instead of replacing it.
		flags |= watchEntry.flags | unix.IN_MASK_ADD
	}
	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
	if wd == -1 {
		return errno
	}

	if watchEntry == nil {
		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
		w.paths[wd] = name
	} else {
		watchEntry.wd = uint32(wd)
		watchEntry.flags = flags
	}

	return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
	name = filepath.Clean(name)

	// Fetch the watch.
	w.mu.Lock()
	defer w.mu.Unlock()
	watch, ok := w.watches[name]

	// Remove it from inotify.
	if !ok {
		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
	}

	// We successfully removed the watch if InotifyRmWatch doesn't return an
	// error, we need to clean up our internal state to ensure it matches
	// inotify's kernel state.
	delete(w.paths, int(watch.wd))
	delete(w.watches, name)

	// inotify_rm_watch will return EINVAL if the file has been deleted;
	// the inotify will already have been removed.
	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
	// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORED
	// so that EINVAL means that the wd is being rm_watch()ed or its file removed
	// by another thread and we have not received IN_IGNORED event.
	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
	if success == -1 {
		// TODO: Perhaps it's not helpful to return an error here in every case.
		// the only two possible errors are:
		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
		return errno
	}

	return nil
}

// watch describes a single inotify watch.
type watch struct {
	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel.
// It runs as a dedicated goroutine started by NewWatcher and owns closing
// the Events/Errors channels, the inotify fd, and the poller on exit.
func (w *Watcher) readEvents() {
	var (
		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
		n     int                                  // Number of bytes read with read()
		errno error                                // Syscall errno
		ok    bool                                 // For poller.wait
	)

	defer close(w.doneResp)
	defer close(w.Errors)
	defer close(w.Events)
	defer unix.Close(w.fd)
	defer w.poller.close()

	for {
		// See if we have been closed.
		if w.isClosed() {
			return
		}

		// Block until the inotify fd is readable (or we are woken by Close).
		ok, errno = w.poller.wait()
		if errno != nil {
			select {
			case w.Errors <- errno:
			case <-w.done:
				return
			}
			continue
		}

		if !ok {
			continue
		}

		n, errno = unix.Read(w.fd, buf[:])
		// If a signal interrupted execution, see if we've been asked to close, and try again.
		// http://man7.org/linux/man-pages/man7/signal.7.html :
		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
		if errno == unix.EINTR {
			continue
		}

		// unix.Read might have been woken up by Close. If so, we're done.
		if w.isClosed() {
			return
		}

		if n < unix.SizeofInotifyEvent {
			var err error
			if n == 0 {
				// If EOF is received. This should really never happen.
				err = io.EOF
			} else if n < 0 {
				// If an error occurred while reading.
				err = errno
			} else {
				// Read was too short.
				err = errors.New("notify: short read in readEvents()")
			}
			select {
			case w.Errors <- err:
			case <-w.done:
				return
			}
			continue
		}

		var offset uint32
		// We don't know how many events we just read into the buffer
		// While the offset points to at least one whole event...
		for offset <= uint32(n-unix.SizeofInotifyEvent) {
			// Point "raw" to the event in the buffer
			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))

			mask := uint32(raw.Mask)
			nameLen := uint32(raw.Len)

			if mask&unix.IN_Q_OVERFLOW != 0 {
				// The kernel queue overflowed: surface a dedicated error so
				// callers know events were dropped.
				select {
				case w.Errors <- ErrEventOverflow:
				case <-w.done:
					return
				}
			}

			// If the event happened to the watched directory or the watched file, the kernel
			// doesn't append the filename to the event, but we would like to always fill the
			// the "Name" field with a valid filename. We retrieve the path of the watch from
			// the "paths" map.
			w.mu.Lock()
			name, ok := w.paths[int(raw.Wd)]
			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
			// This is a sign to clean up the maps, otherwise we are no longer in sync
			// with the inotify kernel state which has already deleted the watch
			// automatically.
			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
				delete(w.paths, int(raw.Wd))
				delete(w.watches, name)
			}
			w.mu.Unlock()

			if nameLen > 0 {
				// Point "bytes" at the first byte of the filename
				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
			}

			event := newEvent(name, mask)

			// Send the events that are not ignored on the events channel
			if !event.ignoreLinux(mask) {
				select {
				case w.Events <- event:
				case <-w.done:
					return
				}
			}

			// Move to the next event in the buffer
			offset += unix.SizeofInotifyEvent + nameLen
		}
	}
}
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
func (e *Event) ignoreLinux(mask uint32) bool {
	// Ignore anything the inotify API says to ignore.
	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
		return true
	}

	// DELETE and RENAME events are always delivered; the file is expected
	// to be gone already.
	if e.Op&Remove == Remove || e.Op&Rename == Rename {
		return false
	}

	// For any other op the file must still exist. A MODIFY can arrive after
	// the DELETE; drop it here and rely on the DELETE event (past or
	// upcoming) instead.
	_, statErr := os.Lstat(e.Name)
	return os.IsNotExist(statErr)
}
// newEvent returns a platform-independent Event based on an inotify mask.
func newEvent(name string, mask uint32) Event {
	e := Event{Name: name}
	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
		e.Op |= Create
	}
	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
		e.Op |= Remove
	}
	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
		e.Op |= Write
	}
	// Moves map to Rename, whether the watched path itself moved (IN_MOVE_SELF)
	// or an entry moved out of a watched directory (IN_MOVED_FROM).
	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
		e.Op |= Rename
	}
	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
		e.Op |= Chmod
	}
	return e
}

View File

@@ -0,0 +1,187 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"golang.org/x/sys/unix"
)
// fdPoller waits for readability of an inotify file descriptor via epoll,
// using a self-pipe to interrupt the wait.
type fdPoller struct {
	fd   int    // File descriptor (as returned by the inotify_init() syscall)
	epfd int    // Epoll file descriptor
	pipe [2]int // Pipe for waking up
}

// emptyPoller returns a poller for fd whose epoll and pipe descriptors are
// all initialized to -1 (not yet opened).
func emptyPoller(fd int) *fdPoller {
	return &fdPoller{
		fd:   fd,
		epfd: -1,
		pipe: [2]int{-1, -1},
	}
}
// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
func newFdPoller(fd int) (*fdPoller, error) {
	var errno error
	poller := emptyPoller(fd)
	defer func() {
		// On any error path below, close whichever descriptors were already
		// opened (close skips fields still set to -1).
		if errno != nil {
			poller.close()
		}
	}()
	poller.fd = fd

	// Create epoll fd
	poller.epfd, errno = unix.EpollCreate1(0)
	if poller.epfd == -1 {
		return nil, errno
	}
	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
	if errno != nil {
		return nil, errno
	}

	// Register inotify fd with epoll
	event := unix.EpollEvent{
		Fd:     int32(poller.fd),
		Events: unix.EPOLLIN,
	}
	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
	if errno != nil {
		return nil, errno
	}

	// Register pipe fd with epoll
	event = unix.EpollEvent{
		Fd:     int32(poller.pipe[0]),
		Events: unix.EPOLLIN,
	}
	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
	if errno != nil {
		return nil, errno
	}

	return poller, nil
}
// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
func (poller *fdPoller) wait() (bool, error) {
	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
	// I don't know whether epoll_wait returns the number of events returned,
	// or the total number of events ready.
	// I decided to catch both by making the buffer one larger than the maximum.
	events := make([]unix.EpollEvent, 7)
	for {
		// Block indefinitely (-1 timeout); EINTR just retries.
		n, errno := unix.EpollWait(poller.epfd, events, -1)
		if n == -1 {
			if errno == unix.EINTR {
				continue
			}
			return false, errno
		}
		if n == 0 {
			// If there are no events, try again.
			continue
		}
		if n > 6 {
			// This should never happen. More events were returned than should be possible.
			return false, errors.New("epoll_wait returned more events than I know what to do with")
		}
		ready := events[:n]
		epollhup := false
		epollerr := false
		epollin := false
		for _, event := range ready {
			if event.Fd == int32(poller.fd) {
				if event.Events&unix.EPOLLHUP != 0 {
					// This should not happen, but if it does, treat it as a wakeup.
					epollhup = true
				}
				if event.Events&unix.EPOLLERR != 0 {
					// If an error is waiting on the file descriptor, we should pretend
					// something is ready to read, and let unix.Read pick up the error.
					epollerr = true
				}
				if event.Events&unix.EPOLLIN != 0 {
					// There is data to read.
					epollin = true
				}
			}
			if event.Fd == int32(poller.pipe[0]) {
				if event.Events&unix.EPOLLHUP != 0 {
					// Write pipe descriptor was closed, by us. This means we're closing down the
					// watcher, and we should wake up.
				}
				if event.Events&unix.EPOLLERR != 0 {
					// If an error is waiting on the pipe file descriptor.
					// This is an absolute mystery, and should never ever happen.
					return false, errors.New("Error on the pipe descriptor.")
				}
				if event.Events&unix.EPOLLIN != 0 {
					// This is a regular wakeup, so we have to clear the buffer.
					err := poller.clearWake()
					if err != nil {
						return false, err
					}
				}
			}
		}

		if epollhup || epollerr || epollin {
			return true, nil
		}
		return false, nil
	}
}
// wake writes a single byte to the pipe, waking up a goroutine blocked in wait.
// (The previous comment here, "Close the write end of the poller.", did not
// describe this function.)
func (poller *fdPoller) wake() error {
	buf := make([]byte, 1)
	n, errno := unix.Write(poller.pipe[1], buf)
	if n == -1 {
		if errno == unix.EAGAIN {
			// Buffer is full, poller will wake.
			return nil
		}
		return errno
	}
	return nil
}

// clearWake drains the read end of the wakeup pipe after a wakeup.
func (poller *fdPoller) clearWake() error {
	// You have to be woken up a LOT in order to get to 100!
	buf := make([]byte, 100)
	n, errno := unix.Read(poller.pipe[0], buf)
	if n == -1 {
		if errno == unix.EAGAIN {
			// Buffer is empty, someone else cleared our wake.
			return nil
		}
		return errno
	}
	return nil
}

// Close all poller file descriptors, but not the one passed to it.
func (poller *fdPoller) close() {
	if poller.pipe[1] != -1 {
		unix.Close(poller.pipe[1])
	}
	if poller.pipe[0] != -1 {
		unix.Close(poller.pipe[0])
	}
	if poller.epfd != -1 {
		unix.Close(poller.epfd)
	}
}

View File

@@ -0,0 +1,521 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
	Events chan Event
	Errors chan error
	done   chan struct{} // Channel for sending a "quit message" to the reader goroutine

	kq int // File descriptor (as returned by the kqueue() syscall).

	mu              sync.Mutex        // Protects access to watcher data
	watches         map[string]int    // Map of watched file descriptors (key: path).
	externalWatches map[string]bool   // Map of watches added by user of the library.
	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
	isClosed        bool              // Set to true when Close() is first called
}

// pathInfo records the path a watched kqueue file descriptor refers to and
// whether that path is a directory.
type pathInfo struct {
	name  string
	isDir bool
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
	kq, err := kqueue()
	if err != nil {
		return nil, err
	}

	w := &Watcher{
		kq:              kq,
		watches:         make(map[string]int),
		dirFlags:        make(map[string]uint32),
		paths:           make(map[int]pathInfo),
		fileExists:      make(map[string]bool),
		externalWatches: make(map[string]bool),
		Events:          make(chan Event),
		Errors:          make(chan error),
		done:            make(chan struct{}),
	}

	go w.readEvents()
	return w, nil
}
// Close removes all watches and closes the events channel.
// It is safe to call more than once; subsequent calls are no-ops.
func (w *Watcher) Close() error {
	w.mu.Lock()
	if w.isClosed {
		w.mu.Unlock()
		return nil
	}
	w.isClosed = true

	// copy paths to remove while locked
	var pathsToRemove = make([]string, 0, len(w.watches))
	for name := range w.watches {
		pathsToRemove = append(pathsToRemove, name)
	}
	w.mu.Unlock()
	// unlock before calling Remove, which also locks
	for _, name := range pathsToRemove {
		w.Remove(name)
	}

	// send a "quit" message to the reader goroutine
	close(w.done)

	return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
	w.mu.Lock()
	// Record that this watch was requested by the user, so Remove will not
	// drop it while cleaning up a parent directory's internal child watches.
	w.externalWatches[name] = true
	w.mu.Unlock()
	_, err := w.addWatch(name, noteAllEvents)
	return err
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
	name = filepath.Clean(name)
	w.mu.Lock()
	watchfd, ok := w.watches[name]
	w.mu.Unlock()
	if !ok {
		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
	}

	const registerRemove = unix.EV_DELETE
	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
		return err
	}

	unix.Close(watchfd)

	w.mu.Lock()
	isDir := w.paths[watchfd].isDir
	delete(w.watches, name)
	delete(w.paths, watchfd)
	delete(w.dirFlags, name)
	w.mu.Unlock()

	// Find all watched paths that are in this directory that are not external.
	if isDir {
		var pathsToRemove []string
		w.mu.Lock()
		for _, path := range w.paths {
			wdir, _ := filepath.Split(path.name)
			if filepath.Clean(wdir) == name {
				if !w.externalWatches[path.name] {
					pathsToRemove = append(pathsToRemove, path.name)
				}
			}
		}
		w.mu.Unlock()
		for _, name := range pathsToRemove {
			// Since these are internal, not much sense in propagating error
			// to the user, as that will just confuse them with an error about
			// a path they did not explicitly watch themselves.
			w.Remove(name)
		}
	}

	return nil
}

// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME

// keventWaitTime to block on each read from kevent
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
	var isDir bool
	// Make ./name and name equivalent
	name = filepath.Clean(name)

	w.mu.Lock()
	if w.isClosed {
		w.mu.Unlock()
		return "", errors.New("kevent instance already closed")
	}
	watchfd, alreadyWatching := w.watches[name]
	// We already have a watch, but we can still override flags.
	if alreadyWatching {
		isDir = w.paths[watchfd].isDir
	}
	w.mu.Unlock()

	if !alreadyWatching {
		fi, err := os.Lstat(name)
		if err != nil {
			return "", err
		}

		// Don't watch sockets.
		if fi.Mode()&os.ModeSocket == os.ModeSocket {
			return "", nil
		}

		// Don't watch named pipes.
		if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
			return "", nil
		}

		// Follow Symlinks
		// Unfortunately, Linux can add bogus symlinks to watch list without
		// issue, and Windows can't do symlinks period (AFAIK). To maintain
		// consistency, we will act like everything is fine. There will simply
		// be no file events for broken symlinks.
		// Hence the returns of nil on errors.
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			name, err = filepath.EvalSymlinks(name)
			if err != nil {
				return "", nil
			}

			// Re-check under the resolved name: the target may already be watched.
			w.mu.Lock()
			_, alreadyWatching = w.watches[name]
			w.mu.Unlock()

			if alreadyWatching {
				return name, nil
			}

			fi, err = os.Lstat(name)
			if err != nil {
				return "", nil
			}
		}

		watchfd, err = unix.Open(name, openMode, 0700)
		if watchfd == -1 {
			return "", err
		}

		isDir = fi.IsDir()
	}

	const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
		unix.Close(watchfd)
		return "", err
	}

	if !alreadyWatching {
		w.mu.Lock()
		w.watches[name] = watchfd
		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
		w.mu.Unlock()
	}

	if isDir {
		// Watch the directory if it has not been watched before,
		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
		w.mu.Lock()

		watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
			(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
		// Store flags so this watch can be updated later
		w.dirFlags[name] = flags
		w.mu.Unlock()

		if watchDir {
			if err := w.watchDirectoryFiles(name); err != nil {
				return "", err
			}
		}
	}
	return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
// It runs as a dedicated goroutine started by NewWatcher and owns closing
// the kqueue fd and the Events/Errors channels on exit.
func (w *Watcher) readEvents() {
	eventBuffer := make([]unix.Kevent_t, 10)

loop:
	for {
		// See if there is a message on the "done" channel
		select {
		case <-w.done:
			break loop
		default:
		}

		// Get new events
		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
		// EINTR is okay, the syscall was interrupted before timeout expired.
		if err != nil && err != unix.EINTR {
			select {
			case w.Errors <- err:
			case <-w.done:
				break loop
			}
			continue
		}

		// Flush the events we received to the Events channel
		for len(kevents) > 0 {
			kevent := &kevents[0]
			watchfd := int(kevent.Ident)
			mask := uint32(kevent.Fflags)
			w.mu.Lock()
			path := w.paths[watchfd]
			w.mu.Unlock()
			event := newEvent(path.name, mask)

			if path.isDir && !(event.Op&Remove == Remove) {
				// Double check to make sure the directory exists. This can happen when
				// we do a rm -fr on a recursively watched folders and we receive a
				// modification event first but the folder has been deleted and later
				// receive the delete event
				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
					// mark is as delete event
					event.Op |= Remove
				}
			}

			if event.Op&Rename == Rename || event.Op&Remove == Remove {
				w.Remove(event.Name)
				w.mu.Lock()
				delete(w.fileExists, event.Name)
				w.mu.Unlock()
			}

			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
				w.sendDirectoryChangeEvents(event.Name)
			} else {
				// Send the event on the Events channel.
				select {
				case w.Events <- event:
				case <-w.done:
					break loop
				}
			}

			if event.Op&Remove == Remove {
				// Look for a file that may have overwritten this.
				// For example, mv f1 f2 will delete f2, then create f2.
				if path.isDir {
					fileDir := filepath.Clean(event.Name)
					w.mu.Lock()
					_, found := w.watches[fileDir]
					w.mu.Unlock()
					if found {
						// make sure the directory exists before we watch for changes. When we
						// do a recursive watch and perform rm -fr, the parent directory might
						// have gone missing, ignore the missing directory and let the
						// upcoming delete event remove the watch from the parent directory.
						if _, err := os.Lstat(fileDir); err == nil {
							w.sendDirectoryChangeEvents(fileDir)
						}
					}
				} else {
					filePath := filepath.Clean(event.Name)
					if fileInfo, err := os.Lstat(filePath); err == nil {
						w.sendFileCreatedEventIfNew(filePath, fileInfo)
					}
				}
			}

			// Move to next event
			kevents = kevents[1:]
		}
	}

	// cleanup
	err := unix.Close(w.kq)
	if err != nil {
		// only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
		select {
		case w.Errors <- err:
		default:
		}
	}
	close(w.Events)
	close(w.Errors)
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func newEvent(name string, mask uint32) Event {
	e := Event{Name: name}
	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
		e.Op |= Remove
	}
	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
		e.Op |= Write
	}
	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
		e.Op |= Rename
	}
	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
		e.Op |= Chmod
	}
	return e
}

// newCreateEvent returns an Event for name with only the Create bit set.
func newCreateEvent(name string) Event {
	return Event{Name: name, Op: Create}
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory:
// every existing entry of dirPath gets its own internal watch and is marked
// as known-to-exist so no spurious create event is produced for it later.
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
	// Get all files
	files, err := ioutil.ReadDir(dirPath)
	if err != nil {
		return err
	}

	for _, fileInfo := range files {
		filePath := filepath.Join(dirPath, fileInfo.Name())
		filePath, err = w.internalWatch(filePath, fileInfo)
		if err != nil {
			return err
		}

		w.mu.Lock()
		w.fileExists[filePath] = true
		w.mu.Unlock()
	}

	return nil
}
// sendDirectoryChangeEvents searches the directory for newly created files
// and sends them over the event channel. This functionality is to have
// the BSD version of fsnotify match Linux inotify which provides a
// create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
	// Get all files
	files, err := ioutil.ReadDir(dirPath)
	if err != nil {
		select {
		case w.Errors <- err:
		case <-w.done:
			return
		}
		// Note: on error, files is nil and the loop below is a no-op.
	}

	// Search for new files
	for _, fileInfo := range files {
		filePath := filepath.Join(dirPath, fileInfo.Name())
		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)

		if err != nil {
			return
		}
	}
}
// sendFileCreatedEventIfNew sends a create event if the file isn't already
// being tracked, then starts watching it and records it in fileExists.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
	w.mu.Lock()
	_, doesExist := w.fileExists[filePath]
	w.mu.Unlock()
	if !doesExist {
		// Send create event
		select {
		case w.Events <- newCreateEvent(filePath):
		case <-w.done:
			return
		}
	}

	// like watchDirectoryFiles (but without doing another ReadDir)
	filePath, err = w.internalWatch(filePath, fileInfo)
	if err != nil {
		return err
	}

	w.mu.Lock()
	w.fileExists[filePath] = true
	w.mu.Unlock()

	return nil
}
// internalWatch adds a kqueue watch for name, choosing flags so that the
// result mimics Linux inotify semantics for files and subdirectories.
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
	if fileInfo.IsDir() {
		// mimic Linux providing delete events for subdirectories
		// but preserve the flags used if currently watching subdirectory
		w.mu.Lock()
		flags := w.dirFlags[name]
		w.mu.Unlock()

		flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
		return w.addWatch(name, flags)
	}

	// watch file to mimic Linux inotify
	return w.addWatch(name, noteAllEvents)
}
// kqueue creates a new kernel event queue and returns a descriptor.
func kqueue() (kq int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
return kq, err
}
return kq, nil
}
// register events with the queue
func register(kq int, fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types:
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// register the events
success, err := unix.Kevent(kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read collects pending kqueue events into the events slice, blocking
// according to timeout: nil waits indefinitely, a zero timespec polls.
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
	n, err := unix.Kevent(kq, nil, events, timeout)
	if err != nil {
		return nil, err
	}
	// Only the first n entries were populated by the kernel.
	return events[:n], nil
}
// durationToTimespec converts a Duration into the Timespec form expected
// by kevent's timeout parameter.
func durationToTimespec(d time.Duration) unix.Timespec {
	nanos := d.Nanoseconds()
	return unix.NsecToTimespec(nanos)
}

View File

@@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly
package fsnotify
import "golang.org/x/sys/unix"
const openMode = unix.O_NONBLOCK | unix.O_RDONLY

View File

@@ -0,0 +1,12 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin
package fsnotify
import "golang.org/x/sys/unix"
// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY

View File

@@ -0,0 +1,561 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"syscall"
"unsafe"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
	Events   chan Event        // Delivered filesystem notifications
	Errors   chan error        // Errors encountered while watching
	isClosed bool              // Set to true when Close() is first called
	mu       sync.Mutex        // Map access
	port     syscall.Handle    // Handle to completion port
	watches  watchMap          // Map of watches (key: i-number)
	input    chan *input       // Inputs to the reader are sent on this channel
	quit     chan chan<- error // Close() sends its reply channel here
}
// NewWatcher establishes a new watcher with the underlying OS and begins
// waiting for events.
func NewWatcher() (*Watcher, error) {
	// All watched directory handles are associated with one I/O
	// completion port.
	port, err := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
	if err != nil {
		return nil, os.NewSyscallError("CreateIoCompletionPort", err)
	}

	w := &Watcher{
		port:    port,
		watches: make(watchMap),
		input:   make(chan *input, 1),
		Events:  make(chan Event, 50),
		Errors:  make(chan error),
		quit:    make(chan chan<- error, 1),
	}

	// The reader goroutine owns the completion port from here on.
	go w.readEvents()
	return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
	// NOTE(review): isClosed is read and written without holding w.mu, so
	// concurrent Close calls could race — confirm callers serialize Close.
	if w.isClosed {
		return nil
	}
	w.isClosed = true

	// Send "quit" message to the reader goroutine
	ch := make(chan error)
	w.quit <- ch
	// Post a no-op completion packet so the reader wakes up and sees the
	// pending quit request.
	if err := w.wakeupReader(); err != nil {
		return err
	}
	// The reader replies once all watches are torn down and the port is closed.
	return <-ch
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
	if w.isClosed {
		return errors.New("watcher already closed")
	}
	// Hand the request to the reader goroutine, which owns the watch map,
	// and wait for its reply.
	in := &input{
		op:    opAddWatch,
		path:  filepath.Clean(name),
		flags: sysFSALLEVENTS,
		reply: make(chan error),
	}
	w.input <- in
	if err := w.wakeupReader(); err != nil {
		return err
	}
	return <-in.reply
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
	// Processed on the reader goroutine, same as Add.
	in := &input{
		op:    opRemoveWatch,
		path:  filepath.Clean(name),
		reply: make(chan error),
	}
	w.input <- in
	if err := w.wakeupReader(); err != nil {
		return err
	}
	return <-in.reply
}
const (
	// Options for AddWatch
	sysFSONESHOT = 0x80000000 // Report a single event, then drop the watch
	sysFSONLYDIR = 0x1000000  // Only add the watch if the path is a directory

	// Events
	sysFSACCESS     = 0x1
	sysFSALLEVENTS  = 0xfff
	sysFSATTRIB     = 0x4
	sysFSCLOSE      = 0x18
	sysFSCREATE     = 0x100
	sysFSDELETE     = 0x200
	sysFSDELETESELF = 0x400
	sysFSMODIFY     = 0x2
	sysFSMOVE       = 0xc0 // MOVEDFROM | MOVEDTO
	sysFSMOVEDFROM  = 0x40
	sysFSMOVEDTO    = 0x80
	sysFSMOVESELF   = 0x800

	// Special events
	sysFSIGNORED   = 0x8000
	sysFSQOVERFLOW = 0x4000
)
// newEvent translates an inotify-style bit mask into an Event value,
// setting every Op bit whose corresponding mask bits are all present.
func newEvent(name string, mask uint32) Event {
	// all reports whether every bit in bits is set in mask (sysFSMOVE is
	// a two-bit value, so a plain != 0 test would not be equivalent).
	all := func(bits uint32) bool { return mask&bits == bits }

	e := Event{Name: name}
	if all(sysFSCREATE) || all(sysFSMOVEDTO) {
		e.Op |= Create
	}
	if all(sysFSDELETE) || all(sysFSDELETESELF) {
		e.Op |= Remove
	}
	if all(sysFSMODIFY) {
		e.Op |= Write
	}
	if all(sysFSMOVE) || all(sysFSMOVESELF) || all(sysFSMOVEDFROM) {
		e.Op |= Rename
	}
	if all(sysFSATTRIB) {
		e.Op |= Chmod
	}
	return e
}
// Operation codes carried by input requests to the reader goroutine.
const (
	opAddWatch = iota
	opRemoveWatch
)

const (
	// provisional marks a watch entry that is still being set up; it lives
	// above the low 32 mask bits so it never collides with event flags.
	provisional uint64 = 1 << (32 + iota)
)

// input is a request (add/remove watch) passed to the reader goroutine.
type input struct {
	op    int        // opAddWatch or opRemoveWatch
	path  string     // Cleaned path to operate on
	flags uint32     // Watch flags (used by opAddWatch only)
	reply chan error // Result of the operation is delivered here
}

// inode identifies a directory by volume serial number plus file index,
// and carries the open handle used to watch it.
type inode struct {
	handle syscall.Handle
	volume uint32
	index  uint64
}

// watch holds the state of one watched directory.
type watch struct {
	ov     syscall.Overlapped // Must stay the first field: readEvents casts *Overlapped back to *watch
	ino    *inode             // i-number
	path   string             // Directory path
	mask   uint64             // Directory itself is being watched with these notify flags
	names  map[string]uint64  // Map of names being watched and their notify flags
	rename string             // Remembers the old name while renaming a file
	buf    [4096]byte         // Buffer filled by ReadDirectoryChanges
}

// indexMap maps a file index to its watch; watchMap groups these per volume.
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
// wakeupReader posts a zero completion packet so the reader goroutine
// returns from GetQueuedCompletionStatus and can service pending requests.
func (w *Watcher) wakeupReader() error {
	if err := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil); err != nil {
		return os.NewSyscallError("PostQueuedCompletionStatus", err)
	}
	return nil
}
// getDir returns pathname itself when it names a directory, otherwise the
// directory containing it.
func getDir(pathname string) (dir string, err error) {
	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
	if e != nil {
		return "", os.NewSyscallError("GetFileAttributes", e)
	}
	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 {
		// A plain file: its parent directory is what gets watched.
		parent, _ := filepath.Split(pathname)
		return filepath.Clean(parent), nil
	}
	return pathname, nil
}
// getIno opens path as a directory and derives its identity (volume serial
// number plus 64-bit file index). The returned inode owns the open handle.
func getIno(path string) (ino *inode, err error) {
	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
		syscall.FILE_LIST_DIRECTORY,
		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
		nil, syscall.OPEN_EXISTING,
		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
	if e != nil {
		return nil, os.NewSyscallError("CreateFile", e)
	}

	var info syscall.ByHandleFileInformation
	if e = syscall.GetFileInformationByHandle(h, &info); e != nil {
		// Don't leak the handle on failure.
		syscall.CloseHandle(h)
		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
	}

	return &inode{
		handle: h,
		volume: info.VolumeSerialNumber,
		index:  uint64(info.FileIndexHigh)<<32 | uint64(info.FileIndexLow),
	}, nil
}
// get looks up the watch registered for ino, or nil if none exists.
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
	if vol := m[ino.volume]; vol != nil {
		return vol[ino.index]
	}
	return nil
}
// set records watch under ino, creating the per-volume index map on first
// use. Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
	vol := m[ino.volume]
	if vol == nil {
		vol = make(indexMap)
		m[ino.volume] = vol
	}
	vol[ino.index] = watch
}
// addWatch registers pathname (or its parent directory, when pathname is a
// file) with the completion port and records the requested notify flags.
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
	dir, err := getDir(pathname)
	if err != nil {
		return err
	}
	// sysFSONLYDIR: only add the watch if pathname itself is a directory.
	if flags&sysFSONLYDIR != 0 && pathname != dir {
		return nil
	}
	ino, err := getIno(dir)
	if err != nil {
		return err
	}
	w.mu.Lock()
	watchEntry := w.watches.get(ino)
	w.mu.Unlock()
	if watchEntry == nil {
		// First watch on this directory: attach its handle to the port.
		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
			syscall.CloseHandle(ino.handle)
			return os.NewSyscallError("CreateIoCompletionPort", e)
		}
		watchEntry = &watch{
			ino:   ino,
			path:  dir,
			names: make(map[string]uint64),
		}
		w.mu.Lock()
		w.watches.set(ino, watchEntry)
		w.mu.Unlock()
		// Mark provisional until the first ReadDirectoryChanges succeeds.
		flags |= provisional
	} else {
		// Directory already watched; drop the duplicate handle from getIno.
		syscall.CloseHandle(ino.handle)
	}
	if pathname == dir {
		// Watching the directory itself.
		watchEntry.mask |= flags
	} else {
		// Watching a single name inside the directory.
		watchEntry.names[filepath.Base(pathname)] |= flags
	}
	if err = w.startRead(watchEntry); err != nil {
		return err
	}
	// Setup succeeded; clear the provisional marker.
	if pathname == dir {
		watchEntry.mask &= ^provisional
	} else {
		watchEntry.names[filepath.Base(pathname)] &= ^provisional
	}
	return nil
}
// remWatch removes the watch for pathname (either the directory itself or
// one name inside it), emits the matching IGNORED event, and re-issues the
// directory read with the reduced mask.
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
	dir, err := getDir(pathname)
	if err != nil {
		return err
	}
	ino, err := getIno(dir)
	if err != nil {
		return err
	}
	w.mu.Lock()
	watch := w.watches.get(ino)
	w.mu.Unlock()
	// getIno opened a fresh handle just to identify the directory; the
	// registered watch (if any) keeps its own handle, so close this one to
	// avoid leaking a handle on every Remove call.
	syscall.CloseHandle(ino.handle)
	if watch == nil {
		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
	}
	if pathname == dir {
		// Removing the directory watch itself.
		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
		watch.mask = 0
	} else {
		// Removing one watched name inside the directory.
		name := filepath.Base(pathname)
		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
		delete(watch.names, name)
	}
	// Re-issue the read with the remaining flags (or tear down if none).
	return w.startRead(watch)
}
// deleteWatch emits IGNORED events for everything the watch covers and
// clears its masks. Provisional entries are skipped because they were
// never successfully reported as watched.
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
	for name, mask := range watch.names {
		if mask&provisional == 0 {
			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
		}
		delete(watch.names, name)
	}
	if watch.mask != 0 {
		if watch.mask&provisional == 0 {
			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
		}
		watch.mask = 0
	}
}
// startRead (re)issues the overlapped ReadDirectoryChanges call for watch,
// first cancelling any outstanding read. If no notify flags remain, it
// closes the handle and drops the watch entry instead.
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
	// Cancel the pending read so the notify mask can be changed.
	if e := syscall.CancelIo(watch.ino.handle); e != nil {
		w.Errors <- os.NewSyscallError("CancelIo", e)
		w.deleteWatch(watch)
	}
	// Combined Windows notify mask for the directory and all watched names.
	mask := toWindowsFlags(watch.mask)
	for _, m := range watch.names {
		mask |= toWindowsFlags(m)
	}
	if mask == 0 {
		// Nothing left to watch: release the handle and forget the entry.
		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
			w.Errors <- os.NewSyscallError("CloseHandle", e)
		}
		w.mu.Lock()
		delete(w.watches[watch.ino.volume], watch.ino.index)
		w.mu.Unlock()
		return nil
	}
	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
	if e != nil {
		err := os.NewSyscallError("ReadDirectoryChanges", e)
		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
			// Watched directory was probably removed
			if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
				if watch.mask&sysFSONESHOT != 0 {
					watch.mask = 0
				}
			}
			err = nil
		}
		// Tear the watch down, then call startRead once more so the
		// now-empty entry is removed via the mask == 0 path above.
		w.deleteWatch(watch)
		w.startRead(watch)
		return err
	}
	return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
	var (
		n, key uint32
		ov     *syscall.Overlapped
	)
	runtime.LockOSThread()
	for {
		e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
		// The overlapped pointer doubles as the *watch, because ov is the
		// first field of the watch struct.
		watch := (*watch)(unsafe.Pointer(ov))
		if watch == nil {
			// nil watch: this was a wakeup packet posted by wakeupReader.
			// Service any pending quit/add/remove request.
			select {
			case ch := <-w.quit:
				// Shut down: tear down every watch, close the port and
				// both public channels, then acknowledge the quit.
				w.mu.Lock()
				var indexes []indexMap
				for _, index := range w.watches {
					indexes = append(indexes, index)
				}
				w.mu.Unlock()
				for _, index := range indexes {
					for _, watch := range index {
						w.deleteWatch(watch)
						w.startRead(watch)
					}
				}
				var err error
				if e := syscall.CloseHandle(w.port); e != nil {
					err = os.NewSyscallError("CloseHandle", e)
				}
				close(w.Events)
				close(w.Errors)
				ch <- err
				return
			case in := <-w.input:
				switch in.op {
				case opAddWatch:
					in.reply <- w.addWatch(in.path, uint64(in.flags))
				case opRemoveWatch:
					in.reply <- w.remWatch(in.path)
				}
			default:
			}
			continue
		}
		switch e {
		case syscall.ERROR_MORE_DATA:
			if watch == nil {
				w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
			} else {
				// The i/o succeeded but the buffer is full.
				// In theory we should be building up a full packet.
				// In practice we can get away with just carrying on.
				n = uint32(unsafe.Sizeof(watch.buf))
			}
		case syscall.ERROR_ACCESS_DENIED:
			// Watched directory was probably removed
			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
			w.deleteWatch(watch)
			w.startRead(watch)
			continue
		case syscall.ERROR_OPERATION_ABORTED:
			// CancelIo was called on this handle
			continue
		default:
			w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
			continue
		case nil:
		}
		// Walk the FILE_NOTIFY_INFORMATION records packed into watch.buf.
		var offset uint32
		for {
			if n == 0 {
				// Zero bytes read: the change buffer overflowed.
				w.Events <- newEvent("", sysFSQOVERFLOW)
				w.Errors <- errors.New("short read in readEvents()")
				break
			}
			// Point "raw" to the event in the buffer
			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
			fullname := filepath.Join(watch.path, name)
			var mask uint64
			switch raw.Action {
			case syscall.FILE_ACTION_REMOVED:
				mask = sysFSDELETESELF
			case syscall.FILE_ACTION_MODIFIED:
				mask = sysFSMODIFY
			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
				// First half of a rename; remember the old name until the
				// NEW_NAME record arrives.
				watch.rename = name
			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
				if watch.names[watch.rename] != 0 {
					// Carry per-name watch flags over to the new name.
					watch.names[name] |= watch.names[watch.rename]
					delete(watch.names, watch.rename)
					mask = sysFSMOVESELF
				}
			}
			sendNameEvent := func() {
				if w.sendEvent(fullname, watch.names[name]&mask) {
					if watch.names[name]&sysFSONESHOT != 0 {
						delete(watch.names, name)
					}
				}
			}
			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
				sendNameEvent()
			}
			if raw.Action == syscall.FILE_ACTION_REMOVED {
				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
				delete(watch.names, name)
			}
			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
				if watch.mask&sysFSONESHOT != 0 {
					watch.mask = 0
				}
			}
			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
				// Report the rename against the old name as well.
				fullname = filepath.Join(watch.path, watch.rename)
				sendNameEvent()
			}
			// Move to the next event in the buffer
			if raw.NextEntryOffset == 0 {
				break
			}
			offset += raw.NextEntryOffset
			// Error!
			if offset >= n {
				w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
				break
			}
		}
		// Re-arm the directory read for the next batch of changes.
		if err := w.startRead(watch); err != nil {
			w.Errors <- err
		}
	}
}
// sendEvent delivers an event built from mask on the Events channel and
// reports whether anything was sent; a zero mask sends nothing. A pending
// quit request is put back on the quit channel so shutdown still proceeds.
func (w *Watcher) sendEvent(name string, mask uint64) bool {
	if mask == 0 {
		return false
	}
	select {
	case w.Events <- newEvent(name, uint32(mask)):
	case ch := <-w.quit:
		w.quit <- ch
	}
	return true
}
// toWindowsFlags maps an inotify-style event mask onto the
// FILE_NOTIFY_CHANGE_* bits understood by ReadDirectoryChanges.
func toWindowsFlags(mask uint64) uint32 {
	var out uint32
	for _, fm := range []struct {
		sys uint64
		win uint32
	}{
		{sysFSACCESS, syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS},
		{sysFSMODIFY, syscall.FILE_NOTIFY_CHANGE_LAST_WRITE},
		{sysFSATTRIB, syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES},
		{sysFSMOVE | sysFSCREATE | sysFSDELETE,
			syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME},
	} {
		if mask&fm.sys != 0 {
			out |= fm.win
		}
	}
	return out
}
// toFSnotifyFlags converts a FILE_ACTION_* code into the matching
// inotify-style event bit; unrecognized actions map to 0.
func toFSnotifyFlags(action uint32) uint64 {
	actionBits := map[uint32]uint64{
		syscall.FILE_ACTION_ADDED:            sysFSCREATE,
		syscall.FILE_ACTION_REMOVED:          sysFSDELETE,
		syscall.FILE_ACTION_MODIFIED:         sysFSMODIFY,
		syscall.FILE_ACTION_RENAMED_OLD_NAME: sysFSMOVEDFROM,
		syscall.FILE_ACTION_RENAMED_NEW_NAME: sysFSMOVEDTO,
	}
	return actionBits[action]
}

View File

@@ -0,0 +1,20 @@
# OSX leaves these everywhere on SMB shares
._*
# Eclipse files
.classpath
.project
.settings/**
# Emacs save files
*~
# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
# Go test binaries
*.test

View File

@@ -0,0 +1,7 @@
language: go
go:
- 1.3
- 1.4
script:
- go test
- go build

View File

@@ -0,0 +1,50 @@
The MIT License (MIT)
Copyright (c) 2014 Sam Ghods
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,121 @@
# YAML marshaling and unmarshaling support for Go
[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
## Introduction
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
## Compatibility
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
## Caveats
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
```
BAD:
exampleKey: !!binary gIGC
GOOD:
exampleKey: gIGC
... and decode the base64 data in your code.
```
**Caveat #2:** When using `YAMLToJSON` directly, maps whose keys are themselves maps will result in an error, since that is not supported by JSON. The same error occurs in `Unmarshal`, since you can't unmarshal map keys anyway — struct fields can't be keys.
## Installation and usage
To install, run:
```
$ go get github.com/ghodss/yaml
```
And import using:
```
import "github.com/ghodss/yaml"
```
Usage is very similar to the JSON library:
```go
package main
import (
"fmt"
"github.com/ghodss/yaml"
)
type Person struct {
Name string `json:"name"` // Affects YAML field names too.
Age int `json:"age"`
}
func main() {
// Marshal a Person struct to YAML.
p := Person{"John", 30}
y, err := yaml.Marshal(p)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(y))
/* Output:
age: 30
name: John
*/
// Unmarshal the YAML back into a Person struct.
var p2 Person
err = yaml.Unmarshal(y, &p2)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(p2)
/* Output:
{John 30}
*/
}
```
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
```go
package main
import (
"fmt"
"github.com/ghodss/yaml"
)
func main() {
j := []byte(`{"name": "John", "age": 30}`)
y, err := yaml.JSONToYAML(j)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(y))
/* Output:
name: John
age: 30
*/
j2, err := yaml.YAMLToJSON(y)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(j2))
/* Output:
{"age":30,"name":"John"}
*/
}
```

View File

@@ -0,0 +1,501 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package yaml
import (
"bytes"
"encoding"
"encoding/json"
"reflect"
"sort"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
				v = e
				continue
			}
		}
		if v.Kind() != reflect.Ptr {
			break
		}
		// When decoding null, stop at the last settable pointer so the
		// caller can set it to nil.
		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
			break
		}
		if v.IsNil() {
			// Allocate intermediate pointers so the walk can continue.
			if v.CanSet() {
				v.Set(reflect.New(v.Type().Elem()))
			} else {
				v = reflect.New(v.Type().Elem())
			}
		}
		// Prefer a custom unmarshaler found at any level of indirection.
		if v.Type().NumMethod() > 0 {
			if u, ok := v.Interface().(json.Unmarshaler); ok {
				return u, nil, reflect.Value{}
			}
			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
				return nil, u, reflect.Value{}
			}
		}
		v = v.Elem()
	}
	return nil, nil, v
}
// A field represents a single field found in a struct.
type field struct {
	name      string
	nameBytes []byte                 // []byte(name)
	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
	tag       bool                   // Whether the name came from a json tag
	index     []int                  // Index path from the root struct to this field
	typ       reflect.Type
	omitEmpty bool // json tag contained ",omitempty"
	quoted    bool // json tag contained ",string"
}
// fillField completes f with the byte form of its name and the matching
// case-folding comparator, then returns the filled-in copy.
func fillField(f field) field {
	nameBytes := []byte(f.name)
	f.nameBytes = nameBytes
	f.equalFold = foldFunc(nameBytes)
	return f
}
// byName sorts field by name, breaking ties with depth, then with whether
// the name came from a json tag, then with index sequence.
type byName []field

func (x byName) Len() int      { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	a, b := x[i], x[j]
	switch {
	case a.name != b.name:
		return a.name < b.name
	case len(a.index) != len(b.index):
		return len(a.index) < len(b.index)
	case a.tag != b.tag:
		return a.tag
	}
	return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence (lexicographic, shorter first).
type byIndex []field

func (x byIndex) Len() int      { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	a, b := x[i].index, x[j].index
	for k := range a {
		if k >= len(b) {
			return false
		}
		if a[k] != b[k] {
			return a[k] < b[k]
		}
	}
	return len(a) < len(b)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}
	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}
	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}
	// Fields found.
	var fields []field
	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}
		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true
			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" { // unexported
					continue
				}
				tag := sf.Tag.Get("json")
				if tag == "-" {
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				// Build the index path from the root struct to this field.
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i
				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}
				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, fillField(field{
						name:      name,
						tag:       tagged,
						index:     index,
						typ:       ft,
						omitEmpty: opts.Contains("omitempty"),
						quoted:    opts.Contains("string"),
					}))
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}
				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
				}
			}
		}
	}
	sort.Sort(byName(fields))
	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with JSON tags are promoted.
	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}
	fields = out
	sort.Sort(byIndex(fields))
	return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	// A single tagged field at the shallowest depth wins outright.
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}
// fieldCache memoizes typeFields results per type, guarded by the
// embedded RWMutex.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	// Fast path: read-locked cache hit.
	fieldCache.RLock()
	cached := fieldCache.m[t]
	fieldCache.RUnlock()
	if cached != nil {
		return cached
	}

	// Compute outside the lock. Concurrent callers may duplicate this
	// work, but they never block each other's computations.
	computed := typeFields(t)
	if computed == nil {
		computed = []field{}
	}

	fieldCache.Lock()
	defer fieldCache.Unlock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = computed
	return computed
}
// isValidTag reports whether s is usable as a json tag name: non-empty,
// containing only letters, digits, and a fixed set of punctuation.
// Backslash and quote characters are reserved and therefore rejected.
func isValidTag(s string) bool {
	if len(s) == 0 {
		return false
	}
	const allowedPunct = "!#$%&()*+-./:<=>?@[]^_{|}~ "
	for _, r := range s {
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			continue
		}
		if !strings.ContainsRune(allowedPunct, r) {
			return false
		}
	}
	return true
}
const (
	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
	kelvin       = '\u212a'    // Kelvin sign, a Unicode case-fold of k/K.
	smallLongEss = '\u017f'    // Latin small letter long s, a case-fold of s/S.
)
// foldFunc picks the cheapest case-folding comparator that is still
// correct for the key s, from most general (and slow) to fastest:
//
//  1. bytes.EqualFold, if s contains any non-ASCII UTF-8
//  2. equalFoldRight, if s contains the special folding letters
//     'k', 'K', 's', 'S' — these map to 3 runes, not 2:
//     S folds to s and to U+017F 'ſ' (Latin small letter long s);
//     k folds to K and to U+212A (the Kelvin sign)
//  3. asciiEqualFold, ASCII with non-letters (including '_')
//  4. simpleLetterEqualFold, ASCII letters only, none special
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
	hasNonLetter := false
	hasSpecial := false
	for _, b := range s {
		if b >= utf8.RuneSelf {
			// Non-ASCII byte: fall back to the fully general fold.
			return bytes.EqualFold
		}
		switch up := b & caseMask; {
		case up < 'A' || up > 'Z':
			hasNonLetter = true
		case up == 'K' || up == 'S':
			// See the doc comment for why these letters are special.
			hasSpecial = true
		}
	}
	switch {
	case hasSpecial:
		return equalFoldRight
	case hasNonLetter:
		return asciiEqualFold
	default:
		return simpleLetterEqualFold
	}
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
	for _, sb := range s {
		if len(t) == 0 {
			// t ran out of bytes before s did.
			return false
		}
		tb := t[0]
		if tb < utf8.RuneSelf {
			// ASCII vs ASCII: case-insensitive compare for letters only.
			if sb != tb {
				sbUpper := sb & caseMask
				if 'A' <= sbUpper && sbUpper <= 'Z' {
					if sbUpper != tb&caseMask {
						return false
					}
				} else {
					return false
				}
			}
			t = t[1:]
			continue
		}
		// sb is ASCII and t is not. t must be either kelvin
		// sign or long s; sb must be s, S, k, or K.
		tr, size := utf8.DecodeRune(t)
		switch sb {
		case 's', 'S':
			if tr != smallLongEss {
				return false
			}
		case 'k', 'K':
			if tr != kelvin {
				return false
			}
		default:
			return false
		}
		t = t[size:]
	}
	// Leftover bytes in t mean the inputs had different folded lengths.
	if len(t) > 0 {
		return false
	}
	return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when s is
// all ASCII (possibly including non-letters) and contains none of the
// special-folding letters. See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i := range s {
		sb, tb := s[i], t[i]
		if sb == tb {
			continue
		}
		// Letters may differ only in case; anything else must match exactly.
		isLetter := ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z')
		if !isLetter || sb&caseMask != tb&caseMask {
			return false
		}
	}
	return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for use
// when s is all ASCII letters (no underscores, etc.) and contains none of
// 'k', 'K', 's', 'S'. See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i := 0; i < len(s); i++ {
		// Masking off the case bit makes ASCII letters compare equal
		// regardless of case.
		if s[i]&caseMask != t[i]&caseMask {
			return false
		}
	}
	return true
}
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	name := tag
	opts := ""
	if i := strings.Index(tag, ","); i >= 0 {
		name, opts = tag[:i], tag[i+1:]
	}
	return name, tagOptions(opts)
}

// Contains reports whether the comma-separated option list contains the
// flag optionName; the flag must appear alone or delimited by commas.
func (o tagOptions) Contains(optionName string) bool {
	rest := string(o)
	for rest != "" {
		option := rest
		if i := strings.Index(rest, ","); i >= 0 {
			option, rest = rest[:i], rest[i+1:]
		} else {
			rest = ""
		}
		if option == optionName {
			return true
		}
	}
	return false
}

View File

@@ -0,0 +1,277 @@
package yaml
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strconv"
"gopkg.in/yaml.v2"
)
// Marshal encodes o into JSON first (so json struct tags and custom
// MarshalJSON methods apply) and then converts that JSON into YAML.
func Marshal(o interface{}) ([]byte, error) {
	jsonBytes, err := json.Marshal(o)
	if err != nil {
		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
	}

	yamlBytes, err := JSONToYAML(jsonBytes)
	if err != nil {
		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
	}

	return yamlBytes, nil
}
// Unmarshal converts the YAML in y to JSON, then uses encoding/json to
// decode that JSON into o. The reflect.Value of o is threaded through
// the conversion so YAML scalars can be coerced to match string-typed
// destination fields.
func Unmarshal(y []byte, o interface{}) error {
	target := reflect.ValueOf(o)
	j, err := yamlToJSON(y, &target)
	if err != nil {
		return fmt.Errorf("error converting YAML to JSON: %v", err)
	}
	if err := json.Unmarshal(j, o); err != nil {
		return fmt.Errorf("error unmarshaling JSON: %v", err)
	}
	return nil
}
// JSONToYAML converts JSON bytes to YAML bytes.
func JSONToYAML(j []byte) ([]byte, error) {
	// Decode with go-yaml instead of encoding/json: the Go JSON decoder
	// turns every number into float64 when targeting interface{}, while
	// go-yaml picks an appropriate numeric type (int, float, ...), so
	// number types survive the round trip.
	var obj interface{}
	if err := yaml.Unmarshal(j, &obj); err != nil {
		return nil, err
	}
	// Re-encode the generic value as YAML.
	return yaml.Marshal(obj)
}
// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
//   - In YAML you can have binary and null keys in your maps. These are
//     invalid in JSON. (int and float keys are converted to strings.)
//   - Binary data in YAML with the !!binary tag is not supported. If you
//     want to use binary data with this library, encode the data as
//     base64 as usual but do not use the !!binary tag in your YAML. This
//     will ensure the original base64 encoded data makes it all the way
//     through to the JSON.
func YAMLToJSON(y []byte) ([]byte, error) {
	// No decode target is known here, so no string coercion is applied.
	return yamlToJSON(y, nil)
}
// yamlToJSON decodes y with go-yaml, normalizes the result into a
// JSON-compatible value (optionally guided by jsonTarget, the eventual
// decode destination), and re-encodes it as JSON.
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
	// Decode the YAML into a generic value.
	var yamlObj interface{}
	if err := yaml.Unmarshal(y, &yamlObj); err != nil {
		return nil, err
	}

	// YAML permits constructs JSON does not (e.g. non-string map keys),
	// so rewrite the value into a JSON-compatible shape, failing on
	// irrecoverable incompatibilities.
	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
	if err != nil {
		return nil, err
	}

	// Encode the normalized value as JSON.
	return json.Marshal(jsonObj)
}
// convertToJSONableObject recursively rewrites a value decoded by
// go-yaml into a shape encoding/json can marshal: map keys are coerced
// to strings, nested maps/slices are converted element by element, and
// scalars are coerced to strings when the eventual JSON decode target
// (jsonTarget) is a string field. jsonTarget may be nil when no target
// type is known. Returns an error only for map keys whose type cannot
// be represented in JSON.
func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
	var err error

	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
	// interface). We pass decodingNull as false because we're not actually
	// decoding into the value, we're just checking if the ultimate target is a
	// string.
	if jsonTarget != nil {
		ju, tu, pv := indirect(*jsonTarget, false)
		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
		// to decode into a string.
		if ju != nil || tu != nil {
			jsonTarget = nil
		} else {
			jsonTarget = &pv
		}
	}

	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
	// if so, coerce. Else return normal.
	// If yamlObj is a map or array, find the field that each key is
	// unmarshaling to, and when you recurse pass the reflect.Value for that
	// field back into this function.
	switch typedYAMLObj := yamlObj.(type) {
	case map[interface{}]interface{}:
		// JSON does not support arbitrary keys in a map, so we must convert
		// these keys to strings.
		//
		// From my reading of go-yaml v2 (specifically the resolve function),
		// keys can only have the types string, int, int64, float64, binary
		// (unsupported), or null (unsupported).
		strMap := make(map[string]interface{})
		for k, v := range typedYAMLObj {
			// Resolve the key to a string first.
			var keyString string
			switch typedKey := k.(type) {
			case string:
				keyString = typedKey
			case int:
				keyString = strconv.Itoa(typedKey)
			case int64:
				// go-yaml will only return an int64 as a key if the system
				// architecture is 32-bit and the key's value is between 32-bit
				// and 64-bit. Otherwise the key type will simply be int.
				keyString = strconv.FormatInt(typedKey, 10)
			case float64:
				// Stolen from go-yaml to use the same conversion to string as
				// the go-yaml library uses to convert float to string when
				// Marshaling.
				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
				switch s {
				case "+Inf":
					s = ".inf"
				case "-Inf":
					s = "-.inf"
				case "NaN":
					s = ".nan"
				}
				keyString = s
			case bool:
				if typedKey {
					keyString = "true"
				} else {
					keyString = "false"
				}
			default:
				// Binary and null keys (and anything unexpected) land here.
				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
					reflect.TypeOf(k), k, v)
			}

			// jsonTarget should be a struct or a map. If it's a struct, find
			// the field it's going to map to and pass its reflect.Value. If
			// it's a map, find the element type of the map and pass the
			// reflect.Value created from that type. If it's neither, just pass
			// nil - JSON conversion will error for us if it's a real issue.
			if jsonTarget != nil {
				t := *jsonTarget
				if t.Kind() == reflect.Struct {
					keyBytes := []byte(keyString)
					// Find the field that the JSON library would use.
					var f *field
					fields := cachedTypeFields(t.Type())
					for i := range fields {
						ff := &fields[i]
						// An exact name match wins immediately.
						if bytes.Equal(ff.nameBytes, keyBytes) {
							f = ff
							break
						}
						// Do case-insensitive comparison; keep the first such
						// match but keep scanning for an exact one.
						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
							f = ff
						}
					}
					if f != nil {
						// Find the reflect.Value of the most preferential
						// struct field.
						jtf := t.Field(f.index[0])
						strMap[keyString], err = convertToJSONableObject(v, &jtf)
						if err != nil {
							return nil, err
						}
						continue
					}
				} else if t.Kind() == reflect.Map {
					// Create a zero value of the map's element type to use as
					// the JSON target.
					jtv := reflect.Zero(t.Type().Elem())
					strMap[keyString], err = convertToJSONableObject(v, &jtv)
					if err != nil {
						return nil, err
					}
					continue
				}
			}
			// No usable target: recurse with no type guidance.
			strMap[keyString], err = convertToJSONableObject(v, nil)
			if err != nil {
				return nil, err
			}
		}
		return strMap, nil
	case []interface{}:
		// We need to recurse into arrays in case there are any
		// map[interface{}]interface{}'s inside and to convert any
		// numbers to strings.

		// If jsonTarget is a slice (which it really should be), find the
		// thing it's going to map to. If it's not a slice, just pass nil
		// - JSON conversion will error for us if it's a real issue.
		var jsonSliceElemValue *reflect.Value
		if jsonTarget != nil {
			t := *jsonTarget
			if t.Kind() == reflect.Slice {
				// By default slices point to nil, but we need a reflect.Value
				// pointing to a value of the slice type, so we create one here.
				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
				jsonSliceElemValue = &ev
			}
		}

		// Make and use a new array.
		arr := make([]interface{}, len(typedYAMLObj))
		for i, v := range typedYAMLObj {
			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
			if err != nil {
				return nil, err
			}
		}
		return arr, nil
	default:
		// If the target type is a string and the YAML type is a number,
		// convert the YAML type to a string.
		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
			// Based on my reading of go-yaml, it may return int, int64,
			// float64, or uint64.
			var s string
			switch typedVal := typedYAMLObj.(type) {
			case int:
				s = strconv.FormatInt(int64(typedVal), 10)
			case int64:
				s = strconv.FormatInt(typedVal, 10)
			case float64:
				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
			case uint64:
				s = strconv.FormatUint(typedVal, 10)
			case bool:
				if typedVal {
					s = "true"
				} else {
					s = "false"
				}
			}
			// Only replace the value if a coercion actually happened;
			// other types (e.g. nil) pass through unchanged.
			if len(s) > 0 {
				yamlObj = interface{}(s)
			}
		}
		return yamlObj, nil
	}

	// Unreachable: every case of the switch above returns.
	return nil, nil
}

Some files were not shown because too many files have changed in this diff Show More